├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── 1-bug-report.yml │ ├── 2-feature-request.yml │ └── config.yml ├── pull_request_template.md ├── renovate.json ├── scripts │ ├── README.md │ ├── build-fleet-binaries.sh │ ├── build-fleet-images.sh │ ├── check-for-auto-generated-changes.sh │ ├── check-for-go-mod-changes.sh │ ├── create-pr.sh │ ├── create-secrets.sh │ ├── create-zot-certs.sh │ ├── deploy-fleet-latest-release.sh │ ├── deploy-fleet.sh │ ├── k3d-import-retry.sh │ ├── label-downstream-cluster.sh │ ├── register-downstream-clusters.sh │ ├── release-against-charts.sh │ ├── release-against-rancher.sh │ ├── release-against-test-charts.sh │ ├── run-integration-tests-group1.sh │ ├── run-integration-tests-group2.sh │ ├── run-integration-tests.sh │ ├── setup-rancher.sh │ ├── update_known_hosts_configmap.sh │ ├── upgrade-rancher-fleet-to-dev-fleet.sh │ └── wait-for-loadbalancer.sh └── workflows │ ├── add_issue_to_project.yml │ ├── check-changes.yml │ ├── ci.yml │ ├── e2e-ci.yml │ ├── e2e-fleet-upgrade-ci.yml │ ├── e2e-multicluster-ci.yml │ ├── e2e-nightly-ci.yml │ ├── e2e-rancher-upgrade-fleet-to-head-ci.yml │ ├── e2e-rancher-upgrade-fleet.yml │ ├── e2e-test-fleet-in-rancher.yml │ ├── golangci-lint.yml │ ├── port-issue.yml │ ├── release-against-charts.yml │ ├── release-against-rancher.yml │ ├── release-against-test-charts.yml │ ├── release.yml │ ├── renovate-vault.yml │ ├── typos.yaml │ └── updatecli.yml ├── .gitignore ├── .golangci.json ├── .goreleaser.yaml ├── CODEOWNERS ├── CONTRIBUTING.md ├── DEVELOPING.md ├── LICENSE ├── README.md ├── _typos.toml ├── benchmarks ├── assets │ ├── create-1-bundledeployment-10-resources │ │ └── bundle.yaml │ ├── create-1-gitrepo-50-bundle │ │ └── gitrepo.yaml │ ├── create-50-gitrepo-50-bundle │ │ └── gitrepos.yaml │ ├── create-bundle │ │ └── bundles.tmpl.yaml │ └── create-bundledeployment-500-resources │ │ └── bundles.tmpl.yaml ├── cmd │ ├── csv.go │ ├── dataset.go │ ├── db.go │ ├── json.go │ ├── parser │ │ └── parser.go │ ├── report.go │ ├── root.go │ └── run.go ├── deploy.go ├── gitrepo_bundle.go ├── record │ └── record.go ├── report │ └── report.go ├── suite.go └── targeting.go ├── charts ├── .helmignore ├── fleet-agent │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── ci │ │ └── default-values.yaml │ ├── templates │ │ ├── _helpers.tpl │ │ ├── configmap.yaml │ │ ├── deployment.yaml │ │ ├── network_policy_allow_all.yaml │ │ ├── rbac.yaml │ │ ├── secret.yaml │ │ ├── serviceaccount.yaml │ │ └── validate.yaml │ └── values.yaml ├── fleet-crd │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ └── crds.yaml │ └── values.yaml └── fleet │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── ci │ ├── debug-values.yaml │ ├── default-values.yaml │ ├── nobootstrap-values.yaml │ ├── nodebug-values.yaml │ └── nogitops-values.yaml │ ├── templates │ ├── _helpers.tpl │ ├── configmap.yaml │ ├── configmap_known_hosts.yaml │ ├── deployment.yaml │ ├── deployment_gitjob.yaml │ ├── deployment_helmops.yaml │ ├── job_cleanup_clusterregistrations.yaml │ ├── job_cleanup_gitrepojobs.yaml │ ├── rbac.yaml │ ├── rbac_gitjob.yaml │ ├── rbac_helmops.yaml │ ├── service.yaml │ ├── service_gitjob.yaml │ ├── serviceaccount.yaml │ ├── serviceaccount_gitjob.yaml │ └── serviceaccount_helmops.yaml │ ├── tests │ ├── agent-leader-election.yaml │ ├── agent_replica_count_test.yaml │ ├── extraAnnotations_test.yaml │ ├── extraLabels_test.yaml │ ├── fleet_controller_replica_count_test.yaml │ ├── gitjob_controller_replica_count_test.yaml │ └── helmops_controller_replica_count_test.yaml │ 
└── values.yaml ├── cmd ├── codegen │ ├── boilerplate.go.txt │ ├── cleanup │ │ └── main.go │ ├── hack │ │ └── generate_and_sort_crds.sh │ └── main.go ├── docs │ └── generate-cli-docs.go ├── fleetagent │ └── main.go ├── fleetcli │ └── main.go └── fleetcontroller │ └── main.go ├── dev ├── LOGGING.md ├── LOGGING.png ├── README.md ├── benchmarks.sh ├── build-fleet ├── create-secrets ├── create-zot-certs ├── env.multi-cluster-defaults ├── env.single-cluster-defaults ├── import-images-k3d ├── import-images-tests-k3d ├── k3d-clean ├── logs ├── remove-fleet ├── setup-cluster-config ├── setup-fleet ├── setup-fleet-managed-downstream ├── setup-fleet-multi-cluster ├── setup-k3d ├── setup-k3ds-downstream ├── setup-multi-cluster ├── setup-rancher-clusters ├── setup-single-cluster ├── update-agent-k3d ├── update-controller-k3d └── update-fleet-in-rancher-k3d ├── docs ├── README.md ├── arch.png ├── design.md ├── performance.md ├── qa_template.md └── release.md ├── e2e ├── acceptance │ ├── multi-cluster-examples │ │ ├── multi_cluster_test.go │ │ └── suite_test.go │ └── single-cluster-examples │ │ ├── single_cluster_test.go │ │ └── suite_test.go ├── assets │ ├── cluster-template.yaml │ ├── clustergroup-template.yaml │ ├── deps-charts │ │ ├── gitrepo.yaml │ │ ├── no-fleet-yaml │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ │ └── configmap.yaml │ │ │ └── values.yaml │ │ └── with-fleet-yaml │ │ │ ├── Chart.yaml │ │ │ ├── fleet.yaml │ │ │ ├── templates │ │ │ └── configmap.yaml │ │ │ └── values.yaml │ ├── fleet-upgrade │ │ └── gitrepo-simple.yaml │ ├── gitrepo-template.yaml │ ├── gitrepo │ │ ├── Dockerfile.gitserver │ │ ├── gitrepo-polling-disabled.yaml │ │ ├── gitrepo.yaml │ │ ├── gitrepo_sharded.yaml │ │ ├── gitrepo_with_auth.yaml │ │ ├── nginx_deployment.yaml │ │ ├── nginx_git.conf │ │ ├── nginx_service.yaml │ │ ├── post-receive.sh │ │ └── sleeper-chart │ │ │ ├── .helmignore │ │ │ ├── Chart.yaml │ │ │ ├── templates │ │ │ └── deployment.yaml │ │ │ └── values.yaml │ ├── helm │ │ ├── chartmuseum_deployment.yaml │ │ ├── chartmuseum_service.yaml │ │ ├── repo │ │ │ ├── http-with-auth-chart-path │ │ │ │ └── fleet.yaml │ │ │ ├── http-with-auth-repo-path │ │ │ │ └── fleet.yaml │ │ │ └── oci-with-auth │ │ │ │ └── fleet.yaml │ │ ├── zot_configmap.yaml │ │ ├── zot_deployment.yaml │ │ ├── zot_secret.yaml │ │ └── zot_service.yaml │ ├── helmop-template.yaml │ ├── helmop │ │ └── helmop.yaml │ ├── imagescan │ │ ├── imagescan.yaml │ │ ├── pre-releases-ignored │ │ │ ├── deployment.yaml │ │ │ └── fleet.yaml │ │ ├── pre-releases-ok │ │ │ ├── deployment.yaml │ │ │ └── fleet.yaml │ │ └── repo │ │ │ ├── deployment.yaml │ │ │ └── fleet.yaml │ ├── installation │ │ ├── simple.yaml │ │ └── verify.yaml │ ├── keep-resources │ │ ├── do-not-keep │ │ │ └── gitrepo.yaml │ │ └── keep │ │ │ └── gitrepo.yaml │ ├── metrics │ │ └── service.yaml │ ├── multi-cluster-examples │ │ ├── helm-external.yaml │ │ ├── helm-kustomize.yaml │ │ ├── helm-target-customizations.yaml │ │ ├── helm.yaml │ │ ├── kustomize.yaml │ │ └── manifests.yaml │ ├── multi-cluster │ │ ├── bundle-cm.yaml │ │ ├── bundle-depends-on.yaml │ │ ├── bundle-deployment-labels.yaml │ │ └── bundle-namespace-mapping.yaml │ ├── single-cluster-examples │ │ ├── helm-kustomize.yaml │ │ ├── helm-multi-chart.yaml │ │ ├── helm.yaml │ │ ├── kustomize.yaml │ │ └── manifests.yaml │ ├── single-cluster │ │ ├── delete-namespace │ │ │ └── gitrepo.yaml │ │ ├── driven.yaml │ │ ├── helm-cluster-values.yaml │ │ ├── helm-kustomize-disabled.yaml │ │ ├── helm-oci.yaml │ │ ├── helm-options-disabledns.yaml │ │ ├── 
helm-options-skip-schema-validation.yaml │ │ ├── helm-verify.yaml │ │ ├── helm-with-auth.yaml │ │ ├── multiple-paths.yaml │ │ ├── ns-labels-target-customization-no-defaults.yaml │ │ ├── ns-labels-target-customization.yaml │ │ ├── release-cleanup │ │ │ ├── bundle-crds.yaml │ │ │ ├── bundle-namespace-update.yaml │ │ │ └── bundle-release-name-update.yaml │ │ ├── release-names.yaml │ │ ├── test-oci.yaml │ │ ├── values-from-configmap.yaml │ │ └── values-from-secret.yaml │ └── status │ │ ├── chart-with-template-vars │ │ ├── Chart.yaml │ │ ├── fleet.yaml │ │ └── templates │ │ │ └── configmap.yaml │ │ └── gitrepo.yaml ├── installation │ ├── suite_test.go │ └── verify_test.go ├── keep-resources │ ├── keep_resources_test.go │ └── suite_test.go ├── metrics │ ├── bundle_test.go │ ├── bundledeployment_test.go │ ├── cluster_test.go │ ├── clustergroup_test.go │ ├── exporter.go │ ├── gitjob_test.go │ ├── gitrepo_test.go │ ├── helmop_test.go │ └── suite_test.go ├── multi-cluster │ ├── bundle_namespace_mapping_test.go │ ├── depends_on_test.go │ ├── installation │ │ ├── agent_test.go │ │ └── suite_test.go │ ├── not_matching_targets_delete_bd_test.go │ └── suite_test.go ├── require-secrets │ ├── gitrepo_test.go │ ├── oci_auth_test.go │ └── suite_test.go ├── single-cluster │ ├── delete_namespaces_test.go │ ├── finalizers_test.go │ ├── gitrepo_polling_disabled_test.go │ ├── gitrepo_test.go │ ├── helm_auth_test.go │ ├── helm_dependencies_test.go │ ├── helm_options_test.go │ ├── helm_verify_test.go │ ├── helmop_test.go │ ├── imagescan_test.go │ ├── oci_registry_test.go │ ├── release_cleanup_test.go │ ├── release_names_test.go │ ├── sharding_test.go │ ├── single_cluster_test.go │ ├── status_test.go │ ├── suite_test.go │ ├── target_customization_test.go │ └── values_from_test.go └── testenv │ ├── env.go │ ├── fail.go │ ├── githelper │ └── git.go │ ├── infra │ ├── cmd │ │ ├── root.go │ │ ├── setup.go │ │ └── teardown.go │ └── main.go │ ├── k8sclient │ └── k8sclient.go │ ├── kubectl │ └── kubectl.go │ ├── path.go │ ├── template.go │ ├── template_test.go │ └── zothelper │ └── zothelper.go ├── generate.go ├── go.mod ├── go.sum ├── integrationtests ├── agent │ ├── adoption_test.go │ ├── assets │ │ ├── deployment-v1.yaml │ │ ├── deployment-v2.yaml │ │ ├── deployment-with-deployment.yaml │ │ └── deployment-with-status.yaml │ ├── bundle_deployment_diffs_test.go │ ├── bundle_deployment_drift_test.go │ ├── bundle_deployment_status_test.go │ ├── dryrun_test.go │ ├── helm_capabilities_test.go │ └── suite_test.go ├── cli │ ├── apply │ │ ├── apply_online_test.go │ │ ├── apply_test.go │ │ ├── helm_repository.go │ │ ├── helm_test.go │ │ ├── suite_test.go │ │ └── targetsfile_test.go │ ├── assets │ │ ├── bundle │ │ │ ├── bundle-all.yaml │ │ │ └── bundle.yaml │ │ ├── bundledeployment │ │ │ ├── bd-only.yaml │ │ │ ├── bd-with-kube-version.yaml │ │ │ ├── bd.yaml │ │ │ └── content.yaml │ │ ├── deps-charts │ │ │ ├── multi-chart │ │ │ │ ├── remote-chart-with-deps │ │ │ │ │ └── fleet.yaml │ │ │ │ ├── simple-with-fleet-yaml-no-deps │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── fleet.yaml │ │ │ │ │ ├── templates │ │ │ │ │ │ └── configmap.yaml │ │ │ │ │ └── values.yaml │ │ │ │ └── simple-with-fleet-yaml │ │ │ │ │ ├── Chart.yaml │ │ │ │ │ ├── fleet.yaml │ │ │ │ │ ├── templates │ │ │ │ │ └── configmap.yaml │ │ │ │ │ └── values.yaml │ │ │ ├── no-fleet-yaml │ │ │ │ ├── Chart.yaml │ │ │ │ ├── templates │ │ │ │ │ └── configmap.yaml │ │ │ │ └── values.yaml │ │ │ ├── remote-chart-with-deps-disabled │ │ │ │ └── fleet.yaml │ │ │ ├── remote-chart-with-deps │ │ 
│ │ └── fleet.yaml │ │ │ ├── simple-with-fleet-yaml-no-deps │ │ │ │ ├── Chart.yaml │ │ │ │ ├── fleet.yaml │ │ │ │ ├── templates │ │ │ │ │ └── configmap.yaml │ │ │ │ └── values.yaml │ │ │ └── simple-with-fleet-yaml │ │ │ │ ├── Chart.yaml │ │ │ │ ├── fleet.yaml │ │ │ │ ├── templates │ │ │ │ └── configmap.yaml │ │ │ │ └── values.yaml │ │ ├── driven │ │ │ ├── helm │ │ │ │ └── fleet.yaml │ │ │ ├── kustomize │ │ │ │ ├── base │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── secret.yaml │ │ │ │ ├── dev.yaml │ │ │ │ ├── overlays │ │ │ │ │ ├── dev │ │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ │ └── secret.yaml │ │ │ │ │ └── prod │ │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ │ └── secret.yaml │ │ │ │ └── prod.yaml │ │ │ └── simple │ │ │ │ ├── deployment.yaml │ │ │ │ └── svc.yaml │ │ ├── driven2 │ │ │ └── kustomize │ │ │ │ ├── base │ │ │ │ ├── kustomization.yaml │ │ │ │ └── secret.yaml │ │ │ │ ├── fleetDev.yaml │ │ │ │ ├── fleetProd.yaml │ │ │ │ └── overlays │ │ │ │ ├── dev │ │ │ │ ├── kustomization.yaml │ │ │ │ └── secret.yaml │ │ │ │ └── prod │ │ │ │ ├── kustomization.yaml │ │ │ │ └── secret.yaml │ │ ├── driven_fleet_yaml_subfolder │ │ │ └── helm │ │ │ │ └── test │ │ │ │ └── fleet.yaml │ │ ├── helm_chart_url │ │ │ └── fleet.yaml │ │ ├── helm_options_disabled │ │ │ ├── configmap.yaml │ │ │ └── fleet.yaml │ │ ├── helm_options_enabled │ │ │ ├── configmap.yaml │ │ │ └── fleet.yaml │ │ ├── helm_options_kustomize │ │ │ ├── fleet.yaml │ │ │ └── kustomization.yaml │ │ ├── helm_path_credentials │ │ │ ├── fleet.yaml │ │ │ └── subfolder │ │ │ │ └── fleet.yaml │ │ ├── helm_repo_url │ │ │ └── fleet.yaml │ │ ├── helmrepository │ │ │ ├── config-chart-0.1.0.tgz │ │ │ ├── config-chart │ │ │ │ ├── Chart.yaml │ │ │ │ ├── templates │ │ │ │ │ └── configmap.yaml │ │ │ │ └── values.yaml │ │ │ ├── deps-chart-1.0.0.tgz │ │ │ ├── deps-chart │ │ │ │ ├── Chart.yaml │ │ │ │ ├── templates │ │ │ │ │ └── configmap.yaml │ │ │ │ └── values.yaml │ │ │ └── index.yaml │ │ ├── keep_resources │ │ │ ├── fleet.yaml │ │ │ └── svc.yaml │ │ ├── labels_update │ │ │ ├── cm.yaml │ │ │ └── fleet.yaml │ │ ├── nested_mixed_two_levels │ │ │ └── nested │ │ │ │ ├── deploymentA │ │ │ │ ├── deployment.yaml │ │ │ │ └── fleet.yaml │ │ │ │ ├── deploymentB │ │ │ │ └── svc.yaml │ │ │ │ ├── deploymentC │ │ │ │ ├── cm.yaml │ │ │ │ └── fleet.yaml │ │ │ │ └── deploymentD │ │ │ │ └── deployment.yaml │ │ ├── nested_multiple │ │ │ ├── README.md │ │ │ ├── deploymentA │ │ │ │ ├── fleet.yaml │ │ │ │ └── svc │ │ │ │ │ └── svc.yaml │ │ │ ├── deploymentB │ │ │ │ ├── fleet.yaml │ │ │ │ └── svc │ │ │ │ │ └── nested │ │ │ │ │ └── svc.yaml │ │ │ └── deploymentC │ │ │ │ ├── deployment.yaml │ │ │ │ └── fleet.yaml │ │ ├── nested_simple │ │ │ ├── README.md │ │ │ └── simple │ │ │ │ ├── deployment.yaml │ │ │ │ └── svc.yaml │ │ ├── nested_two_levels │ │ │ └── nested │ │ │ │ ├── deployment │ │ │ │ └── deployment.yaml │ │ │ │ └── svc │ │ │ │ └── svc.yaml │ │ ├── simple │ │ │ ├── deployment.yaml │ │ │ └── svc.yaml │ │ └── targets │ │ │ ├── override │ │ │ ├── cm.yaml │ │ │ └── fleet.yaml │ │ │ └── simple │ │ │ ├── cm.yaml │ │ │ └── fleet.yaml │ ├── cleanup │ │ ├── cleanup_clusterregistrations_test.go │ │ ├── cleanup_jobs_test.go │ │ └── suite_test.go │ ├── deploy │ │ ├── deploy_test.go │ │ └── suite_test.go │ ├── helpers.go │ └── target │ │ ├── suite_test.go │ │ └── target_test.go ├── controller │ ├── bundle │ │ ├── bundle_helm_test.go │ │ ├── bundle_labels_test.go │ │ ├── bundle_targets_test.go │ │ ├── status_test.go │ │ └── suite_test.go │ ├── bundledeployment │ │ ├── status_test.go │ │ 
└── suite_test.go │ ├── cluster │ │ ├── status_test.go │ │ └── suite_test.go │ └── clustergroup │ │ ├── status_test.go │ │ └── suite_test.go ├── gitcloner │ ├── assets │ │ ├── gitserver │ │ │ ├── git │ │ │ │ ├── .gitconfig │ │ │ │ └── .ssh │ │ │ │ │ └── environment │ │ │ ├── gogs │ │ │ │ ├── conf │ │ │ │ │ └── app.ini │ │ │ │ └── data │ │ │ │ │ └── gogs.db │ │ │ └── ssh │ │ │ │ └── ssh_host_ecdsa_key.pub │ │ ├── gogs │ │ │ └── password │ │ └── repo │ │ │ └── README.md │ ├── clone_test.go │ └── suite_test.go ├── gitjob │ ├── assets │ │ └── gitserver │ │ │ ├── git │ │ │ ├── .gitconfig │ │ │ └── .ssh │ │ │ │ └── environment │ │ │ ├── gogs │ │ │ ├── conf │ │ │ │ └── app.ini │ │ │ └── data │ │ │ │ └── gogs.db │ │ │ └── ssh │ │ │ ├── ssh_host_ecdsa_key │ │ │ └── ssh_host_ecdsa_key.pub │ ├── controller │ │ ├── controller_test.go │ │ ├── gitrepo_test.go │ │ ├── status_test.go │ │ └── suite_test.go │ └── git │ │ └── git_test.go ├── helmops │ └── controller │ │ ├── assets │ │ ├── root.crt │ │ ├── server.crt │ │ └── server.key │ │ ├── controller_test.go │ │ ├── status_test.go │ │ └── suite_test.go └── utils │ ├── envtest.go │ ├── helpers.go │ ├── kubeconfig.go │ └── namespace.go ├── internal ├── bundlereader │ ├── auth.go │ ├── auth_test.go │ ├── charturl.go │ ├── helm.go │ ├── helm_test.go │ ├── loaddirectory.go │ ├── loaddirectory_test.go │ ├── read.go │ ├── resources.go │ ├── resources_test.go │ └── style.go ├── client │ └── client.go ├── cmd │ ├── agent │ │ ├── clusterstatus.go │ │ ├── clusterstatus │ │ │ ├── suite_test.go │ │ │ ├── ticker.go │ │ │ └── ticker_test.go │ │ ├── controller │ │ │ ├── bundledeployment_controller.go │ │ │ └── drift_controller.go │ │ ├── deployer │ │ │ ├── cleanup │ │ │ │ ├── cleanup.go │ │ │ │ └── cleanup_test.go │ │ │ ├── data │ │ │ │ ├── convert │ │ │ │ │ └── convert.go │ │ │ │ ├── data.go │ │ │ │ └── values.go │ │ │ ├── deployer.go │ │ │ ├── deployer_test.go │ │ │ ├── desiredset │ │ │ │ ├── clients.go │ │ │ │ ├── desiredset.go │ │ │ │ ├── desiredset_apply.go │ │ │ │ ├── desiredset_compare.go │ │ │ │ ├── desiredset_process.go │ │ │ │ ├── desiredset_process_test.go │ │ │ │ ├── diff.go │ │ │ │ ├── diff_test.go │ │ │ │ ├── plan.go │ │ │ │ └── style.go │ │ │ ├── driftdetect │ │ │ │ └── driftdetect.go │ │ │ ├── internal │ │ │ │ ├── diff │ │ │ │ │ ├── diff.go │ │ │ │ │ ├── diff_options.go │ │ │ │ │ ├── json │ │ │ │ │ │ └── json.go │ │ │ │ │ ├── kubernetes_vendor │ │ │ │ │ │ └── pkg │ │ │ │ │ │ │ ├── api │ │ │ │ │ │ │ └── v1 │ │ │ │ │ │ │ │ └── endpoints │ │ │ │ │ │ │ │ └── util.go │ │ │ │ │ │ │ └── util │ │ │ │ │ │ │ └── hash │ │ │ │ │ │ │ └── hash.go │ │ │ │ │ └── scheme │ │ │ │ │ │ └── scheme.go │ │ │ │ ├── normalizers │ │ │ │ │ ├── diff_normalizer.go │ │ │ │ │ ├── glob │ │ │ │ │ │ └── glob.go │ │ │ │ │ └── knowntypes_normalizer.go │ │ │ │ └── resource │ │ │ │ │ └── ignore.go │ │ │ ├── kv │ │ │ │ └── split.go │ │ │ ├── merr │ │ │ │ └── error.go │ │ │ ├── monitor │ │ │ │ ├── condition.go │ │ │ │ ├── conditions_test.go │ │ │ │ ├── updatestatus.go │ │ │ │ └── updatestatus_test.go │ │ │ ├── normalizers │ │ │ │ ├── jsonpatch.go │ │ │ │ ├── mutatingwebhook.go │ │ │ │ ├── norm.go │ │ │ │ ├── status.go │ │ │ │ ├── status_test.go │ │ │ │ └── validatingwebhook.go │ │ │ ├── objectset │ │ │ │ ├── objectset.go │ │ │ │ ├── objectset_test.go │ │ │ │ └── stringset.go │ │ │ └── summary │ │ │ │ ├── cattletypes.go │ │ │ │ ├── condition.go │ │ │ │ ├── coretypes.go │ │ │ │ ├── suite_test.go │ │ │ │ ├── summarize.go │ │ │ │ ├── summarized.go │ │ │ │ ├── summarizers.go │ │ │ │ └── 
summarizers_test.go │ │ ├── globals │ │ │ └── globals.go │ │ ├── operator.go │ │ ├── register.go │ │ ├── register │ │ │ ├── register.go │ │ │ └── register_test.go │ │ ├── root.go │ │ └── trigger │ │ │ └── watcher.go │ ├── builder.go │ ├── cli │ │ ├── apply.go │ │ ├── apply │ │ │ └── apply.go │ │ ├── apply_test.go │ │ ├── cleanup.go │ │ ├── cleanup │ │ │ └── cleanup.go │ │ ├── deploy.go │ │ ├── gitcloner │ │ │ ├── cloner.go │ │ │ ├── cloner_test.go │ │ │ ├── cmd.go │ │ │ └── cmd_test.go │ │ ├── match │ │ │ └── match.go │ │ ├── root.go │ │ ├── target.go │ │ ├── test.go │ │ └── writer │ │ │ └── writer.go │ ├── controller │ │ ├── agentmanagement │ │ │ ├── agent │ │ │ │ ├── agent.go │ │ │ │ ├── config.go │ │ │ │ ├── manifest.go │ │ │ │ └── manifest_test.go │ │ │ ├── connection │ │ │ │ └── connection.go │ │ │ ├── controllers │ │ │ │ ├── bootstrap │ │ │ │ │ └── bootstrap.go │ │ │ │ ├── cluster │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── import.go │ │ │ │ │ └── import_test.go │ │ │ │ ├── clusterregistration │ │ │ │ │ ├── controller.go │ │ │ │ │ ├── controller_test.go │ │ │ │ │ └── suite_test.go │ │ │ │ ├── clusterregistrationtoken │ │ │ │ │ └── handler.go │ │ │ │ ├── config │ │ │ │ │ └── controller.go │ │ │ │ ├── controllers.go │ │ │ │ ├── manageagent │ │ │ │ │ ├── manageagent.go │ │ │ │ │ └── manageagent_test.go │ │ │ │ └── resources │ │ │ │ │ └── data.go │ │ │ ├── root.go │ │ │ ├── secret │ │ │ │ └── util.go │ │ │ └── start.go │ │ ├── cleanup │ │ │ ├── controllers │ │ │ │ ├── cleanup │ │ │ │ │ └── controller.go │ │ │ │ └── controllers.go │ │ │ ├── root.go │ │ │ └── start.go │ │ ├── errorutil │ │ │ └── errorutil.go │ │ ├── finalize │ │ │ └── finalize.go │ │ ├── gitops │ │ │ ├── operator.go │ │ │ └── reconciler │ │ │ │ ├── gitjob.go │ │ │ │ ├── gitjob_controller.go │ │ │ │ ├── gitjob_test.go │ │ │ │ ├── predicate.go │ │ │ │ ├── rbac.go │ │ │ │ ├── restrictions.go │ │ │ │ ├── restrictions_test.go │ │ │ │ ├── status_controller.go │ │ │ │ ├── suite_test.go │ │ │ │ └── targetsyaml.go │ │ ├── helmops │ │ │ ├── operator.go │ │ │ └── reconciler │ │ │ │ ├── helmop_controller.go │ │ │ │ ├── helmop_controller_test.go │ │ │ │ └── helmop_status.go │ │ ├── imagescan │ │ │ ├── gitcommit_job.go │ │ │ ├── tagscan_job.go │ │ │ ├── tagscan_job_test.go │ │ │ └── update │ │ │ │ ├── README.md │ │ │ │ ├── filereader.go │ │ │ │ ├── filter.go │ │ │ │ ├── result.go │ │ │ │ └── setters.go │ │ ├── namespace │ │ │ └── util.go │ │ ├── operator.go │ │ ├── options │ │ │ └── calculate.go │ │ ├── reconciler │ │ │ ├── bundle_controller.go │ │ │ ├── bundle_status.go │ │ │ ├── bundledeployment_controller.go │ │ │ ├── cluster_controller.go │ │ │ ├── clustergroup_controller.go │ │ │ ├── config_controller.go │ │ │ └── imagescan_controller.go │ │ ├── root.go │ │ ├── status │ │ │ └── status.go │ │ ├── summary │ │ │ ├── summary.go │ │ │ └── summary_test.go │ │ └── target │ │ │ ├── builder.go │ │ │ ├── mapping.go │ │ │ ├── matcher │ │ │ ├── bundlematch.go │ │ │ └── clustermatcher.go │ │ │ ├── partition.go │ │ │ ├── query.go │ │ │ ├── rollout.go │ │ │ ├── status.go │ │ │ ├── target.go │ │ │ └── target_test.go │ ├── debug.go │ └── options.go ├── config │ ├── config.go │ ├── config_test.go │ ├── overrides.go │ └── suite_test.go ├── content │ └── helpers.go ├── fleetyaml │ ├── fleetyaml.go │ └── fleetyaml_test.go ├── helmdeployer │ ├── capabilities.go │ ├── delete.go │ ├── deployer.go │ ├── helmcache │ │ ├── secret.go │ │ └── secret_test.go │ ├── history.go │ ├── impersonate.go │ ├── install.go │ ├── install_test.go │ ├── kustomize │ │ └── 
kustomize.go │ ├── list.go │ ├── list_test.go │ ├── postrender.go │ ├── postrender_test.go │ ├── rawyaml │ │ └── resources.go │ ├── render │ │ ├── helm.go │ │ └── patch │ │ │ └── patch.go │ ├── rollback.go │ └── template.go ├── helmupdater │ ├── helmupdater.go │ └── helmupdater_test.go ├── helmvalues │ ├── extract.go │ ├── extract_test.go │ ├── hash.go │ ├── hash_test.go │ └── set.go ├── manifest │ ├── lookup.go │ ├── manifest.go │ ├── output.go │ ├── store.go │ └── store_test.go ├── metrics │ ├── bundle_metrics.go │ ├── bundledeployment_metrics.go │ ├── cluster_metrics.go │ ├── clustergroup_metrics.go │ ├── gitrepo_metrics.go │ ├── helm_metrics.go │ └── metrics.go ├── mocks │ ├── client_mock.go │ ├── eventrecorder_mock.go │ ├── helm_deployer_mock.go │ └── orastarget_mock.go ├── names │ ├── keyhash.go │ ├── name.go │ ├── name_test.go │ ├── safeconcat.go │ ├── safeconcat_test.go │ └── suite_test.go ├── ocistorage │ ├── ociwrapper.go │ ├── ociwrapper_test.go │ ├── secret.go │ ├── secret_test.go │ └── suite_test.go ├── registration │ └── secret.go ├── resourcestatus │ ├── resourcekey.go │ └── resourcekey_test.go └── ssh │ ├── knownhosts.go │ ├── knownhosts_test.go │ ├── url.go │ └── url_test.go ├── package ├── Dockerfile ├── Dockerfile.agent └── log.sh ├── pkg ├── apis │ ├── fleet.cattle.io │ │ └── v1alpha1 │ │ │ ├── bundle_types.go │ │ │ ├── bundledeployment_types.go │ │ │ ├── bundlenamespacemapping_types.go │ │ │ ├── cluster_types.go │ │ │ ├── clustergroup_types.go │ │ │ ├── clusterregistration_types.go │ │ │ ├── clusterregistrationtoken_types.go │ │ │ ├── content_types.go │ │ │ ├── doc.go │ │ │ ├── fleetyaml.go │ │ │ ├── generic_map.go │ │ │ ├── generic_map_test.go │ │ │ ├── gitrepo_types.go │ │ │ ├── gitreporestriction_types.go │ │ │ ├── groupversion_info.go │ │ │ ├── helmop_types.go │ │ │ ├── imagescan_types.go │ │ │ ├── resource_types.go │ │ │ ├── status.go │ │ │ ├── summary │ │ │ └── summary.go │ │ │ └── zz_generated.deepcopy.go │ ├── go.mod │ ├── go.sum │ └── internal │ │ └── scheme.go ├── cert │ ├── cabundle.go │ └── cabundle_test.go ├── durations │ └── durations.go ├── event │ └── event.go ├── generated │ └── controllers │ │ └── fleet.cattle.io │ │ ├── factory.go │ │ ├── interface.go │ │ └── v1alpha1 │ │ ├── bundle.go │ │ ├── bundledeployment.go │ │ ├── bundlenamespacemapping.go │ │ ├── cluster.go │ │ ├── clustergroup.go │ │ ├── clusterregistration.go │ │ ├── clusterregistrationtoken.go │ │ ├── content.go │ │ ├── gitrepo.go │ │ ├── gitreporestriction.go │ │ ├── helmop.go │ │ ├── imagescan.go │ │ └── interface.go ├── git-urls │ ├── urls.go │ └── urls_test.go ├── git │ ├── fetch.go │ ├── fetch_test.go │ ├── mocks │ │ └── fetch_mock.go │ ├── netutils.go │ ├── netutils_test.go │ ├── remote.go │ ├── remote_test.go │ ├── suite_test.go │ ├── validate.go │ ├── validate_test.go │ ├── vendor.go │ └── vendor_test.go ├── sharding │ └── sharding.go ├── version │ └── version.go └── webhook │ ├── parser.go │ ├── parser_test.go │ ├── webhook.go │ └── webhook_test.go └── updatecli ├── updatecli.d ├── installation.yaml └── known-hosts.yaml └── values.d └── scm.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | ./.dapper 2 | ./.cache 3 | ./dist 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/2-feature-request.yml: -------------------------------------------------------------------------------- 1 | name: 'Feature Request' 2 | labels: ['kind/enhancement','area/fleet'] 3 | description: 
Create a feature request to help improve Fleet 4 | title: 'Feature Request: ' 5 | body: 6 | - type: textarea 7 | attributes: 8 | label: Is your feature request related to a problem? 9 | description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 10 | validations: 11 | required: false 12 | - type: textarea 13 | attributes: 14 | label: Solution you'd like 15 | description: A clear and concise description of what you want to happen 16 | validations: 17 | required: false 18 | - type: textarea 19 | attributes: 20 | label: Alternatives you've considered 21 | description: A clear and concise description of any alternative solutions or features you've considered 22 | validations: 23 | required: false 24 | - type: textarea 25 | attributes: 26 | label: Anything else? 27 | description: | 28 | Links? References? Screenshots? Anything that will give us more context about the issue you are encountering! 29 | 30 | Tip: You can attach images or log files by clicking this area to highlight it and then dragging files in. 31 | validations: 32 | required: false 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: true 2 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 2 | Refers to #XXX 3 | 4 | 5 | 6 | 7 | 10 | 11 | ## Additional Information 12 | 13 | ### Checklist 14 | 15 | - [ ] I have updated the documentation via a pull request in the 16 | [fleet-docs](https://github.com/rancher/fleet-docs) repository. 17 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>rancher/renovate-config//rancher-main#release" 5 | ], 6 | "baseBranches": [ 7 | "main", 8 | "release/v0.12", 9 | "release/v0.11", 10 | "release/v0.10" 11 | ], 12 | "ignorePaths": [ 13 | "**/assets/**" 14 | ], 15 | "packageRules": [ 16 | { 17 | "matchBaseBranches": ["release/v0.12"], 18 | "extends": ["github>rancher/renovate-config//rancher-2.11#release"] 19 | }, 20 | { 21 | "matchBaseBranches": ["release/v0.11"], 22 | "extends": ["github>rancher/renovate-config//rancher-2.10#release"] 23 | }, 24 | { 25 | "matchBaseBranches": ["release/v0.10"], 26 | "extends": ["github>rancher/renovate-config//rancher-2.9#release"] 27 | } 28 | ] 29 | } -------------------------------------------------------------------------------- /.github/scripts/README.md: -------------------------------------------------------------------------------- 1 | The assets in this folder are used by the GitHub CI. 2 | Please do not modify them to accommodate your local workflow; doing so often results in scripts that are hard to read and reason about.
3 | -------------------------------------------------------------------------------- /.github/scripts/build-fleet-binaries.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Description: build fleet binaries with debug flags 3 | 4 | set -euxo pipefail 5 | 6 | export GOARCH="${GOARCH:-amd64}" 7 | export CGO_ENABLED=0 8 | export GOOS=linux 9 | 10 | # fleet 11 | go build -gcflags='all=-N -l' -o bin/fleetcontroller-linux-"$GOARCH" ./cmd/fleetcontroller 12 | 13 | # fleet agent 14 | go build -gcflags='all=-N -l' -o "bin/fleet-linux-$GOARCH" ./cmd/fleetcli 15 | go build -gcflags='all=-N -l' -o "bin/fleetagent-linux-$GOARCH" ./cmd/fleetagent 16 | -------------------------------------------------------------------------------- /.github/scripts/build-fleet-images.sh: -------------------------------------------------------------------------------- 1 | export GOARCH="${GOARCH:-amd64}" 2 | 3 | docker build -f package/Dockerfile -t rancher/fleet:dev --build-arg="ARCH=$GOARCH" . 4 | docker build -f package/Dockerfile.agent -t rancher/fleet-agent:dev --build-arg="ARCH=$GOARCH" . 5 | -------------------------------------------------------------------------------- /.github/scripts/check-for-auto-generated-changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ue 3 | 4 | go generate 5 | ginkgo unfocus 6 | 7 | if [ -n "$(git status --porcelain)" ]; then 8 | echo "Generated files have either been changed manually or were not updated.\n" 9 | 10 | echo "The following generated files differ after regeneration:" 11 | git status --porcelain 12 | exit 1 13 | fi 14 | -------------------------------------------------------------------------------- /.github/scripts/check-for-go-mod-changes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ue 3 | 4 | find . -name 'go.mod' -execdir go mod tidy \; 5 | 6 | if [ -n "$(git status --porcelain)" ]; then 7 | echo "go.mod is not up to date. Please run 'go mod tidy' and commit the changes." 8 | echo 9 | echo "The following go module files differ after tidying:" 10 | git status --porcelain 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /.github/scripts/create-pr.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | target_branch=$1 4 | new_fleet=$2 5 | new_chart=$3 6 | repo=$4 7 | 8 | # Check if the environment variable is set 9 | if [ -z "$GITHUB_TOKEN" ]; then 10 | echo "Environment variable GITHUB_TOKEN is not set."
11 | exit 1 12 | fi 13 | 14 | # Configure git login 15 | git config --local --unset http.https://github.com/.extraheader ^AUTHORIZATION: 16 | gh auth setup-git 17 | 18 | # Create and push new branch 19 | git remote add fork "https://github.com/rancherbot/$repo" 20 | BRANCH_NAME="fleet-$(date +%s)" 21 | git checkout -b "$BRANCH_NAME" 22 | git push fork "$BRANCH_NAME" 23 | 24 | # Create a pull request 25 | gh pr create --title "[${target_branch}] fleet ${new_chart}+up${new_fleet} update" \ 26 | --body "Update Fleet to v${new_fleet}"$'\n\n'"Changelog: https://github.com/rancher/fleet/releases/tag/v${new_fleet}" \ 27 | --base "${target_branch}" \ 28 | --repo "rancher/$repo" --head "rancherbot:$BRANCH_NAME" -------------------------------------------------------------------------------- /.github/scripts/create-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | dir=${1-'FleetCI-RootCA'} 4 | 5 | # Create secret with certs, needed by test git server 6 | kubectl -n default create secret generic git-server-certs \ 7 | --from-file=./"$dir"/helm.crt \ 8 | --from-file=./"$dir"/helm.key 9 | 10 | # Create cattle-system namespace 11 | kubectl create ns cattle-system 12 | 13 | # Create Rancher CA bundle secret 14 | kubectl -n cattle-system create secret generic tls-ca-additional --from-file=ca-additional.pem=./"$dir"/root.crt 15 | -------------------------------------------------------------------------------- /.github/scripts/deploy-fleet-latest-release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | assets=$(curl -s https://api.github.com/repos/rancher/fleet/releases | jq -r "sort_by(.tag_name) | [ .[] | select(.draft | not) ] | .[-1].assets") 6 | crd_url=$(echo "$assets" | jq -r '.[] | select(.name | test("fleet-crd-.*.tgz")) | .browser_download_url') 7 | controller_url=$(echo "$assets" | jq -r '.[] | select(.name | test("fleet-\\d.*.tgz")) | .browser_download_url') 8 | helm -n cattle-fleet-system upgrade --install --create-namespace --wait fleet-crd "$crd_url" 9 | helm -n cattle-fleet-system upgrade --install --create-namespace --wait fleet "$controller_url" 10 | 11 | # wait for controller and agent rollout 12 | kubectl -n cattle-fleet-system rollout status deploy/fleet-controller 13 | { grep -E -q -m 1 "fleet-agent-local.*1/1"; kill $!; } < <(kubectl get bundles -n fleet-local -w) 14 | kubectl -n cattle-fleet-system rollout status deploy/fleet-agent 15 | -------------------------------------------------------------------------------- /.github/scripts/k3d-import-retry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ux 4 | 5 | i=1 6 | # "Tool" mode doesn't exit with 1 in case of an error, direct mode may freeze. 7 | # Run tool mode and try to detect the error message in its output: 8 | while k3d image import "$@" 2>&1 | tee out.$i | grep -iq "failed to import"; do 9 | cat out.$i 10 | i=$((i + 1)) 11 | if (( i > 3 )); then 12 | echo "failed to import images" 13 | exit 1 14 | fi 15 | echo "retrying... 
$i" 16 | sleep 1 17 | done 18 | -------------------------------------------------------------------------------- /.github/scripts/label-downstream-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | ns=${FLEET_E2E_NS_DOWNSTREAM-fleet-default} 6 | 7 | { grep -q -m 1 -e "1/1"; kill $!; } < <(kubectl get clusters.fleet.cattle.io -n "$ns" -w) 8 | name=$(kubectl get clusters.fleet.cattle.io -o=jsonpath='{.items[0].metadata.name}' -n "$ns") 9 | kubectl patch clusters.fleet.cattle.io -n "$ns" "$name" --type=json -p '[{"op": "add", "path": "/metadata/labels/env", "value": "test" }]' 10 | -------------------------------------------------------------------------------- /.github/scripts/run-integration-tests-group1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | SETUP_ENVTEST_VER=${SETUP_ENVTEST_VER-v0.0.0-20250218120612-6f6111124902} 6 | ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION-1.32} 7 | 8 | # install and prepare setup-envtest 9 | go install sigs.k8s.io/controller-runtime/tools/setup-envtest@"$SETUP_ENVTEST_VER" 10 | KUBEBUILDER_ASSETS=$(setup-envtest use --use-env -p path "$ENVTEST_K8S_VERSION") 11 | export KUBEBUILDER_ASSETS 12 | 13 | # Group 1: Run specific packages (adjust these based on execution time analysis) 14 | ginkgo --github-output --trace\ 15 | ./integrationtests/agent/... \ 16 | ./integrationtests/bundlereader/... \ 17 | ./integrationtests/cli/... \ 18 | ./integrationtests/controller/... -------------------------------------------------------------------------------- /.github/scripts/run-integration-tests-group2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | SETUP_ENVTEST_VER=${SETUP_ENVTEST_VER-v0.0.0-20250218120612-6f6111124902} 6 | ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION-1.32} 7 | 8 | # install and prepare setup-envtest 9 | go install sigs.k8s.io/controller-runtime/tools/setup-envtest@"$SETUP_ENVTEST_VER" 10 | KUBEBUILDER_ASSETS=$(setup-envtest use --use-env -p path "$ENVTEST_K8S_VERSION") 11 | export KUBEBUILDER_ASSETS 12 | 13 | # Group 2: Run everything else - this will automatically include newly added directories 14 | find ./integrationtests -type d -not -path '*/.git*' \ 15 | -not -path './integrationtests/agent*' \ 16 | -not -path './integrationtests/bundlereader*' \ 17 | -not -path './integrationtests/cli*' \ 18 | -not -path './integrationtests/controller*' | \ 19 | xargs ginkgo --github-output --trace -------------------------------------------------------------------------------- /.github/scripts/run-integration-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | SETUP_ENVTEST_VER=${SETUP_ENVTEST_VER-v0.0.0-20250218120612-6f6111124902} 6 | ENVTEST_K8S_VERSION=${ENVTEST_K8S_VERSION-1.32} 7 | 8 | # install and prepare setup-envtest 9 | go install sigs.k8s.io/controller-runtime/tools/setup-envtest@"$SETUP_ENVTEST_VER" 10 | KUBEBUILDER_ASSETS=$(setup-envtest use --use-env -p path "$ENVTEST_K8S_VERSION") 11 | export KUBEBUILDER_ASSETS 12 | 13 | # run integration tests 14 | ginkgo --github-output --trace ./integrationtests/... 
15 | -------------------------------------------------------------------------------- /.github/scripts/update_known_hosts_configmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Write a new definition of the `known-hosts` config map in the fleet chart based on updated entries for each provider. 4 | # Entries are obtained through `ssh-keyscan` and sorted lexically to preserve ordering and hence prevent false 5 | # positives. 6 | providers=( 7 | "bitbucket.org" 8 | "github.com" 9 | "gitlab.com" 10 | "ssh.dev.azure.com" 11 | "vs-ssh.visualstudio.com" 12 | ) 13 | 14 | dst=charts/fleet/templates/configmap_known_hosts.yaml 15 | echo "apiVersion: v1" > "$dst" 16 | echo "kind: ConfigMap" >> "$dst" 17 | echo "metadata:" >> "$dst" 18 | echo " name: known-hosts" >> "$dst" 19 | echo "data:" >> "$dst" 20 | echo " known_hosts: |" >> "$dst" 21 | 22 | for prov in "${providers[@]}"; do 23 | ssh-keyscan "$prov" | grep "^$prov" | sort -b | sed 's/^/ /' >> "$dst" 24 | done 25 | -------------------------------------------------------------------------------- /.github/scripts/wait-for-loadbalancer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | # wait for Rancher to create the ingress before waiting for the loadbalancer 6 | while ! kubectl get ingress -n cattle-system rancher; do 7 | sleep 1 8 | done 9 | 10 | # wait for loadBalancer IPs 11 | { grep -q -m 1 -e ".*"; kill $!; } < <(kubectl get ingress -n cattle-system rancher -o 'go-template={{range .status.loadBalancer.ingress}}{{.ip}}{{"\n"}}{{end}}' -w) 12 | # wait for certificate 13 | { grep -q -m 1 -e "tls-rancher-ingress.*True"; kill $!; } < <(kubectl get certs -n cattle-system -w) 14 | -------------------------------------------------------------------------------- /.github/workflows/add_issue_to_project.yml: -------------------------------------------------------------------------------- 1 | name: Add issue to project board 2 | # This action can be removed once the project's workflow editing feature is enabled. 3 | # That feature is still flagged as "Coming soon".
4 | # As of now, it is not possible to specify a column, see https://github.com/actions/add-to-project/issues/71 5 | on: 6 | issues: 7 | types: 8 | - opened 9 | - transferred 10 | 11 | jobs: 12 | add-to-project: 13 | name: Add issue to project 14 | runs-on: ubuntu-latest 15 | if: > 16 | github.repository == 'rancher/fleet' 17 | steps: 18 | - uses: actions/add-to-project@main 19 | with: 20 | project-url: https://github.com/orgs/rancher/projects/12 21 | github-token: ${{ secrets.ADD_TO_PROJECT_PAT }} 22 | -------------------------------------------------------------------------------- /.github/workflows/check-changes.yml: -------------------------------------------------------------------------------- 1 | name: Check for unallowed changes 2 | 3 | on: 4 | pull_request: 5 | 6 | env: 7 | MAIN_BRANCH: origin/main 8 | GOARCH: amd64 9 | CGO_ENABLED: 0 10 | 11 | jobs: 12 | check-changes: 13 | runs-on: ubuntu-latest 14 | 15 | steps: 16 | - 17 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 18 | with: 19 | fetch-depth: 0 20 | - 21 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 22 | with: 23 | go-version-file: 'go.mod' 24 | check-latest: true 25 | - 26 | name: Install Ginkgo CLI 27 | run: go install github.com/onsi/ginkgo/v2/ginkgo 28 | - 29 | name: go.mod 30 | run: ./.github/scripts/check-for-go-mod-changes.sh 31 | - 32 | name: generate.go 33 | run: ./.github/scripts/check-for-auto-generated-changes.sh 34 | -------------------------------------------------------------------------------- /.github/workflows/golangci-lint.yml: -------------------------------------------------------------------------------- 1 | name: golangci-lint 2 | 3 | on: 4 | schedule: 5 | - cron: '0 5 * * *' 6 | pull_request: 7 | 8 | jobs: 9 | golangci: 10 | name: golangci-lint 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 16 | with: 17 | submodules: recursive 18 | 19 | - name: Setup Go 20 | uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5 21 | with: 22 | go-version-file: 'go.mod' 23 | check-latest: true 24 | cache: false 25 | 26 | - name: Generate Golang 27 | run: | 28 | export PATH=$PATH:/home/runner/go/bin/ 29 | 30 | - name: golangci-lint 31 | uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 32 | with: 33 | # Required: the version of golangci-lint must be specified. 34 | version: v2.1.6 35 | 36 | args: --timeout=10m --config=.golangci.json 37 | 38 | # Optional: show only new issues if it's a pull request. The default value is `false`. 39 | # The condition sets this to true for PR events.
40 | only-new-issues: "${{ github.event_name == 'pull_request'}}" 41 | 42 | skip-cache: true 43 | -------------------------------------------------------------------------------- /.github/workflows/renovate-vault.yml: -------------------------------------------------------------------------------- 1 | name: Renovate 2 | on: 3 | workflow_dispatch: 4 | inputs: 5 | logLevel: 6 | description: "Override default log level" 7 | required: false 8 | default: "info" 9 | type: string 10 | overrideSchedule: 11 | description: "Override all schedules" 12 | required: false 13 | default: "false" 14 | type: string 15 | # Run twice in the early morning (UTC) for initial and follow up steps (create pull request and merge) 16 | schedule: 17 | - cron: '30 4,6 * * *' 18 | 19 | permissions: 20 | contents: read 21 | id-token: write 22 | 23 | jobs: 24 | call-workflow: 25 | uses: rancher/renovate-config/.github/workflows/renovate-vault.yml@release 26 | with: 27 | logLevel: ${{ inputs.logLevel || 'info' }} 28 | overrideSchedule: ${{ github.event.inputs.overrideSchedule == 'true' && '{''schedule'':null}' || '' }} 29 | secrets: inherit 30 | -------------------------------------------------------------------------------- /.github/workflows/typos.yaml: -------------------------------------------------------------------------------- 1 | name: Test Typos 2 | on: [pull_request] 3 | 4 | jobs: 5 | run: 6 | name: Spell Check with Typos 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - name: Checkout Repository 11 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 12 | 13 | - name: Check spelling of file.txt 14 | uses: crate-ci/typos@master 15 | 16 | -------------------------------------------------------------------------------- /.github/workflows/updatecli.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "Updatecli: Dependency Management" 3 | 4 | on: 5 | release: 6 | types: [published] 7 | workflow_dispatch: 8 | schedule: 9 | # * is a special character in YAML so you have to quote this string 10 | # Run once a day 11 | - cron: '0 1 * * *' 12 | 13 | permissions: 14 | contents: write 15 | pull-requests: write 16 | 17 | jobs: 18 | updatecli: 19 | runs-on: ubuntu-latest 20 | 21 | if: github.ref == 'refs/heads/main' 22 | steps: 23 | - name: Checkout 24 | uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 25 | 26 | - name: Install Updatecli in the runner 27 | uses: updatecli/updatecli-action@v2 28 | 29 | - name: Apply 30 | run: "updatecli apply --config ./updatecli/updatecli.d --values ./updatecli/values.d/scm.yaml" 31 | env: 32 | UPDATECLI_GITHUB_ACTOR: ${{ github.actor }} 33 | UPDATECLI_GITHUB_TOKEN: ${{ secrets.ADD_TO_PROJECT_PAT }} 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /build 2 | /.cache 3 | /bin 4 | /dist 5 | *.swp 6 | .idea 7 | *.DS_Store 8 | /fleet 9 | /.vscode 10 | docs/fleet-agent/ 11 | docs/fleet-cli/ 12 | docs/fleet-controller/ 13 | e2e/testenv/infra/infra 14 | *.tgz 15 | ^fleet$ 16 | FleetCI-RootCA 17 | .envrc 18 | env.multi-cluster 19 | env.single-cluster 20 | /fossa 21 | benchmarks/db 22 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Order is important. The last matching pattern has the most precedence. 
2 | 3 | * @rancher/fleet 4 | -------------------------------------------------------------------------------- /_typos.toml: -------------------------------------------------------------------------------- 1 | [files] 2 | extend-exclude = [ 3 | "go.mod", 4 | "go.sum", 5 | "ssh_host_ecdsa_key", 6 | "cloner_test.go", 7 | "netutils_test.go", 8 | "pkg/apis/internal/scheme.go", 9 | ] 10 | 11 | [default.extend-identifiers] 12 | "passin" = "passin" 13 | "ANDed" = "ANDed" 14 | "HoTWPQxTJ5dIY31" = "HoTWPQxTJ5dIY31" 15 | -------------------------------------------------------------------------------- /benchmarks/assets/create-1-gitrepo-50-bundle/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: GitRepo 3 | metadata: 4 | name: bm-1-gitrepo-50-bundle 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - benchmarks/create-1-gitrepo-50-bundle 10 | targetNamespace: bm-1-gitrepo-50-bundle 11 | targets: 12 | - clusterSelector: 13 | matchLabels: 14 | fleet.cattle.io/benchmark: "true" 15 | -------------------------------------------------------------------------------- /benchmarks/cmd/json.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | var jsonCmd = &cobra.Command{ 10 | Use: "json", 11 | Short: "print report as JSON", 12 | RunE: func(cmd *cobra.Command, args []string) error { 13 | sample, err := loadSampleFile(input) 14 | if err != nil { 15 | return err 16 | } 17 | 18 | // clean up structs for export 19 | type row struct { 20 | Value string `json:"value,omitempty"` 21 | Units string `json:"units,omitempty"` 22 | } 23 | type export struct { 24 | Description string 25 | Experiments map[string]map[string]row 26 | Setup map[string]row 27 | } 28 | 29 | s := export{ 30 | Experiments: map[string]map[string]row{}, 31 | Setup: map[string]row{}, 32 | Description: sample.Description, 33 | } 34 | for name, e := range sample.Experiments { 35 | if s.Experiments[name] == nil { 36 | s.Experiments[name] = map[string]row{} 37 | } 38 | for n, r := range e.Measurements { 39 | s.Experiments[name][n] = row{ 40 | Value: r.String(), 41 | Units: r.Units, 42 | } 43 | } 44 | } 45 | for name, r := range sample.Setup { 46 | s.Setup[name] = row{ 47 | Value: r.String(), 48 | Units: r.Units, 49 | } 50 | } 51 | 52 | fmt.Println(prettyPrint(s)) 53 | 54 | return nil 55 | }, 56 | } 57 | -------------------------------------------------------------------------------- /charts/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/fleet-agent/.helmignore: -------------------------------------------------------------------------------- 1 | .helmignore 2 | ci/ 3 | -------------------------------------------------------------------------------- /charts/fleet-agent/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | catalog.cattle.io/certified: rancher 3 | catalog.cattle.io/hidden: "true" 4 | catalog.cattle.io/kube-version: '>= 1.28.0-0 < 1.34.0-0' 5 | catalog.cattle.io/namespace: cattle-fleet-system 6 | catalog.cattle.io/os: linux 7 | catalog.cattle.io/permits-os: linux,windows 8 | catalog.cattle.io/rancher-version: '>= 2.12.0-0 < 2.13.0-0' 9 | catalog.cattle.io/release-name: fleet-agent 10 | apiVersion: v2 11 | appVersion: 0.0.0 12 | description: Fleet Agent - GitOps at Scale 13 | icon: https://charts.rancher.io/assets/logos/fleet.svg 14 | name: fleet-agent 15 | version: 0.0.0 16 | -------------------------------------------------------------------------------- /charts/fleet-agent/README.md: -------------------------------------------------------------------------------- 1 | ## Fleet Agent Helm Chart 2 | 3 | Every Fleet-managed downstream cluster will run an agent that communicates back to the Fleet controller. This agent is just another set of Kubernetes controllers running in the downstream cluster. 4 | 5 | Standalone Fleet users use this chart for agent-initiated registration. For more details see [agent-initiated registration](https://fleet.rancher.io/cluster-registration#agent-initiated). 6 | Fleet in Rancher does not use this chart, but creates the agent deployments programmatically. 7 | 8 | The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/). 
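
For agent-initiated registration, the values this chart consumes (`apiServerURL`, `apiServerCA`, `clusterNamespace`, `token`; see `templates/secret.yaml` and `templates/validate.yaml`) come from a cluster registration token created on the upstream cluster. The snippet below is only a rough sketch: the chart archive name, the release name (assumed to match the chart's `internal.managedReleaseName` default) and all values are placeholders, and the linked documentation remains the authoritative reference.

```bash
# Sketch of an agent-initiated registration install; all values are placeholders.
# The namespace matches the chart's catalog annotation (cattle-fleet-system);
# clusterNamespace and token are obtained from the upstream registration secret.
helm -n cattle-fleet-system install --create-namespace --wait fleet-agent \
  fleet-agent-<version>.tgz \
  --set apiServerURL="https://upstream.example.com:6443" \
  --set-file apiServerCA=ca.pem \
  --set clusterNamespace="<cluster namespace from registration>" \
  --set token="<token from registration>"
```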
-------------------------------------------------------------------------------- /charts/fleet-agent/ci/default-values.yaml: -------------------------------------------------------------------------------- 1 | apiServerURL: "https://localhost" 2 | apiServerCA: "abc" 3 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "system_default_registry" -}} 2 | {{- if .Values.global.cattle.systemDefaultRegistry -}} 3 | {{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} 4 | {{- else -}} 5 | {{- "" -}} 6 | {{- end -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Windows cluster will add default taint for linux nodes, 11 | add below linux tolerations to workloads could be scheduled to those linux nodes 12 | */}} 13 | {{- define "linux-node-tolerations" -}} 14 | - key: "cattle.io/os" 15 | value: "linux" 16 | effect: "NoSchedule" 17 | operator: "Equal" 18 | {{- end -}} 19 | 20 | {{- define "linux-node-selector" -}} 21 | kubernetes.io/os: linux 22 | {{- end -}} -------------------------------------------------------------------------------- /charts/fleet-agent/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: fleet-agent 5 | data: 6 | config: |- 7 | { 8 | {{ if .Values.labels }} 9 | "labels":{{toJson .Values.labels}}, 10 | {{ end }} 11 | "clientID":"{{.Values.clientID}}", 12 | {{ if .Values.garbageCollectionInterval }} 13 | "garbageCollectionInterval": "{{.Values.garbageCollectionInterval}}", 14 | {{ end }} 15 | "agentTLSMode": "{{.Values.agentTLSMode}}" 16 | } 17 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/network_policy_allow_all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: default-allow-all 6 | namespace: {{ .Values.internal.systemNamespace }} 7 | spec: 8 | podSelector: {} 9 | ingress: 10 | - {} 11 | egress: 12 | - {} 13 | policyTypes: 14 | - Ingress 15 | - Egress 16 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: fleet-agent-system-fleet-agent-role 5 | rules: 6 | - apiGroups: 7 | - '*' 8 | resources: 9 | - '*' 10 | verbs: 11 | - '*' 12 | - nonResourceURLs: 13 | - "*" 14 | verbs: 15 | - "*" 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRoleBinding 19 | metadata: 20 | name: fleet-agent-system-fleet-agent-role-binding 21 | roleRef: 22 | apiGroup: rbac.authorization.k8s.io 23 | kind: ClusterRole 24 | name: fleet-agent-system-fleet-agent-role 25 | subjects: 26 | - kind: ServiceAccount 27 | name: fleet-agent 28 | namespace: {{.Release.Namespace}} 29 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | systemRegistrationNamespace: "{{b64enc .Values.systemRegistrationNamespace}}" 4 | clusterNamespace: "{{b64enc .Values.clusterNamespace}}" 5 | token: "{{b64enc 
.Values.token}}" 6 | apiServerURL: "{{b64enc .Values.apiServerURL}}" 7 | apiServerCA: "{{b64enc .Values.apiServerCA}}" 8 | kind: Secret 9 | metadata: 10 | name: fleet-agent-bootstrap 11 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fleet-agent 5 | -------------------------------------------------------------------------------- /charts/fleet-agent/templates/validate.yaml: -------------------------------------------------------------------------------- 1 | {{if ne .Release.Namespace .Values.internal.systemNamespace }} 2 | {{ fail (printf "This chart must be installed in the %s namespace" .Values.internal.systemNamespace) }} 3 | {{end}} 4 | 5 | {{if ne .Release.Name .Values.internal.managedReleaseName }} 6 | {{ fail (printf "This chart must be installed with release name %s" .Values.internal.managedReleaseName) }} 7 | {{end}} 8 | 9 | {{if not .Values.apiServerURL }} 10 | {{ fail "apiServerURL is required to be set, and most likely also apiServerCA" }} 11 | {{end}} 12 | -------------------------------------------------------------------------------- /charts/fleet-crd/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | catalog.cattle.io/certified: rancher 3 | catalog.cattle.io/hidden: "true" 4 | catalog.cattle.io/namespace: cattle-fleet-system 5 | catalog.cattle.io/os: linux 6 | catalog.cattle.io/permits-os: linux,windows 7 | catalog.cattle.io/release-name: fleet-crd 8 | apiVersion: v2 9 | appVersion: 0.0.0 10 | description: Fleet CustomResourceDefinitions 11 | icon: https://charts.rancher.io/assets/logos/fleet.svg 12 | name: fleet-crd 13 | version: 0.0.0 14 | -------------------------------------------------------------------------------- /charts/fleet-crd/README.md: -------------------------------------------------------------------------------- 1 | # Fleet CRD Helm Chart 2 | 3 | The Fleet CustomResourceDefinitions Helm chart is a prerequisite for the Fleet Helm chart; install it before (or alongside) the Fleet chart, as sketched below. 4 | 5 | The Fleet documentation is centralized in the [doc website](https://fleet.rancher.io/).
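
The CI script `.github/scripts/deploy-fleet-latest-release.sh` shown earlier in this repository installs the charts in exactly that order; the sketch below mirrors it, with `$crd_url` and `$controller_url` standing in for the packaged chart locations.

```bash
# Sketch mirroring .github/scripts/deploy-fleet-latest-release.sh:
# the CRD chart goes in first, then the controller chart.
helm -n cattle-fleet-system upgrade --install --create-namespace --wait fleet-crd "$crd_url"
helm -n cattle-fleet-system upgrade --install --create-namespace --wait fleet "$controller_url"
```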
6 | -------------------------------------------------------------------------------- /charts/fleet-crd/values.yaml: -------------------------------------------------------------------------------- 1 | # This file is intentionally empty 2 | -------------------------------------------------------------------------------- /charts/fleet/.helmignore: -------------------------------------------------------------------------------- 1 | .helmignore 2 | ci/ 3 | -------------------------------------------------------------------------------- /charts/fleet/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | catalog.cattle.io/auto-install: fleet-crd=match 3 | catalog.cattle.io/certified: rancher 4 | catalog.cattle.io/experimental: "true" 5 | catalog.cattle.io/hidden: "true" 6 | catalog.cattle.io/kube-version: '>= 1.28.0-0 < 1.34.0-0' 7 | catalog.cattle.io/namespace: cattle-fleet-system 8 | catalog.cattle.io/os: linux 9 | catalog.cattle.io/permits-os: linux,windows 10 | catalog.cattle.io/provides-gvr: clusters.fleet.cattle.io/v1alpha1 11 | catalog.cattle.io/rancher-version: '>= 2.12.0-0 < 2.13.0-0' 12 | catalog.cattle.io/release-name: fleet 13 | apiVersion: v2 14 | appVersion: 0.0.0 15 | description: Fleet Controller - GitOps at Scale 16 | icon: https://charts.rancher.io/assets/logos/fleet.svg 17 | name: fleet 18 | version: 0.0.0 19 | -------------------------------------------------------------------------------- /charts/fleet/ci/default-values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/charts/fleet/ci/default-values.yaml -------------------------------------------------------------------------------- /charts/fleet/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{- define "system_default_registry" -}} 2 | {{- if .Values.global.cattle.systemDefaultRegistry -}} 3 | {{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} 4 | {{- else -}} 5 | {{- "" -}} 6 | {{- end -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Windows cluster will add default taint for linux nodes, 11 | add below linux tolerations to workloads could be scheduled to those linux nodes 12 | */}} 13 | {{- define "linux-node-tolerations" -}} 14 | - key: "cattle.io/os" 15 | value: "linux" 16 | effect: "NoSchedule" 17 | operator: "Equal" 18 | {{- end -}} 19 | 20 | {{- define "linux-node-selector" -}} 21 | kubernetes.io/os: linux 22 | {{- end -}} -------------------------------------------------------------------------------- /charts/fleet/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.metrics.enabled }} 2 | {{- $shards := list (dict "id" "" "nodeSelector" dict) -}} 3 | {{- $uniqueShards := list -}} 4 | {{- if .Values.shards -}} 5 | {{- range .Values.shards -}} 6 | {{- if not (has .id $uniqueShards) -}} 7 | {{- $shards = append $shards . 
-}} 8 | {{- $uniqueShards = append $uniqueShards .id -}} 9 | {{- end -}} 10 | {{- end -}} 11 | {{- end -}} 12 | 13 | {{ range $shard := $shards }} 14 | apiVersion: v1 15 | kind: Service 16 | metadata: 17 | name: "monitoring-fleet-controller{{if $shard.id }}-shard-{{ $shard.id }}{{end}}" 18 | labels: 19 | app: fleet-controller 20 | spec: 21 | type: ClusterIP 22 | ports: 23 | - port: 8080 24 | targetPort: 8080 25 | protocol: TCP 26 | name: metrics 27 | selector: 28 | app: fleet-controller 29 | {{- if empty $shard.id }} 30 | fleet.cattle.io/shard-default: "true" 31 | {{- else }} 32 | fleet.cattle.io/shard-id: "{{ $shard.id }}" 33 | {{- end }} 34 | --- 35 | {{- end }} 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /charts/fleet/templates/service_gitjob.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.gitops.enabled }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: gitjob 6 | spec: 7 | ports: 8 | - name: http-80 9 | port: 80 10 | protocol: TCP 11 | targetPort: 8080 12 | selector: 13 | app: "gitjob" 14 | --- 15 | {{- if .Values.metrics.enabled }} 16 | {{- $shards := list (dict "id" "" "nodeSelector" dict) -}} 17 | {{- $uniqueShards := list -}} 18 | {{- if .Values.shards -}} 19 | {{- range .Values.shards -}} 20 | {{- if not (has .id $uniqueShards) -}} 21 | {{- $shards = append $shards . -}} 22 | {{- $uniqueShards = append $uniqueShards .id -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{ range $shard := $shards }} 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | name: "monitoring-gitjob{{if $shard.id }}-shard-{{ $shard.id }}{{end}}" 32 | labels: 33 | app: gitjob 34 | spec: 35 | type: ClusterIP 36 | ports: 37 | - port: 8081 38 | targetPort: 8081 39 | protocol: TCP 40 | name: metrics 41 | selector: 42 | app: gitjob 43 | {{- if empty $shard.id }} 44 | fleet.cattle.io/shard-default: "true" 45 | {{- else }} 46 | fleet.cattle.io/shard-id: "{{ $shard.id }}" 47 | {{- end }} 48 | --- 49 | {{- end }} 50 | {{- end }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /charts/fleet/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fleet-controller 5 | 6 | {{- if .Values.bootstrap.enabled }} 7 | --- 8 | apiVersion: v1 9 | kind: ServiceAccount 10 | metadata: 11 | name: fleet-controller-bootstrap 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/fleet/templates/serviceaccount_gitjob.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.gitops.enabled }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: gitjob 6 | {{- end }} 7 | -------------------------------------------------------------------------------- /charts/fleet/templates/serviceaccount_helmops.yaml: -------------------------------------------------------------------------------- 1 | {{- if has (dict "name" "EXPERIMENTAL_HELM_OPS" "value" "true") .Values.extraEnv }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: helmops 6 | {{- end }} 7 | -------------------------------------------------------------------------------- /charts/fleet/tests/agent-leader-election.yaml: -------------------------------------------------------------------------------- 1 | suite: leader election 
values test for fleet agent 2 | templates: 3 | - deployment.yaml 4 | tests: 5 | - it: should set the environment variable to the duration 6 | set: 7 | agent: 8 | leaderElection: 9 | leaseDuration: 60s 10 | retryPeriod: 5s 11 | renewDeadline: 10s 12 | asserts: 13 | - isKind: 14 | of: Deployment 15 | - equal: 16 | path: spec.template.spec.containers[?(@.name == 'fleet-agentmanagement')].env[?(@.name == 'FLEET_AGENT_ELECTION_LEASE_DURATION')].value 17 | value: "60s" 18 | - equal: 19 | path: spec.template.spec.containers[?(@.name == 'fleet-agentmanagement')].env[?(@.name == 'FLEET_AGENT_ELECTION_RETRY_PERIOD')].value 20 | value: "5s" 21 | - equal: 22 | path: spec.template.spec.containers[?(@.name == 'fleet-agentmanagement')].env[?(@.name == 'FLEET_AGENT_ELECTION_RENEW_DEADLINE')].value 23 | value: "10s" 24 | 25 | 26 | -------------------------------------------------------------------------------- /charts/fleet/tests/agent_replica_count_test.yaml: -------------------------------------------------------------------------------- 1 | suite: replica values test for fleet agent 2 | templates: 3 | - deployment.yaml 4 | tests: 5 | - it: should set the environment variable to the number of replicas 6 | set: 7 | agent.replicas: 3 8 | asserts: 9 | - isKind: 10 | of: Deployment 11 | - equal: 12 | path: spec.template.spec.containers[?(@.name == 'fleet-agentmanagement')].env[?(@.name == 'FLEET_AGENT_REPLICA_COUNT')].value 13 | value: "3" 14 | -------------------------------------------------------------------------------- /charts/fleet/tests/fleet_controller_replica_count_test.yaml: -------------------------------------------------------------------------------- 1 | suite: replica values test for fleet controller 2 | templates: 3 | - deployment.yaml 4 | tests: 5 | - it: should set spec.replicas to the number of replicas specified in the controller's replicas field 6 | set: 7 | controller.replicas: 3 8 | asserts: 9 | - isKind: 10 | of: Deployment 11 | - equal: 12 | path: spec.replicas 13 | value: 3 14 | -------------------------------------------------------------------------------- /charts/fleet/tests/gitjob_controller_replica_count_test.yaml: -------------------------------------------------------------------------------- 1 | suite: replica values test for fleet controller 2 | templates: 3 | - deployment_gitjob.yaml 4 | tests: 5 | - it: should set spec.replicas to the number of replicas specified in the controller's replicas field 6 | set: 7 | gitjob.replicas: 3 8 | asserts: 9 | - isKind: 10 | of: Deployment 11 | - equal: 12 | path: spec.replicas 13 | value: 3 14 | -------------------------------------------------------------------------------- /charts/fleet/tests/helmops_controller_replica_count_test.yaml: -------------------------------------------------------------------------------- 1 | suite: replica values test for fleet controller 2 | templates: 3 | - deployment_helmops.yaml 4 | tests: 5 | - it: should set spec.replicas to the number of replicas specified in the controller's replicas field 6 | set: 7 | helmops.replicas: 3 8 | extraEnv: 9 | - name: 'EXPERIMENTAL_HELM_OPS' 10 | value: 'true' 11 | asserts: 12 | - isKind: 13 | of: Deployment 14 | - equal: 15 | path: spec.replicas 16 | value: 3 17 | 18 | - it: should not render a document at all when 'EXPERIMENTAL_HELM_OPS' is false 19 | set: 20 | helmops.replicas: 3 21 | extraEnv: 22 | - name: 'EXPERIMENTAL_HELM_OPS' 23 | value: 'false' 24 | asserts: 25 | - hasDocuments: 26 | count: 0 27 | 
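The service.yaml and service_gitjob.yaml templates earlier in this chart deduplicate .Values.shards by id and emit one metrics Service per shard, in addition to a default, unsharded one; the files under tests/ follow the helm-unittest suite format. A hedged sketch of exercising both from a local checkout (the shard ids are illustrative, and the plugin source URL is an assumption):

```bash
# Render the chart with two shards; expect "monitoring-" Services for the
# default controller plus one per unique shard id.
helm template fleet charts/fleet \
  --set metrics.enabled=true \
  --set 'shards[0].id=shard0' \
  --set 'shards[1].id=shard1' \
  | grep 'name: "monitoring-'

# Run the chart unit tests (assumes the helm-unittest plugin is installed).
helm plugin install https://github.com/helm-unittest/helm-unittest  # one-time
helm unittest charts/fleet
```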
-------------------------------------------------------------------------------- /cmd/codegen/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2020 - YEAR SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | -------------------------------------------------------------------------------- /cmd/codegen/cleanup/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/sirupsen/logrus" 7 | 8 | "github.com/rancher/wrangler/v3/pkg/cleanup" 9 | ) 10 | 11 | func main() { 12 | if err := cleanup.Cleanup("./pkg/apis"); err != nil { 13 | logrus.Fatal(err) 14 | } 15 | if err := os.RemoveAll("./pkg/generated"); err != nil { 16 | logrus.Fatal(err) 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /cmd/codegen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen" 7 | "github.com/rancher/wrangler/v3/pkg/controller-gen/args" 8 | 9 | // Ensure gvk gets loaded in wrangler/pkg/gvk cache 10 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/apiextensions.k8s.io/v1" 11 | 12 | // To keep the dependency in go.mod 13 | _ "sigs.k8s.io/controller-tools/pkg/crd" 14 | ) 15 | 16 | func main() { 17 | os.Unsetenv("GOPATH") 18 | controllergen.Run(args.Options{ 19 | OutputPackage: "github.com/rancher/fleet/pkg/generated", 20 | Boilerplate: "cmd/codegen/boilerplate.go.txt", 21 | Groups: map[string]args.Group{ 22 | "fleet.cattle.io": { 23 | Types: []interface{}{ 24 | "./pkg/apis/fleet.cattle.io/v1alpha1", 25 | }, 26 | }, 27 | }, 28 | }) 29 | } 30 | -------------------------------------------------------------------------------- /cmd/fleetagent/main.go: -------------------------------------------------------------------------------- 1 | // Package main is the entrypoint for the fleet-agent binary. 2 | package main 3 | 4 | import ( 5 | _ "net/http/pprof" 6 | 7 | "github.com/rancher/fleet/internal/cmd/agent" 8 | 9 | "github.com/rancher/wrangler/v3/pkg/signals" 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | func main() { 14 | ctx := signals.SetupSignalContext() 15 | cmd := agent.App() 16 | if err := cmd.ExecuteContext(ctx); err != nil { 17 | logrus.Fatal(err) 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /cmd/fleetcli/main.go: -------------------------------------------------------------------------------- 1 | // Package main is the entry point for the fleet apply binary. 
2 | package main 3 | 4 | import ( 5 | "os" 6 | "strings" 7 | 8 | // Ensure GVKs are registered 9 | _ "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io" 10 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/apiextensions.k8s.io" 11 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/apps" 12 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 13 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/rbac" 14 | 15 | // Add non-default auth providers 16 | _ "k8s.io/client-go/plugin/pkg/client/auth" 17 | 18 | "github.com/rancher/wrangler/v3/pkg/signals" 19 | "github.com/sirupsen/logrus" 20 | 21 | cmds "github.com/rancher/fleet/internal/cmd/cli" 22 | ) 23 | 24 | func main() { 25 | ctx := signals.SetupSignalContext() 26 | cmd := cmds.App() 27 | if err := cmd.ExecuteContext(ctx); err != nil { 28 | if strings.ToLower(os.Getenv(cmds.JSONOutputEnvVar)) == "true" { 29 | log := logrus.New() 30 | log.SetFormatter(&logrus.JSONFormatter{}) 31 | // use a fleet specific field name so we are sure logs from other libraries 32 | // are not considered. 33 | log.WithFields(logrus.Fields{ 34 | "fleetErrorMessage": err.Error(), 35 | }).Fatal("Fleet cli failed") 36 | } else { 37 | logrus.Fatal(err) 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /cmd/fleetcontroller/main.go: -------------------------------------------------------------------------------- 1 | // Package main provides the entrypoint for the fleet-controller binary. 2 | package main 3 | 4 | import ( 5 | _ "net/http/pprof" 6 | 7 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/apiextensions.k8s.io" 8 | _ "github.com/rancher/wrangler/v3/pkg/generated/controllers/networking.k8s.io" 9 | "github.com/rancher/wrangler/v3/pkg/signals" 10 | "github.com/sirupsen/logrus" 11 | 12 | "github.com/rancher/fleet/internal/cmd/controller" 13 | ) 14 | 15 | func main() { 16 | ctx := signals.SetupSignalContext() 17 | cmd := controller.App() 18 | if err := cmd.ExecuteContext(ctx); err != nil { 19 | logrus.Fatal(err) 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /dev/LOGGING.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/dev/LOGGING.png -------------------------------------------------------------------------------- /dev/benchmarks.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | date=$(date +"%F_%T") 5 | out="b-$date.json" 6 | FLEET_BENCH_TIMEOUT=${FLEET_BENCH_TIMEOUT-"5m"} 7 | FLEET_BENCH_NAMESPACE=${FLEET_BENCH_NAMESPACE-"fleet-local"} 8 | 9 | go run ./benchmarks/cmd run -d benchmarks/db -t "$FLEET_BENCH_TIMEOUT" -n "$FLEET_BENCH_NAMESPACE" 10 | -------------------------------------------------------------------------------- /dev/create-secrets: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./.github/scripts/create-secrets.sh "$@" 4 | -------------------------------------------------------------------------------- /dev/create-zot-certs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ./.github/scripts/create-zot-certs.sh "$@" 4 | -------------------------------------------------------------------------------- /dev/env.multi-cluster-defaults: 
-------------------------------------------------------------------------------- 1 | export FLEET_E2E_NS=fleet-local 2 | export FLEET_E2E_NS_DOWNSTREAM=fleet-default 3 | 4 | export FLEET_E2E_CLUSTER=k3d-upstream 5 | export FLEET_E2E_CLUSTER_DOWNSTREAM=k3d-downstream1 6 | 7 | export GIT_HTTP_USER=fleet-ci 8 | export GIT_HTTP_PASSWORD=foo 9 | 10 | export CI_OCI_USERNAME=fleet-ci 11 | export CI_OCI_PASSWORD=foo 12 | export CI_OCI_CERTS_DIR=FleetCI-RootCA/ 13 | -------------------------------------------------------------------------------- /dev/env.single-cluster-defaults: -------------------------------------------------------------------------------- 1 | export FLEET_E2E_NS=fleet-local 2 | 3 | export FLEET_E2E_CLUSTER=k3d-upstream 4 | export FLEET_E2E_CLUSTER_DOWNSTREAM=k3d-upstream 5 | 6 | export GIT_HTTP_USER=fleet-ci 7 | export GIT_HTTP_PASSWORD=foo 8 | 9 | export CI_OCI_USERNAME=fleet-ci 10 | export CI_OCI_PASSWORD=foo 11 | export CI_OCI_CERTS_DIR=FleetCI-RootCA/ 12 | -------------------------------------------------------------------------------- /dev/import-images-k3d: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | # The upstream cluster to import all the images to. 6 | upstream_ctx="${FLEET_E2E_CLUSTER-k3d-upstream}" 7 | 8 | # The single downstream cluster to import the agent image to. 9 | downstream_ctx="${FLEET_E2E_CLUSTER_DOWNSTREAM-k3d-downstream1}" 10 | 11 | k3d image import rancher/fleet:dev rancher/fleet-agent:dev -m direct -c "${upstream_ctx#k3d-}" 12 | 13 | downstream_keyword="${downstream_ctx#k3d-}" 14 | downstream_keyword="${downstream_keyword%[0-9]*}" 15 | if [ "$upstream_ctx" != "$downstream_ctx" ]; then 16 | for cluster in $(k3d cluster list -o json | \ 17 | jq -r ".[].name | select(. | contains(\"${downstream_keyword}\"))"); do 18 | k3d image import rancher/fleet-agent:dev -m direct -c "${cluster}" 19 | done 20 | else 21 | echo "not importing agent to any downstream clusters. Set FLEET_E2E_CLUSTER_DOWNSTREAM" 22 | fi 23 | -------------------------------------------------------------------------------- /dev/import-images-tests-k3d: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build and import git server image 3 | 4 | set -euxo pipefail 5 | 6 | upstream_ctx="${FLEET_E2E_CLUSTER-k3d-upstream}" 7 | 8 | gitSrvImage=$( docker image ls -q nginx-git:test ) 9 | 10 | GIT_HTTP_PASSWORD=${GIT_HTTP_PASSWORD-foo} 11 | if [ -n "${FORCE_GIT_SERVER_BUILD-}" -o -z "$gitSrvImage" ]; then 12 | cd e2e/assets/gitrepo 13 | docker build . -f Dockerfile.gitserver --build-arg passwd=$(openssl passwd $GIT_HTTP_PASSWORD) -t nginx-git:test 14 | else 15 | echo "Git test server image already present. Skipping build." 16 | fi 17 | 18 | k3d image import nginx-git:test -m direct -c "${upstream_ctx#k3d-}" 19 | -------------------------------------------------------------------------------- /dev/logs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | app=${1-fleet-controller} 4 | 5 | kubectl logs -n cattle-fleet-system -l "app=$app" -f 6 | -------------------------------------------------------------------------------- /dev/setup-cluster-config: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ex 4 | 5 | if [ "$1" = "teardown" ]; then 6 | go run ./e2e/testenv/infra/main.go teardown 7 | exit 0 8 | fi 9 | 10 | if [ ! 
-f "$DEFAULT_CONFIG" ]; then 11 | echo >&2 "Run this from the root of the repo" 12 | exit 1 13 | fi 14 | 15 | if [ -n "$FLEET_TEST_CONFIG" ]; then 16 | if [ ! -f "$FLEET_TEST_CONFIG" ]; then 17 | echo >&2 "File not found: \$FLEET_TEST_CONFIG: $FLEET_TEST_CONFIG" 18 | exit 1 19 | fi 20 | echo "Using custom config file: $FLEET_TEST_CONFIG" 21 | # shellcheck source=/dev/null 22 | source "$FLEET_TEST_CONFIG" 23 | elif [ -f "$CUSTOM_CONFIG_FILE" ]; then 24 | echo "Using custom config file: $CUSTOM_CONFIG_FILE" 25 | # shellcheck source=/dev/null 26 | source "$CUSTOM_CONFIG_FILE" 27 | else 28 | echo "Using default config file: $DEFAULT_CONFIG" 29 | # shellcheck source=/dev/null 30 | source "$DEFAULT_CONFIG" 31 | fi 32 | -------------------------------------------------------------------------------- /dev/setup-fleet-multi-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Description: install fleet into upstream and downstream cluster 3 | 4 | set -euxo pipefail 5 | 6 | if [ ! -d ./charts/fleet ]; then 7 | echo "please change the current directory to the fleet repo checkout" 8 | exit 1 9 | fi 10 | 11 | upstream_ctx="${FLEET_E2E_CLUSTER-k3d-upstream}" 12 | 13 | kubectl config use-context "$upstream_ctx" 14 | 15 | dev/setup-fleet 16 | dev/setup-fleet-managed-downstream 17 | 18 | kubectl config use-context "$upstream_ctx" 19 | 20 | ns=${FLEET_E2E_NS_DOWNSTREAM-fleet-default} 21 | 22 | # Wait for clusters to become "ready" by waiting for bundles to become ready. 23 | num_clusters=$(k3d cluster list -o json | jq -r '.[].name | select( . | contains("downstream") )' | wc -l) 24 | while [[ $(kubectl get clusters.fleet.cattle.io -n "$ns" | grep '1/1' -c) -ne $num_clusters ]]; do 25 | sleep 1 26 | done 27 | 28 | kubectl patch clusters.fleet.cattle.io -n "$ns" --all --type=json -p '[{"op": "add", "path": "/metadata/labels/env", "value": "test" }]' 29 | -------------------------------------------------------------------------------- /dev/setup-k3ds-downstream: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Description: Create n downstream clusters 3 | 4 | set -euxo pipefail 5 | 6 | args=${k3d_args---network fleet} 7 | docker_mirror=${docker_mirror-} 8 | name="downstream" 9 | FLEET_E2E_DS_CLUSTER_COUNT=${FLEET_E2E_DS_CLUSTER_COUNT-1} 10 | 11 | if [ -n "$docker_mirror" ]; then 12 | TMP_CONFIG="$(mktemp)" 13 | trap "rm -f $TMP_CONFIG" EXIT 14 | 15 | cat <"$TMP_CONFIG" 16 | mirrors: 17 | "docker.io": 18 | endpoint: 19 | - $docker_mirror 20 | EOF 21 | args="$args --registry-config $TMP_CONFIG" 22 | fi 23 | 24 | for i in $(seq 1 "$FLEET_E2E_DS_CLUSTER_COUNT"); do 25 | k3d cluster create "$name$i" \ 26 | --servers 1 \ 27 | --api-port $((36443 + i)) \ 28 | -p "$((4080 + (1000 * i))):80@server:0" \ 29 | -p "$((3443 + i)):443@server:0" \ 30 | --k3s-arg "--tls-san=k3d-$name$i-server-0@server:0" \ 31 | $args 32 | done 33 | 34 | kubectl config use-context k3d-upstream 35 | -------------------------------------------------------------------------------- /dev/setup-multi-cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export DEFAULT_CONFIG="dev/env.multi-cluster-defaults" 4 | export CUSTOM_CONFIG_FILE="env.multi-cluster" 5 | 6 | # shellcheck source=dev/setup-cluster-config 7 | source dev/setup-cluster-config 8 | 9 | FLEET_E2E_DS_CLUSTER_COUNT=${FLEET_E2E_DS_CLUSTER_COUNT:-1} 10 | 11 | # Cleans with settings sourced, so it should be 
rather selective. 12 | ./dev/k3d-clean 13 | 14 | PORT_OFFSET=0 15 | if [ -z "$external_ip" ]; 16 | then 17 | PORT_OFFSET=$(( RANDOM % 10001 )) 18 | fi 19 | 20 | ./dev/setup-k3d "${FLEET_E2E_CLUSTER#k3d-}" "$PORT_OFFSET" 21 | ./dev/setup-k3ds-downstream 22 | ./dev/build-fleet 23 | ./dev/import-images-k3d 24 | ./dev/setup-fleet-multi-cluster 25 | 26 | # needed for gitrepo tests 27 | ./dev/import-images-tests-k3d 28 | ./dev/create-zot-certs 'FleetCI-RootCA' # for OCI tests 29 | ./dev/create-secrets 'FleetCI-RootCA' 30 | go run ./e2e/testenv/infra/main.go setup 31 | -------------------------------------------------------------------------------- /dev/setup-single-cluster: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export DEFAULT_CONFIG="dev/env.single-cluster-defaults" 4 | export CUSTOM_CONFIG_FILE="env.single-cluster" 5 | 6 | # shellcheck source=dev/setup-cluster-config 7 | source ./dev/setup-cluster-config 8 | 9 | if [ $1 = "--reuse" ]; then 10 | ./dev/remove-fleet 11 | else 12 | # Cleans with settings sourced, so it should be rather selective. 13 | ./dev/k3d-clean 14 | 15 | PORT_OFFSET=0 16 | if [ -z "$external_ip" ]; 17 | then 18 | PORT_OFFSET=$(( RANDOM % 10001 )) 19 | fi 20 | 21 | ./dev/setup-k3d "${FLEET_E2E_CLUSTER#k3d-}" "$PORT_OFFSET" 22 | fi 23 | 24 | ./dev/build-fleet 25 | ./dev/import-images-k3d 26 | ./dev/setup-fleet "${FLEET_E2E_CLUSTER#k3d-}" '[ 27 | { 28 | "id": "shard0", 29 | "nodeSelector": { 30 | "kubernetes.io/hostname": "k3d-upstream-server-0" 31 | } 32 | }, 33 | { 34 | "id": "shard1" 35 | }, 36 | { 37 | "id": "shard2", 38 | "nodeSelector": { 39 | "kubernetes.io/hostname": "k3d-upstream-server-2" 40 | } 41 | } 42 | ]' 43 | 44 | # needed for gitrepo tests 45 | ./dev/import-images-tests-k3d 46 | ./dev/create-zot-certs 'FleetCI-RootCA' # for OCI tests 47 | set +e # keep going if secrets already exist 48 | ./dev/create-secrets 'FleetCI-RootCA' 49 | go run ./e2e/testenv/infra/main.go setup 50 | -------------------------------------------------------------------------------- /dev/update-agent-k3d: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | if [ ! -d ./cmd/fleetagent ]; then 6 | echo "please change the current directory to the fleet repo checkout" 7 | exit 1 8 | fi 9 | 10 | export GOOS=linux 11 | export GOARCH="${GOARCH:-amd64}" 12 | export CGO_ENABLED=0 13 | 14 | # fleet agent 15 | go build -gcflags='all=-N -l' -o "bin/fleetagent-linux-$GOARCH" ./cmd/fleetagent 16 | docker build -f package/Dockerfile.agent -t rancher/fleet-agent:dev --build-arg="ARCH=$GOARCH" . 17 | 18 | fleet_ctx=$(kubectl config current-context) 19 | k3d image import rancher/fleet-agent:dev -m direct -c "${fleet_ctx#k3d-}" 20 | kubectl delete pod -l app=fleet-agent -n cattle-fleet-local-system 21 | 22 | upstream_ctx="${FLEET_E2E_CLUSTER-k3d-upstream}" 23 | downstream_ctx="${FLEET_E2E_CLUSTER_DOWNSTREAM-k3d-downstream1}" 24 | downstream_keyword="${downstream_ctx#k3d-}" 25 | downstream_keyword="${downstream_keyword%[0-9]*}" 26 | if [ "$upstream_ctx" != "$downstream_ctx" ]; then 27 | for cluster in $(k3d cluster list -o json | \ 28 | jq -r ".[].name | select(. 
| contains(\"${downstream_keyword}\"))"); do 29 | k3d image import rancher/fleet-agent:dev -m direct -c "${cluster}" 30 | kubectl --context "k3d-$cluster" delete pod -l app=fleet-agent -n cattle-fleet-system 31 | done 32 | fi 33 | -------------------------------------------------------------------------------- /dev/update-controller-k3d: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | if [ ! -d ./cmd/fleetcontroller ]; then 6 | echo "please change the current directory to the fleet repo checkout" 7 | exit 1 8 | fi 9 | 10 | export GOOS=linux 11 | export GOARCH="${GOARCH:-amd64}" 12 | export CGO_ENABLED=0 13 | 14 | # fleetcontroller 15 | go build -gcflags='all=-N -l' -o bin/fleetcontroller-linux-"$GOARCH" ./cmd/fleetcontroller 16 | go build -gcflags='all=-N -l' -o "bin/fleet-linux-$GOARCH" ./cmd/fleetcli 17 | docker build -f package/Dockerfile -t rancher/fleet:dev --build-arg="ARCH=$GOARCH" . 18 | 19 | fleet_ctx=$(kubectl config current-context) 20 | k3d image import rancher/fleet:dev -m direct -c "${fleet_ctx#k3d-}" 21 | kubectl delete pod -l app=fleet-controller -n cattle-fleet-system 22 | kubectl delete pod -l app=gitjob -n cattle-fleet-system 23 | -------------------------------------------------------------------------------- /dev/update-fleet-in-rancher-k3d: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | if [ ! -d ./.github/scripts ]; then 6 | echo "please change the current directory to the fleet repo checkout" 7 | exit 1 8 | fi 9 | 10 | fleet_version="${1-0.7.0-rc.2}" 11 | if [ "$fleet_version" == "dev" ]; then 12 | echo "don't forget to run dev/build-fleet before running this script" 13 | dev/import-images-k3d 14 | ./.github/scripts/upgrade-rancher-fleet-to-dev-fleet.sh 15 | exit 0 16 | fi 17 | 18 | # install released fleet from url 19 | url_crd="https://github.com/rancher/fleet/releases/download/v${fleet_version}/fleet-crd-${fleet_version}.tgz" 20 | url="https://github.com/rancher/fleet/releases/download/v${fleet_version}/fleet-${fleet_version}.tgz" 21 | version="v${fleet_version}" 22 | fleetns="cattle-fleet-system" 23 | 24 | helm upgrade fleet-crd "$url_crd" --wait -n "$fleetns" 25 | until helm -n "$fleetns" status fleet-crd | grep -q "STATUS: deployed"; do echo waiting for original fleet-crd chart to be deployed; sleep 1; done 26 | 27 | helm upgrade fleet "$url" \ 28 | --wait -n "$fleetns" \ 29 | --set image.tag="$version" \ 30 | --set agentImage.tag="$version" \ 31 | --set debug=true --set debugLevel=1 32 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | This directory is used for maintainer and developer documentation. The [Fleet docs site](https://fleet.rancher.io/) provides the latest end-user documentation. 
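The dev scripts above are meant to be run from the root of a Fleet checkout; here is a hedged sketch of a typical multi-cluster development loop, using the variables those scripts read (the registry mirror URL is a placeholder, and entirely optional):

```bash
# Bring up one upstream and two downstream k3d clusters, build the dev images,
# import them, and install Fleet everywhere; dev/setup-multi-cluster drives the
# other scripts and sources dev/env.multi-cluster-defaults for its settings.
export FLEET_E2E_DS_CLUSTER_COUNT=2                            # read by dev/setup-k3ds-downstream
export docker_mirror=http://registry-mirror.example.com:5000   # optional pull-through cache
./dev/setup-multi-cluster

# After changing agent code, rebuild and redeploy only the agent pods.
./dev/update-agent-k3d

# Tail component logs; dev/logs defaults to fleet-controller, or pass another app label.
./dev/logs gitjob
```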
2 | -------------------------------------------------------------------------------- /docs/arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/docs/arch.png -------------------------------------------------------------------------------- /docs/performance.md: -------------------------------------------------------------------------------- 1 | # Examining Performance Issues 2 | 3 | Fleet differs from Rancher in one major design philosophy: nearly all "business logic" happens in the local cluster rather than in downstream clusters via agents. 4 | The good news here is that the `fleet-controller` will tell us nearly all that we need to know via pod logs, network traffic and resource usage. 5 | That being said, downstream `fleet-agent` deployments can perform Kubernetes API requests _back_ to the local cluster, which means that we have to monitor traffic inbound to the local cluster from our agents _as well as_ the outbound traffic we'd come to expect from the local `fleet-controller`. 6 | 7 | While network traffic is a major point of consideration, we also have to consider whether our performance issues are **compute-based**, **memory-based**, or **network-based**. 8 | For example: you may encounter a pod with high compute usage, but that could be caused by heightened network traffic received from the _truly_ malfunctioning pod. 9 | 10 | ## Using pprof 11 | 12 | [http pprof](https://pkg.go.dev/net/http/pprof) handlers are enabled by default with all [default profiles](https://pkg.go.dev/runtime/pprof#Profile) under the `/debug/pprof` prefix. 13 | 14 | To collect profiling information continuously one can use https://github.com/rancherlabs/support-tools/tree/master/collection/rancher/v2.x/profile-collector 15 | -------------------------------------------------------------------------------- /e2e/acceptance/multi-cluster-examples/suite_test.go: -------------------------------------------------------------------------------- 1 | package mc_examples_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/rancher/fleet/e2e/testenv" 7 | 8 | . "github.com/onsi/ginkgo/v2" 9 | . "github.com/onsi/gomega" 10 | ) 11 | 12 | func TestE2E(t *testing.T) { 13 | RegisterFailHandler(testenv.FailAndGather) 14 | RunSpecs(t, "E2E Suite for Multi-Cluster Examples") 15 | } 16 | 17 | var ( 18 | env *testenv.Env 19 | ) 20 | 21 | var _ = BeforeSuite(func() { 22 | SetDefaultEventuallyTimeout(testenv.Timeout) 23 | testenv.SetRoot("../../..") 24 | 25 | env = testenv.New() 26 | }) 27 | -------------------------------------------------------------------------------- /e2e/acceptance/single-cluster-examples/suite_test.go: -------------------------------------------------------------------------------- 1 | package examples_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/rancher/fleet/e2e/testenv" 7 | 8 | . "github.com/onsi/ginkgo/v2" 9 | .
"github.com/onsi/gomega" 10 | ) 11 | 12 | func TestE2E(t *testing.T) { 13 | RegisterFailHandler(testenv.FailAndGather) 14 | RunSpecs(t, "E2E Suite for Single-Cluster Examples") 15 | } 16 | 17 | var ( 18 | env *testenv.Env 19 | ) 20 | 21 | var _ = BeforeSuite(func() { 22 | SetDefaultEventuallyTimeout(testenv.Timeout) 23 | testenv.SetRoot("../../..") 24 | 25 | env = testenv.New() 26 | }) 27 | -------------------------------------------------------------------------------- /e2e/assets/cluster-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: Cluster 3 | metadata: 4 | name: {{ .Name }} 5 | namespace: {{ .Namespace }} 6 | {{- if .Labels}} 7 | labels: 8 | {{- range $key, $value := .Labels}} 9 | {{$key}}: {{$value}} 10 | {{- end}} 11 | {{- end}} 12 | {{- if .Spec }} 13 | spec: 14 | {{- range $key, $value := .Spec}} 15 | {{$key}}: {{$value}} 16 | {{- end}} 17 | {{- end}} 18 | 19 | 20 | -------------------------------------------------------------------------------- /e2e/assets/clustergroup-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: ClusterGroup 3 | metadata: 4 | name: {{ .Name }} 5 | namespace: {{ .Namespace }} 6 | {{- if .Labels }} 7 | labels: 8 | {{- range $key, $value := .Labels }} 9 | {{$key}}: {{$value}} 10 | {{- end }} 11 | {{- end }} 12 | spec: 13 | selector: 14 | matchLabels: 15 | {{- range $key, $value := .MatchLabels}} 16 | {{$key}}: {{$value}} 17 | {{- end}} 18 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{.Name}} 5 | spec: 6 | repo: {{.Repo}} 7 | branch: {{.Branch}} 8 | helmSecretName: "helm-secret" 9 | targetNamespace: {{.TargetNamespace}} 10 | paths: 11 | - examples 12 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/no-fleet-yaml/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: sleeper-chart 9 | version: "0.1.0" 10 | repository: {{.HelmRepoUrl}} 11 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/no-fleet-yaml/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/no-fleet-yaml/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/with-fleet-yaml/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: sleeper-chart 9 | 
version: "0.1.0" 10 | repository: {{.HelmRepoUrl}} 11 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/with-fleet-yaml/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: {{.TestNamespace}} 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: "" 5 | repo: "" 6 | version: "" 7 | disableDependencyUpdate: {{.DisableDependencyUpdate}} 8 | 9 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/with-fleet-yaml/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /e2e/assets/deps-charts/with-fleet-yaml/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /e2e/assets/fleet-upgrade/gitrepo-simple.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: test-simple 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - simple-chart 10 | - simple-manifest 11 | targets: 12 | - clusterSelector: 13 | matchExpressions: 14 | - key: provider.cattle.io 15 | operator: NotIn 16 | values: 17 | - harvester 18 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo-template.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{ .Name }} 5 | {{- if ne .Shard "" }} 6 | labels: 7 | fleet.cattle.io/shard-ref: {{ .Shard }} 8 | {{- end }} 9 | 10 | spec: 11 | repo: https://github.com/rancher/fleet-test-data 12 | branch: {{ .Branch }} 13 | paths: 14 | {{- range .Paths}} 15 | - {{.}} 16 | {{- end}} 17 | targetNamespace: {{ .TargetNamespace }} 18 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/Dockerfile.gitserver: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | FROM nginx:1.25.4-alpine 3 | 4 | ARG user=fleet-ci 5 | ARG passwd 6 | 7 | RUN apk add git 8 | RUN apk add git-daemon 9 | RUN apk add fcgiwrap 10 | RUN apk add spawn-fcgi 11 | 12 | # Set user info and enable force-push 13 | COPY <<-"EOT" /root/.gitconfig 14 | [user] 15 | name = Fleet CI 16 | email = fleet.ci@test.com 17 | [http] 18 | receivepack = true 19 | [receive] 20 | denyNonFastforwards = false 21 | EOT 22 | 23 | # Configure git remote 24 | RUN mkdir -p /srv/git/repo 25 | WORKDIR /srv/git/repo 26 | RUN git init . 
--bare 27 | RUN git update-server-info 28 | 29 | # Configure nginx 30 | COPY nginx_git.conf /etc/nginx/nginx.conf 31 | RUN echo "$user:$passwd" > /srv/.htpasswd 32 | 33 | CMD spawn-fcgi -s /var/run/fcgiwrap.socket -M 777 /usr/bin/fcgiwrap && nginx-debug -g 'daemon off;' 34 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/gitrepo-polling-disabled.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: GitRepo 3 | metadata: 4 | name: {{ .Name }} 5 | spec: 6 | repo: {{ .Repo }} 7 | branch: {{ .Branch }} 8 | paths: 9 | - disable_polling 10 | targetNamespace: {{ .TargetNamespace }} 11 | disablePolling: true 12 | 13 | 14 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{.Name}} 5 | spec: 6 | repo: {{.Repo}} 7 | branch: {{.Branch}} 8 | pollingInterval: {{.PollingInterval}} 9 | {{- if .TargetNamespace }} 10 | targetNamespace: {{.TargetNamespace}} 11 | {{- end }} 12 | paths: 13 | - examples 14 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/gitrepo_sharded.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{.Name}} 5 | labels: 6 | fleet.cattle.io/shard-ref: {{.ShardID}} 7 | spec: 8 | repo: {{.Repo}} 9 | branch: {{.Branch}} 10 | pollingInterval: {{.PollingInterval}} 11 | targetNamespace: {{.TargetNamespace}} 12 | paths: 13 | - simple-chart 14 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/gitrepo_with_auth.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: gitrepo-test 5 | spec: 6 | repo: {{.Repo}} 7 | clientSecretName: git-auth 8 | branch: {{.Branch}} 9 | pollingInterval: {{.PollingInterval}} 10 | paths: 11 | - examples 12 | 13 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/nginx_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: git-server 5 | labels: 6 | fleet: testing 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: git-server 12 | template: 13 | metadata: 14 | labels: 15 | app: git-server 16 | spec: 17 | volumes: 18 | - name: git-certs 19 | secret: 20 | secretName: git-server-certs 21 | items: 22 | - key: helm.crt 23 | path: helm.crt 24 | - key: helm.key 25 | path: helm.key 26 | containers: 27 | - name: git-server 28 | image: nginx-git:test 29 | imagePullPolicy: IfNotPresent 30 | ports: 31 | - containerPort: 4343 32 | - containerPort: 8080 33 | volumeMounts: 34 | - name: git-certs 35 | mountPath: "/etc/ssl/certs" 36 | readOnly: true 37 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/nginx_git.conf: -------------------------------------------------------------------------------- 1 | events {} 2 | http { 3 | server { 4 | error_log stderr info; 5 | listen 80; 6 | listen 443 ssl; 7 | server_name localhost; 8 | ssl_certificate /etc/ssl/certs/helm.crt; 9 
| ssl_certificate_key /etc/ssl/certs/helm.key; 10 | 11 | # This is where the repositories live on the server 12 | root /srv/git; 13 | 14 | auth_basic "git requires auth"; 15 | auth_basic_user_file /srv/.htpasswd; 16 | 17 | location ~ (/.*) { 18 | include /etc/nginx/fastcgi_params; 19 | fastcgi_pass unix:/var/run/fcgiwrap.socket; 20 | fastcgi_param SCRIPT_FILENAME /usr/libexec/git-core/git-http-backend; 21 | fastcgi_param PATH_INFO $uri; 22 | fastcgi_param REMOTE_USER $remote_user; 23 | fastcgi_param GIT_HTTP_EXPORT_ALL ""; 24 | fastcgi_param GIT_PROJECT_ROOT /srv/git; 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/nginx_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: git-service 5 | spec: 6 | selector: 7 | app: git-server 8 | ports: 9 | - name: http 10 | protocol: TCP 11 | port: 8080 12 | targetPort: 80 13 | - name: https 14 | protocol: TCP 15 | port: 4343 16 | targetPort: 443 17 | type: LoadBalancer 18 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/post-receive.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This simulates a webhook call from Github, populating only fields which gitjob is known to care about [1]. 4 | # [1]: https://github.com/rancher/fleet/blob/main/pkg/webhook/webhook.go#L129 5 | 6 | # From https://stackoverflow.com/a/11150763 7 | # (note: the branch last pushed to is not necessarily the checked out branch on the remote) 8 | ref=$(find refs/heads -type f | sort | tail -1) 9 | after=$(cat $ref) 10 | 11 | # necessary to make gitjob interpret the call as a push event coming from Github 12 | github_header="X-GitHub-Event: push" 13 | 14 | curl \ 15 | --retry-delay 5 \ 16 | --retry 12 \ 17 | --fail-with-body gitjob.cattle-fleet-system.svc.cluster.local \ 18 | -H "$github_header" \ 19 | -d "{\"ref\": \"$ref\", \"after\": \"$after\", \"repository\": {\"html_url\": \"{{.RepoURL}}\"}}" 20 | 21 | echo "Webhook sent successfully" 22 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/sleeper-chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/sleeper-chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: sleeper-chart 3 | description: A test chart 4 | type: application 5 | version: 0.1.0 6 | appVersion: "1.16.0" 7 | -------------------------------------------------------------------------------- /e2e/assets/gitrepo/sleeper-chart/values.yaml: -------------------------------------------------------------------------------- 1 | replicaCount: 1 2 | 3 | image: 4 | repository: rancher/mirrored-library-busybox 5 | pullPolicy: IfNotPresent 6 | tag: "1.34.1" 7 | 8 | imagePullSecrets: [] 9 | 10 | podAnnotations: {} 11 | 12 | podSecurityContext: {} 13 | securityContext: {} 14 | 15 | nodeSelector: {} 16 | tolerations: [] 17 | affinity: {} 18 | -------------------------------------------------------------------------------- /e2e/assets/helm/chartmuseum_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: chartmuseum 5 | labels: 6 | fleet: testing 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: chartmuseum 12 | template: 13 | metadata: 14 | labels: 15 | app: chartmuseum 16 | spec: 17 | containers: 18 | - name: chartmuseum 19 | image: ghcr.io/helm/chartmuseum:v0.14.0 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 8081 23 | env: 24 | - name: PORT 25 | value: "8081" 26 | - name: STORAGE_LOCAL_ROOTDIR 27 | value: /tmp 28 | - name: TLS_CERT 29 | value: /etc/chartmuseum/certs/tls.crt 30 | - name: TLS_KEY 31 | value: /etc/chartmuseum/certs/tls.key 32 | - name: BASIC_AUTH_USER 33 | value: {{.User}} 34 | - name: BASIC_AUTH_PASS 35 | value: {{.Password}} 36 | - name: STORAGE 37 | value: local 38 | volumeMounts: 39 | - name: tls-cert 40 | mountPath: /etc/chartmuseum/certs 41 | volumes: 42 | - name: tls-cert 43 | secret: 44 | secretName: helm-tls 45 | 46 | -------------------------------------------------------------------------------- /e2e/assets/helm/chartmuseum_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: chartmuseum-service 5 | spec: 6 | selector: 7 | app: chartmuseum 8 | ports: 9 | - protocol: TCP 10 | port: 8081 11 | targetPort: 8081 12 | type: LoadBalancer 13 | -------------------------------------------------------------------------------- /e2e/assets/helm/repo/http-with-auth-chart-path/fleet.yaml: -------------------------------------------------------------------------------- 1 | # This file and all contents in it are OPTIONAL. 2 | 3 | # The namespace this chart will be installed and restricted to, 4 | # if not specified the chart will be installed to "default" 5 | namespace: fleet-helm-http-with-auth-chart-path 6 | 7 | # Custom helm options 8 | helm: 9 | # The release name to use. 
If empty a generated release name will be used 10 | releaseName: sleeper-chart 11 | 12 | chart: "https://chartmuseum-service.default.svc.cluster.local:8081/charts/sleeper-chart-0.1.0.tgz" 13 | 14 | # Used if repo is set to look up the version of the chart 15 | version: "0.1.0" 16 | 17 | # Force recreate resource that can not be updated 18 | force: false 19 | 20 | # How long for helm to wait for the release to be active. If the value 21 | # is less that or equal to zero, we will not wait in Helm 22 | timeoutSeconds: 0 23 | 24 | # Custom values that will be passed as values.yaml to the installation 25 | values: 26 | replicas: 2 27 | -------------------------------------------------------------------------------- /e2e/assets/helm/repo/http-with-auth-repo-path/fleet.yaml: -------------------------------------------------------------------------------- 1 | # This file and all contents in it are OPTIONAL. 2 | 3 | # The namespace this chart will be installed and restricted to, 4 | # if not specified the chart will be installed to "default" 5 | namespace: fleet-helm-http-with-auth-repo-path 6 | 7 | # Custom helm options 8 | helm: 9 | # The release name to use. If empty a generated release name will be used 10 | releaseName: sleeper-chart 11 | 12 | chart: "sleeper-chart" 13 | repo: "https://chartmuseum-service.default.svc.cluster.local:8081" 14 | 15 | # Used if repo is set to look up the version of the chart 16 | version: "0.1.0" 17 | 18 | # Force recreate resource that can not be updated 19 | force: false 20 | 21 | # How long for helm to wait for the release to be active. If the value 22 | # is less that or equal to zero, we will not wait in Helm 23 | timeoutSeconds: 0 24 | 25 | # Custom values that will be passed as values.yaml to the installation 26 | values: 27 | replicas: 2 28 | 29 | -------------------------------------------------------------------------------- /e2e/assets/helm/repo/oci-with-auth/fleet.yaml: -------------------------------------------------------------------------------- 1 | # This file and all contents in it are OPTIONAL. 2 | 3 | # The namespace this chart will be installed and restricted to, 4 | # if not specified the chart will be installed to "default" 5 | namespace: fleet-helm-oci-with-auth 6 | 7 | # Custom helm options 8 | helm: 9 | # The release name to use. If empty a generated release name will be used 10 | releaseName: sleeper-chart 11 | 12 | # The directory of the chart in the repo. Any valid go-getter supported 13 | # URL can also be used here to specify where to download the chart from. 14 | # If repo below is set, this value is the chart name in the repo. 15 | chart: "oci://zot-service.default.svc.cluster.local:8082/sleeper-chart" 16 | 17 | # Used if repo is set to look up the version of the chart 18 | version: "0.1.0" 19 | 20 | # Force recreate resource that can not be updated 21 | force: false 22 | 23 | # How long for helm to wait for the release to be active. 
If the value 24 | # is less that or equal to zero, we will not wait in Helm 25 | timeoutSeconds: 0 26 | 27 | # Custom values that will be passed as values.yaml to the installation 28 | values: 29 | replicas: 2 30 | -------------------------------------------------------------------------------- /e2e/assets/helm/zot_configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: zot-config 5 | data: 6 | config.json: | 7 | { 8 | "storage": { "rootDirectory": "/var/lib/registry" }, 9 | "http": { 10 | "auth": { 11 | "htpasswd": { "path": "/secret/htpasswd" } 12 | }, 13 | "tls": { 14 | "cert": "/etc/zot/certs/tls.crt", 15 | "key": "/etc/zot/certs/tls.key" 16 | }, 17 | "accessControl": { 18 | "repositories": { 19 | "**": { 20 | "policies": [{ 21 | "users": ["admin"], 22 | "actions": ["read", "create", "update", "delete"] 23 | }], 24 | "defaultPolicy": ["read", "create"] 25 | } 26 | } 27 | }, 28 | "address": "0.0.0.0", 29 | "port": "8082" 30 | }, 31 | "log": { "level": "debug" }, 32 | "storage": { 33 | "rootDirectory": "/tmp/zot" 34 | }, 35 | "extensions": { 36 | "ui": { 37 | "enable": true 38 | }, 39 | "search": { 40 | "enable": true 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /e2e/assets/helm/zot_deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: zot 5 | labels: 6 | fleet: testing 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: zot 12 | template: 13 | metadata: 14 | labels: 15 | app: zot 16 | spec: 17 | containers: 18 | - name: zot 19 | image: ghcr.io/project-zot/zot-linux-amd64:v2.1.1 20 | imagePullPolicy: IfNotPresent 21 | ports: 22 | - containerPort: 8082 23 | volumeMounts: 24 | - name: config-file 25 | mountPath: /etc/zot/config.json 26 | subPath: config.json 27 | - name: htpasswd-secret 28 | mountPath: /secret 29 | - name: tls-cert 30 | mountPath: /etc/zot/certs 31 | volumes: 32 | - name: config-file 33 | configMap: 34 | name: zot-config 35 | items: 36 | - key: config.json 37 | path: config.json 38 | - name: htpasswd-secret 39 | secret: 40 | secretName: zot-htpasswd 41 | - name: tls-cert 42 | secret: 43 | secretName: helm-tls 44 | -------------------------------------------------------------------------------- /e2e/assets/helm/zot_secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: zot-htpasswd 5 | labels: 6 | app.kubernetes.io/managed-by: "Helm" 7 | annotations: 8 | meta.helm.sh/release-name: "zot" 9 | #meta.helm.sh/release-namespace: "default" 10 | stringData: 11 | htpasswd: "{{.HTTPPasswd}}" 12 | -------------------------------------------------------------------------------- /e2e/assets/helm/zot_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: zot-service 5 | spec: 6 | selector: 7 | app: zot 8 | ports: 9 | - protocol: TCP 10 | port: 8082 11 | targetPort: 8082 12 | type: LoadBalancer 13 | -------------------------------------------------------------------------------- /e2e/assets/helmop-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: HelmOp 3 | metadata: 4 | name: {{ .Name }} 5 | {{- if ne 
.Shard "" }} 6 | labels: 7 | fleet.cattle.io/shard-ref: {{ .Shard }} 8 | {{- end }} 9 | namespace: {{.Namespace}} 10 | spec: 11 | helm: 12 | chart: {{.Chart}} 13 | version: {{.Version}} 14 | namespace: {{.Namespace}} -------------------------------------------------------------------------------- /e2e/assets/helmop/helmop.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: HelmOp 3 | metadata: 4 | name: {{.Name}} 5 | namespace: "fleet-local" 6 | spec: 7 | helm: 8 | releaseName: testhelm 9 | repo: {{.Repo}} 10 | chart: {{.Chart}} 11 | version: {{.Version}} 12 | namespace: {{.Namespace}} 13 | helmSecretName: {{.HelmSecretName}} 14 | insecureSkipTLSVerify: {{.InsecureSkipTLSVerify}} 15 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/imagescan.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: imagescan 5 | namespace: fleet-local 6 | spec: 7 | # change this to be your own repo 8 | repo: {{.Repo}} 9 | branch: {{.Branch}} 10 | # define how long it will sync all the images and decide to apply change 11 | imageScanInterval: 5s 12 | # user must properly provide a secret that has write access to the git repository 13 | clientSecretName: git-auth 14 | 15 | # specify the commit pattern 16 | imageScanCommit: 17 | authorName: foo 18 | authorEmail: foo@bar.com 19 | messageTemplate: "update image" 20 | paths: 21 | - examples 22 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/pre-releases-ignored/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pause-prerelease 5 | labels: 6 | app: pause 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: pause 12 | template: 13 | metadata: 14 | labels: 15 | app: pause 16 | spec: 17 | containers: 18 | - name: pause 19 | image: {{.ImageWithTag}} # {"$imagescan": "test-scan"} 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/pre-releases-ignored/fleet.yaml: -------------------------------------------------------------------------------- 1 | imageScans: 2 | # specify the policy to retrieve images, can be semver or alphabetical order 3 | - policy: 4 | # if range is specified, it will take the latest image according to semver order in the range 5 | # for more details on how to use semver, see https://github.com/Masterminds/semver 6 | # in this test case we're going to use prerelease versions. 
7 | # as we're specifying * semver will ignore new tags but should not crash the fleet controller 8 | semver: 9 | range: "*" 10 | # can use ascending or descending order 11 | alphabetical: 12 | order: asc 13 | 14 | # specify images to scan 15 | image: {{.Image}} 16 | 17 | # Specify the tag name, it has to be unique in the same bundle 18 | tagName: test-scan 19 | 20 | # Specify the scan interval 21 | interval: 5s 22 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/pre-releases-ok/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pause-prerelease 5 | labels: 6 | app: pause 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: pause 12 | template: 13 | metadata: 14 | labels: 15 | app: pause 16 | spec: 17 | containers: 18 | - name: pause 19 | image: {{.ImageWithTag}} # {"$imagescan": "test-scan"} 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/pre-releases-ok/fleet.yaml: -------------------------------------------------------------------------------- 1 | imageScans: 2 | # specify the policy to retrieve images, can be semver or alphabetical order 3 | - policy: 4 | # if range is specified, it will take the latest image according to semver order in the range 5 | # for more details on how to use semver, see https://github.com/Masterminds/semver 6 | semver: 7 | range: ">= 0.0.0-40" 8 | # can use ascending or descending order 9 | alphabetical: 10 | order: asc 11 | 12 | # specify images to scan 13 | image: {{.Image}} 14 | 15 | # Specify the tag name, it has to be unique in the same bundle 16 | tagName: test-scan 17 | 18 | # Specify the scan interval 19 | interval: 5s 20 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/repo/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: public.ecr.aws/nginx/nginx:latest # {"$imagescan": "test-scan:digest"} 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /e2e/assets/imagescan/repo/fleet.yaml: -------------------------------------------------------------------------------- 1 | imageScans: 2 | # specify the policy to retrieve images, can be semver or alphabetical order 3 | - policy: 4 | # if range is specified, it will take the latest image according to semver order in the range 5 | # for more details on how to use semver, see https://github.com/Masterminds/semver 6 | semver: 7 | range: "*" 8 | # can use ascending or descending order 9 | alphabetical: 10 | order: asc 11 | 12 | # specify images to scan 13 | image: "public.ecr.aws/nginx/nginx" 14 | 15 | # Specify the tag name, it has to be unique in the same bundle 16 | tagName: test-scan 17 | 18 | # Specify the scan interval 19 | interval: 5s 20 | -------------------------------------------------------------------------------- /e2e/assets/installation/simple.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | 
apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: simple-test 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - simple 10 | -------------------------------------------------------------------------------- /e2e/assets/installation/verify.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{.Name}} 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - simple 10 | targetNamespace: {{.TargetNamespace}} 11 | targets: 12 | - clusterSelector: 13 | matchExpressions: 14 | - key: provider.cattle.io 15 | operator: NotIn 16 | values: 17 | - harvester 18 | -------------------------------------------------------------------------------- /e2e/assets/keep-resources/do-not-keep/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: dont-keep 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-verify 10 | targetNamespace: do-not-keep-resources 11 | targets: 12 | - clusterSelector: 13 | matchExpressions: 14 | - key: provider.cattle.io 15 | operator: NotIn 16 | values: 17 | - harvester 18 | -------------------------------------------------------------------------------- /e2e/assets/keep-resources/keep/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: keep 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-verify 10 | targetNamespace: keep-resources 11 | keepResources: true 12 | targets: 13 | - clusterSelector: 14 | matchExpressions: 15 | - key: provider.cattle.io 16 | operator: NotIn 17 | values: 18 | - harvester 19 | -------------------------------------------------------------------------------- /e2e/assets/metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }} 5 | labels: 6 | app: {{ .App }} 7 | env: test 8 | spec: 9 | selector: 10 | app: {{ .App }} 11 | {{- if .IsDefaultShard }} 12 | fleet.cattle.io/shard-default: "{{ .IsDefaultShard }}" 13 | {{ else }} 14 | fleet.cattle.io/shard-id: {{ .Shard }} 15 | {{- end }} 16 | ports: 17 | - protocol: TCP 18 | port: {{ .Port }} 19 | targetPort: metrics 20 | type: LoadBalancer 21 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/helm-external.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-external 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - multi-cluster/helm-external 9 | targets: 10 | - name: dev 11 | clusterSelector: 12 | matchLabels: 13 | env: dev 14 | 15 | - name: test 16 | clusterSelector: 17 | matchLabels: 18 | env: test 19 | 20 | - name: prod 21 | clusterSelector: 22 | matchLabels: 23 | env: prod 24 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/helm-kustomize.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | 
apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-kustomize 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - multi-cluster/helm-kustomize 9 | targets: 10 | - name: dev 11 | clusterSelector: 12 | matchLabels: 13 | env: dev 14 | 15 | - name: test 16 | clusterSelector: 17 | matchLabels: 18 | env: test 19 | 20 | - name: prod 21 | clusterSelector: 22 | matchLabels: 23 | env: prod 24 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/helm-target-customizations.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-target-customizations 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | branch: test-target-customizations 8 | paths: 9 | - multi-cluster/helm-target-customizations/ 10 | targets: 11 | - name: test 12 | clusterSelector: 13 | matchLabels: 14 | env: test 15 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/helm.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - multi-cluster/helm 9 | targets: 10 | - name: dev 11 | clusterSelector: 12 | matchLabels: 13 | env: dev 14 | 15 | - name: test 16 | clusterSelector: 17 | matchLabels: 18 | env: test 19 | 20 | - name: prod 21 | clusterSelector: 22 | matchLabels: 23 | env: prod 24 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/kustomize.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: kustomize 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - multi-cluster/kustomize 9 | targets: 10 | - name: dev 11 | clusterSelector: 12 | matchLabels: 13 | env: dev 14 | 15 | - name: test 16 | clusterSelector: 17 | matchLabels: 18 | env: test 19 | 20 | - name: prod 21 | clusterSelector: 22 | matchLabels: 23 | env: prod 24 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster-examples/manifests.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: manifests 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - multi-cluster/manifests 9 | targets: 10 | - name: dev 11 | clusterSelector: 12 | matchLabels: 13 | env: dev 14 | 15 | - name: test 16 | clusterSelector: 17 | matchLabels: 18 | env: test 19 | 20 | - name: prod 21 | clusterSelector: 22 | matchLabels: 23 | env: prod 24 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster/bundle-cm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | kind: Bundle 4 | metadata: 5 | labels: 6 | role: root 7 | name: {{.Name}} 8 | namespace: {{.ClusterRegistrationNamespace}} 9 | spec: 10 | defaultNamespace: {{.ProjectNamespace}} 11 | resources: 12 | - content: | 13 | kind: ConfigMap 14 | apiVersion: v1 15 | metadata: 16 | name: root 17 | data: 18 | value: root 19 | name: 
cm.yaml 20 | targets: 21 | - clusterSelector: {} 22 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster/bundle-depends-on.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | kind: Bundle 4 | metadata: 5 | labels: 6 | role: leaf 7 | name: {{.Name}} 8 | namespace: {{.ClusterRegistrationNamespace}} 9 | spec: 10 | defaultNamespace: {{.ProjectNamespace}} 11 | dependsOn: 12 | - selector: 13 | matchLabels: 14 | role: root 15 | resources: 16 | - content: | 17 | kind: ConfigMap 18 | apiVersion: v1 19 | metadata: 20 | name: node 21 | data: 22 | value: node 23 | name: cm.yaml 24 | targets: 25 | - clusterSelector: {} 26 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster/bundle-deployment-labels.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: simpleapplabels 5 | namespace: {{.ProjectNamespace}} 6 | labels: 7 | team: one 8 | 9 | spec: 10 | repo: https://github.com/rancher/fleet-test-data 11 | branch: master 12 | paths: 13 | - simple 14 | 15 | targetNamespace: {{.TargetNamespace}} 16 | 17 | targets: 18 | - name: test 19 | clusterSelector: 20 | matchLabels: 21 | envlabels: test 22 | -------------------------------------------------------------------------------- /e2e/assets/multi-cluster/bundle-namespace-mapping.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{.ProjectNamespace}} 6 | {{if .Restricted}} 7 | --- 8 | kind: GitRepoRestriction 9 | apiVersion: fleet.cattle.io/v1alpha1 10 | metadata: 11 | name: restriction 12 | namespace: {{.ProjectNamespace}} 13 | 14 | allowedTargetNamespaces: 15 | - project1simpleapp 16 | {{end}} 17 | --- 18 | kind: BundleNamespaceMapping 19 | apiVersion: fleet.cattle.io/v1alpha1 20 | metadata: 21 | name: mapping 22 | namespace: {{.ProjectNamespace}} 23 | 24 | bundleSelector: 25 | matchLabels: 26 | team: {{ .BundleSelectorLabel }} 27 | 28 | namespaceSelector: 29 | matchLabels: 30 | kubernetes.io/metadata.name: {{.ClusterNamespace}} 31 | --- 32 | kind: GitRepo 33 | apiVersion: fleet.cattle.io/v1alpha1 34 | metadata: 35 | name: simpleapp 36 | namespace: {{.ProjectNamespace}} 37 | labels: 38 | team: one 39 | 40 | spec: 41 | repo: https://github.com/rancher/fleet-test-data 42 | branch: master 43 | paths: 44 | - simple 45 | 46 | {{.TargetNamespace}} 47 | 48 | targets: 49 | - name: test 50 | clusterSelector: 51 | matchLabels: 52 | env: test 53 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster-examples/helm-kustomize.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-kustomize 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - single-cluster/helm-kustomize 9 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster-examples/helm-multi-chart.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - 
single-cluster/helm-multi-chart 9 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster-examples/helm.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm 5 | labels: 6 | test: me 7 | spec: 8 | repo: https://github.com/rancher/fleet-examples 9 | paths: 10 | - single-cluster/helm 11 | targets: 12 | - clusterSelector: 13 | matchExpressions: 14 | - key: provider.cattle.io 15 | operator: NotIn 16 | values: 17 | - harvester 18 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster-examples/kustomize.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: kustomize 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - single-cluster/kustomize 9 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster-examples/manifests.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: manifests 5 | spec: 6 | repo: https://github.com/rancher/fleet-examples 7 | paths: 8 | - single-cluster/manifests 9 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/delete-namespace/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: my-gitrepo 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-verify 10 | targetNamespace: {{.TargetNamespace}} 11 | deleteNamespace: {{.DeleteNamespace}} 12 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/driven.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: driven 5 | namespace: fleet-local 6 | spec: 7 | repo: https://github.com/rancher/fleet-test-data 8 | bundles: 9 | - base: driven/helm 10 | - base: driven/simple 11 | - base: driven/kustomize 12 | options: dev.yaml 13 | - base: driven/kustomize 14 | options: test.yaml 15 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-cluster-values.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-cluster-values 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-cluster-values 10 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-kustomize-disabled.yaml: -------------------------------------------------------------------------------- 1 | # Kustomize is supposed to be disabled if no configuration file for Kustomize 2 | # exists that would need to be honored. This prevents issues with file and 3 | # archive names, which are restricted in Kustomize but helm has no issues with 4 | # them (like a plus in the filename). 
In fact, helm even creates file names with 5 | # a plus symbol if the version in Chart.yaml contains it. 6 | kind: GitRepo 7 | apiVersion: fleet.cattle.io/v1alpha1 8 | metadata: 9 | name: helm-kustomize-disabled 10 | spec: 11 | repo: https://github.com/rancher/fleet-test-data 12 | branch: master 13 | targetNamespace: helm-kustomize-disabled 14 | paths: 15 | - helm-kustomize-disabled 16 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-oci.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | paths: 8 | - helm-oci 9 | targets: 10 | - clusterSelector: 11 | matchExpressions: 12 | - key: provider.cattle.io 13 | operator: NotIn 14 | values: 15 | - harvester 16 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-options-disabledns.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-options-disabledns 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-disable-dns/set 10 | - helm-disable-dns/not-set 11 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-options-skip-schema-validation.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-options-skip-schema-val 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-schemas/set 10 | - helm-schemas/not-set 11 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-verify.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm-verify-test 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: master 8 | paths: 9 | - helm-verify 10 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/helm-with-auth.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: helm 5 | spec: 6 | repo: {{ .Repo }} 7 | helmSecretName: {{ .SecretName }} 8 | paths: 9 | - {{ .Path }} 10 | targets: 11 | - clusterSelector: 12 | matchExpressions: 13 | - key: provider.cattle.io 14 | operator: NotIn 15 | values: 16 | - harvester 17 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/multiple-paths.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: multiple-paths 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | paths: 8 | - multiple-paths/config 9 | - multiple-paths/service 10 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/ns-labels-target-customization-no-defaults.yaml: -------------------------------------------------------------------------------- 1 | kind: 
GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: test 5 | namespace: fleet-local 6 | spec: 7 | repo: https://github.com/rancher/fleet-test-data 8 | branch: master 9 | paths: 10 | - target-customization-namespace-labels/without-default-values 11 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/ns-labels-target-customization.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: test 5 | namespace: fleet-local 6 | spec: 7 | repo: https://github.com/rancher/fleet-test-data 8 | branch: master 9 | paths: 10 | - target-customization-namespace-labels/with-default-values 11 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/release-cleanup/bundle-namespace-update.yaml: -------------------------------------------------------------------------------- 1 | kind: Bundle 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: namespace-update 5 | spec: 6 | namespace: {{.TargetNamespace}} 7 | resources: 8 | - content: | 9 | apiVersion: v1 10 | kind: ConfigMap 11 | metadata: 12 | name: app-config 13 | data: 14 | test: "value" 15 | targets: 16 | - clusterGroup: default 17 | ignore: {} 18 | name: default 19 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/release-cleanup/bundle-release-name-update.yaml: -------------------------------------------------------------------------------- 1 | kind: Bundle 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: release-name-update 5 | spec: 6 | helm: 7 | releaseName: {{.ReleaseName}} 8 | resources: 9 | - content: | 10 | apiVersion: v1 11 | kind: ConfigMap 12 | metadata: 13 | name: app-config 14 | data: 15 | test: "value" 16 | targets: 17 | - clusterGroup: default 18 | ignore: {} 19 | name: default 20 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/release-names.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: long-name-test 5 | spec: 6 | repo: https://github.com/rancher/fleet-test-data 7 | branch: test-bundle-names 8 | paths: 9 | - shortpath 10 | # collision strip chars: 11 | - shortpath-with@char 12 | - shortpath-with+char 13 | # collision long name: 14 | - longpathwithmorecharactersthanyouwouldeverexpectinagitrepopath 15 | - longpathwithmorecharactersthanyouwouldeverexpectinagitrepopathpart2 16 | # possible collision via duplicate release names: 17 | - customhelmreleasename 18 | - customspecialhelmreleasename 19 | - funcharts 20 | - "-" 21 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/test-oci.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: fleet.cattle.io/v1alpha1 2 | kind: GitRepo 3 | metadata: 4 | name: sample 5 | namespace: fleet-local 6 | spec: 7 | repo: "https://github.com/rancher/fleet-test-data" 8 | branch: master 9 | paths: 10 | - simple-chart-oci 11 | ociRegistrySecret: {{.OCIRegistrySecret}} 12 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/values-from-configmap.yaml: -------------------------------------------------------------------------------- 1 | name: 
configmap overrides values from fleet.yaml 2 | config: config option 3 | options: 4 | onlyconfigmap: configmap option 5 | english: 6 | name: configmap override 7 | -------------------------------------------------------------------------------- /e2e/assets/single-cluster/values-from-secret.yaml: -------------------------------------------------------------------------------- 1 | name: secret overrides values from fleet.yaml 2 | secret: xyz secret 3 | options: 4 | onlysecret: secret option 5 | english: 6 | name: secret override 7 | -------------------------------------------------------------------------------- /e2e/assets/status/chart-with-template-vars/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: chart-with-template-vars 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | -------------------------------------------------------------------------------- /e2e/assets/status/chart-with-template-vars/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | values: 3 | templatedLabel: "${ .ClusterLabels.foo }-foo" 4 | releaseName: reproducer 5 | -------------------------------------------------------------------------------- /e2e/assets/status/chart-with-template-vars/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: chart-with-template-vars-configmap 5 | namespace: fleet-local 6 | data: 7 | foo: bar 8 | -------------------------------------------------------------------------------- /e2e/assets/status/gitrepo.yaml: -------------------------------------------------------------------------------- 1 | kind: GitRepo 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: {{.Name}} 5 | namespace: fleet-local 6 | spec: 7 | repo: {{.Repo}} 8 | branch: {{.Branch}} 9 | targetNamespace: {{.TargetNamespace}} 10 | paths: 11 | - examples 12 | -------------------------------------------------------------------------------- /e2e/installation/suite_test.go: -------------------------------------------------------------------------------- 1 | package installation_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/rancher/fleet/e2e/testenv" 7 | 8 | . "github.com/onsi/ginkgo/v2" 9 | . 
"github.com/onsi/gomega" 10 | ) 11 | 12 | func TestE2E(t *testing.T) { 13 | RegisterFailHandler(testenv.FailAndGather) 14 | RunSpecs(t, "Verify Fleet Installation") 15 | } 16 | 17 | var ( 18 | env *testenv.Env 19 | ) 20 | 21 | var _ = BeforeSuite(func() { 22 | SetDefaultEventuallyTimeout(testenv.Timeout) 23 | testenv.SetRoot("../..") 24 | 25 | env = testenv.New() 26 | }) 27 | -------------------------------------------------------------------------------- /e2e/keep-resources/suite_test.go: -------------------------------------------------------------------------------- 1 | package examples_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | "github.com/rancher/fleet/e2e/testenv" 9 | ) 10 | 11 | func TestE2E(t *testing.T) { 12 | RegisterFailHandler(testenv.FailAndGather) 13 | RunSpecs(t, "E2E Suite for keepResources") 14 | } 15 | 16 | var ( 17 | env *testenv.Env 18 | ) 19 | 20 | var _ = BeforeSuite(func() { 21 | SetDefaultEventuallyTimeout(testenv.Timeout) 22 | testenv.SetRoot("../..") 23 | 24 | env = testenv.New() 25 | }) 26 | -------------------------------------------------------------------------------- /e2e/multi-cluster/suite_test.go: -------------------------------------------------------------------------------- 1 | // Package multicluster contains e2e tests deploying to multiple clusters. The tests use kubectl to apply manifests. Expectations are verified by checking cluster resources. Assets refer to the https://github.com/rancher/fleet-test-data git repo. 2 | package multicluster_test 3 | 4 | import ( 5 | "os" 6 | "testing" 7 | 8 | "github.com/rancher/fleet/e2e/testenv" 9 | 10 | . "github.com/onsi/ginkgo/v2" 11 | . "github.com/onsi/gomega" 12 | ) 13 | 14 | func TestE2E(t *testing.T) { 15 | RegisterFailHandler(testenv.FailAndGather) 16 | RunSpecs(t, "E2E Suite for Multi-Cluster") 17 | } 18 | 19 | var ( 20 | env *testenv.Env 21 | dsCluster = "second" 22 | ) 23 | 24 | var _ = BeforeSuite(func() { 25 | SetDefaultEventuallyTimeout(testenv.Timeout) 26 | testenv.SetRoot("../..") 27 | 28 | env = testenv.New() 29 | 30 | if dsClusterEnvVar := os.Getenv("CI_REGISTERED_CLUSTER"); dsClusterEnvVar != "" { 31 | dsCluster = dsClusterEnvVar 32 | } 33 | }) 34 | -------------------------------------------------------------------------------- /e2e/require-secrets/suite_test.go: -------------------------------------------------------------------------------- 1 | package require_secrets 2 | 3 | import ( 4 | "os" 5 | "path" 6 | "testing" 7 | 8 | "github.com/rancher/fleet/e2e/testenv" 9 | "github.com/rancher/fleet/e2e/testenv/githelper" 10 | 11 | . "github.com/onsi/ginkgo/v2" 12 | . 
"github.com/onsi/gomega" 13 | ) 14 | 15 | func TestE2E(t *testing.T) { 16 | RegisterFailHandler(testenv.FailAndGather) 17 | RunSpecs(t, "E2E Suite for Github Secrets based Examples") 18 | } 19 | 20 | var ( 21 | env *testenv.Env 22 | khDir string 23 | knownHostsPath string 24 | ) 25 | 26 | var _ = BeforeSuite(func() { 27 | SetDefaultEventuallyTimeout(testenv.Timeout) 28 | testenv.SetRoot("../..") 29 | 30 | env = testenv.New() 31 | 32 | // setup SSH known_hosts for all tests, since environment variables are 33 | // shared between parallel test runs 34 | khDir, _ = os.MkdirTemp("", "fleet-") 35 | 36 | knownHostsPath = path.Join(khDir, "known_hosts") 37 | os.Setenv("SSH_KNOWN_HOSTS", knownHostsPath) 38 | out, err := githelper.CreateKnownHosts(knownHostsPath, os.Getenv("GIT_REPO_HOST")) 39 | Expect(err).ToNot(HaveOccurred(), out) 40 | }) 41 | 42 | var _ = AfterSuite(func() { 43 | os.RemoveAll(khDir) 44 | }) 45 | -------------------------------------------------------------------------------- /e2e/testenv/fail.go: -------------------------------------------------------------------------------- 1 | package testenv 2 | 3 | import ( 4 | "os" 5 | "os/exec" 6 | "path" 7 | 8 | ginkgo "github.com/onsi/ginkgo/v2" 9 | ) 10 | 11 | func FailAndGather(message string, callerSkip ...int) { 12 | if _, err := exec.LookPath("crust-gather"); err != nil { 13 | ginkgo.GinkgoWriter.Print("⛔ crust-gather is not available, not dumping cluster info") 14 | ginkgo.Fail(message, callerSkip...) 15 | } 16 | 17 | pwd := os.Getenv("GITHUB_WORKSPACE") 18 | if pwd != "" { 19 | pwd = path.Join(pwd, "tmp", "upstream") 20 | } else { 21 | pwd = path.Join(os.TempDir(), "fleet-gather") 22 | } 23 | path := path.Join(pwd, ginkgo.CurrentSpecReport().FullText()) 24 | 25 | ginkgo.GinkgoWriter.Printf("💬 Gathering cluster info for '%s' to '%s'...\n", ginkgo.CurrentSpecReport().FullText(), pwd) 26 | cmd := exec.Command("crust-gather", "collect", 27 | "--exclude-namespace=kube-system", "--exclude-kind=Lease", "--duration=10s", 28 | "-verror", "-f", path) 29 | cmd.Stdout = ginkgo.GinkgoWriter 30 | cmd.Stderr = ginkgo.GinkgoWriter 31 | // Outputting errors, but don't care about error code as crust-gather 32 | // often runs into a "deadline" error. Data collection is successful 33 | // nevertheless. 34 | _ = cmd.Run() 35 | 36 | ginkgo.Fail(message, callerSkip...) 37 | } 38 | -------------------------------------------------------------------------------- /e2e/testenv/infra/cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/spf13/cobra" 7 | ) 8 | 9 | var ( 10 | rootCmd = &cobra.Command{ 11 | Use: "testenv", 12 | Short: "root test env command", 13 | Long: `This command should not be run directly.`, 14 | } 15 | withGitServer, withHelmRegistry, withOCIRegistry bool 16 | ) 17 | 18 | // Execute adds all child commands to the root command and sets flags appropriately. 19 | // This is called by main.main(). It only needs to happen once to the rootCmd. 
20 | func Execute() { 21 | err := rootCmd.Execute() 22 | if err != nil { 23 | os.Exit(1) 24 | } 25 | } 26 | 27 | func init() { 28 | rootCmd.AddCommand(setupCmd) 29 | rootCmd.AddCommand(teardownCmd) 30 | 31 | rootCmd.PersistentFlags().BoolVarP(&withGitServer, "git-server", "g", false, "with git server") 32 | rootCmd.PersistentFlags().BoolVarP(&withHelmRegistry, "helm-registry", "r", false, "with Helm registry") 33 | rootCmd.PersistentFlags().BoolVarP(&withOCIRegistry, "oci-registry", "c", false, "with OCI registry") 34 | } 35 | -------------------------------------------------------------------------------- /e2e/testenv/infra/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/rancher/fleet/e2e/testenv/infra/cmd" 4 | 5 | func main() { 6 | cmd.Execute() 7 | } 8 | -------------------------------------------------------------------------------- /e2e/testenv/path.go: -------------------------------------------------------------------------------- 1 | package testenv 2 | 3 | import "path" 4 | 5 | var ( 6 | root = "../.." 7 | ) 8 | 9 | // SetRoot sets the root path for the other relative paths, e.g. AssetPath. 10 | // Usually set to point to the repository's root. 11 | func SetRoot(dir string) { 12 | root = dir 13 | } 14 | 15 | // Root returns the relative path to the repository's root 16 | func Root() string { 17 | return root 18 | } 19 | 20 | // AssetPath returns the path to an asset 21 | func AssetPath(p ...string) string { 22 | parts := append([]string{root, "e2e", "assets"}, p...) 23 | return path.Join(parts...) 24 | } 25 | 26 | // ExamplePath returns the path to the fleet examples 27 | func ExamplePath(p ...string) string { 28 | parts := append([]string{root, "fleet-examples"}, p...) 29 | return path.Join(parts...)
30 | } 31 | -------------------------------------------------------------------------------- /e2e/testenv/zothelper/zothelper.go: -------------------------------------------------------------------------------- 1 | package zothelper 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | 7 | "github.com/rancher/fleet/e2e/testenv/infra/cmd" 8 | "github.com/rancher/fleet/e2e/testenv/kubectl" 9 | ) 10 | 11 | func GetOCIReference(k kubectl.Command) (string, error) { 12 | externalIP, err := k.Namespace(cmd.InfraNamespace).Get("service", "zot-service", "-o", "jsonpath={.status.loadBalancer.ingress[0].ip}") 13 | if err != nil { 14 | return "", err 15 | } 16 | if net.ParseIP(externalIP) == nil { 17 | return "", fmt.Errorf("external ip is not valid") 18 | } 19 | return fmt.Sprintf("oci://%s:8082", externalIP), err 20 | } 21 | -------------------------------------------------------------------------------- /generate.go: -------------------------------------------------------------------------------- 1 | //go:generate go run ./cmd/codegen/cleanup/main.go 2 | //go:generate go run ./cmd/codegen/main.go 3 | //go:generate bash ./cmd/codegen/hack/generate_and_sort_crds.sh ./charts/fleet-crd/templates/crds.yaml 4 | 5 | package main 6 | -------------------------------------------------------------------------------- /integrationtests/agent/assets/deployment-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: svc-test 5 | spec: 6 | selector: 7 | app.kubernetes.io/name: MyApp 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 9376 12 | name: myport 13 | --- 14 | apiVersion: v1 15 | kind: Service 16 | metadata: 17 | name: svc-ext 18 | spec: 19 | type: ExternalName 20 | externalName: svc-ext 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: svc-finalizer 26 | finalizers: 27 | - kubernetes 28 | spec: 29 | selector: 30 | app.kubernetes.io/name: MyApp 31 | ports: 32 | - protocol: TCP 33 | port: 80 34 | targetPort: 9376 35 | --- 36 | apiVersion: v1 37 | kind: ConfigMap 38 | metadata: 39 | name: cm-test 40 | data: 41 | foo: bar 42 | test.properties: | 43 | foo=bar 44 | -------------------------------------------------------------------------------- /integrationtests/agent/assets/deployment-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: svc-test 5 | spec: 6 | selector: 7 | app.kubernetes.io/name: MyApp 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 9376 12 | --- 13 | kind: ConfigMap 14 | metadata: 15 | name: cm-test 16 | data: 17 | test.properties: | 18 | foo=barModified 19 | apiVersion: v1 20 | 21 | -------------------------------------------------------------------------------- /integrationtests/agent/assets/deployment-with-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: drift-dummy-deployment 5 | labels: 6 | app: drift-dummy 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: drift-dummy 12 | template: 13 | metadata: 14 | labels: 15 | app: drift-dummy 16 | spec: 17 | containers: 18 | - name: pause 19 | image: k8s.gcr.io/pause 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/agent/assets/deployment-with-status.yaml: 
-------------------------------------------------------------------------------- 1 | # This deployment is meant to represent the bug at https://github.com/rancher/fleet/issues/2521 2 | # It includes: 3 | # - A spec field set to its default value (spec.publishNotReadyAddresses in this case) 4 | # - A non-empty "status", despite being a subresource and not modifiable by apply 5 | apiVersion: v1 6 | kind: Service 7 | metadata: 8 | name: svc-status-test 9 | spec: 10 | publishNotReadyAddresses: false 11 | selector: 12 | app.kubernetes.io/name: MyApp 13 | ports: 14 | - protocol: TCP 15 | port: 80 16 | targetPort: 9376 17 | name: myport 18 | status: 19 | loadBalancer: 20 | ingress: 21 | - hostname: foo.bar 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/bundle/bundle-all.yaml: -------------------------------------------------------------------------------- 1 | kind: Bundle 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: match-all-clusters 5 | namespace: fleet-local 6 | spec: 7 | resources: 8 | - content: | 9 | kind: ConfigMap 10 | apiVersion: v1 11 | metadata: 12 | name: root 13 | data: 14 | value: root 15 | name: cm.yaml 16 | targets: 17 | - clusterSelector: {} 18 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/bundle/bundle.yaml: -------------------------------------------------------------------------------- 1 | kind: Bundle 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | metadata: 4 | name: external-secret 5 | namespace: fleet-local 6 | spec: 7 | resources: 8 | - content: | 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | name: nginx-deployment 13 | labels: 14 | app: nginx 15 | spec: 16 | replicas: 3 17 | selector: 18 | matchLabels: 19 | app: nginx 20 | template: 21 | metadata: 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - name: nginx 27 | image: nginx:1.14.2 28 | ports: 29 | - containerPort: 80 30 | name: nginx.yaml 31 | targets: 32 | - clusterName: local 33 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/bundledeployment/bd-only.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | kind: BundleDeployment 4 | metadata: 5 | creationTimestamp: null 6 | labels: 7 | fleet.cattle.io/bundle-name: testbundle-simple-chart 8 | fleet.cattle.io/bundle-namespace: fleet-local 9 | fleet.cattle.io/cluster: local 10 | fleet.cattle.io/cluster-namespace: fleet-local 11 | fleet.cattle.io/commit: e40edabfeada51874ac9caf5770655b720177380 12 | fleet.cattle.io/managed: "true" 13 | name: testbundle-simple-chart 14 | namespace: cluster-fleet-local-local-1a3d67d0a899 15 | spec: 16 | deploymentID: s-ee0480cffe0c0da150814f6844d7c5bc49cc05c158c5ba1efe9a45142f36e:c32e813ecbf48f56833aa2267cd3d8758eecc94f9948fb0dea510147a57760b5 17 | options: 18 | helm: 19 | chart: config-chart 20 | values: 21 | name: example-value 22 | ignore: {} 23 | stagedDeploymentID: s-ee0480cffe0c0da150814f6844d7c5bc49cc05c158c5ba1efe9a45142f36e:c32e813ecbf48f56833aa2267cd3d8758eecc94f9948fb0dea510147a57760b5 24 | stagedOptions: 25 | helm: 26 | chart: config-chart 27 | values: 28 | name: example-value 29 | ignore: {} 30 | status: 31 | display: {} 32 | 33 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/bundledeployment/content.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: fleet.cattle.io/v1alpha1 3 | content: H4sIAAAAAAAA/4SRMW/bMBCF9/6KA2eLtlygA7fCa7t6KTucpZNNhKII8iTEEPTfA5KRESdOsgjQ3Xv33gfOIlAcxtBQFOrfLBz2JJRoBteZc9VcMPD2kL7yir0Vm7RhciyUQG+OFKIZnIJpr12yKnjr1K6l2ATjOYt+A1NkyCvgCzJMFExnKILh+OrUjq+eFKD31jSYnNpNa85O1nKnHXp/i9ailvUvudNCO7FsHiMw9d4iU9yWcY/+G6BauyfjWgWHbPiLXrueGFtkVNoBFNxEVEXTe0slqloxbsIkSTUntCPV+5+p6GqfZ5DHNI8yDWBZvoCYivBD73KqpQ5Hy1X6uz/SWaIH73ch2+eC+fr7lwMocVmx1qVnzKR5dR/yWbkxWAVnO5zQylKksWNkCn/wRLZgp0v/lx8vAQAA//+su6S1jAIAAA== 4 | kind: Content 5 | metadata: 6 | creationTimestamp: null 7 | name: s-ee0480cffe0c0da150814f6844d7c5bc49cc05c158c5ba1efe9a45142f36e 8 | sha256sum: ee0480cffe0c0da150814f6844d7c5bc49cc05c158c5ba1efe9a45142f36eebc 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/remote-chart-with-deps/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: deps-chart 5 | repo: http://localhost:3000 6 | version: 1.0.0 7 | force: false 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml-no-deps/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml-no-deps/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml-no-deps 4 | chart: "" 5 | repo: "" 6 | version: "" 7 | disableDependencyUpdate: true 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml-no-deps/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml-no-deps/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- 
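The deps-charts assets above all declare a dependency on the config-chart package served from http://localhost:3000, and their fleet.yaml files toggle disableDependencyUpdate. As a rough sketch of what resolving such a dependency looks like (not a file from this repository; the digest and timestamp below are placeholders), running helm dependency update against one of these Chart.yaml files would write a Chart.lock along these lines:

    # Chart.lock -- illustrative only; digest and generated values are placeholders
    dependencies:
    - name: config-chart
      repository: http://localhost:3000
      version: 0.1.0
    digest: sha256:0000000000000000000000000000000000000000000000000000000000000000
    generated: "2024-01-01T00:00:00Z"

When a fleet.yaml sets disableDependencyUpdate: true, Fleet skips this dependency-resolution step for the bundled chart, which is what the *-no-deps and *-disabled variants of these assets exercise.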
/integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: "" 5 | repo: "" 6 | version: "" 7 | force: false 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/multi-chart/simple-with-fleet-yaml/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/no-fleet-yaml/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/no-fleet-yaml/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/no-fleet-yaml/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/remote-chart-with-deps-disabled/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: deps-chart 5 | repo: http://localhost:3000 6 | version: 1.0.0 7 | force: false 8 | disableDependencyUpdate: true 9 | 10 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/remote-chart-with-deps/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: deps-chart 5 | repo: http://localhost:3000 6 | version: 1.0.0 7 | force: false 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml-no-deps/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 
10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml-no-deps/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml-no-deps 4 | chart: "" 5 | repo: "" 6 | version: "" 7 | disableDependencyUpdate: true 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml-no-deps/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml-no-deps/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test-deps-helm-yamls-with-fleet 2 | helm: 3 | releaseName: simple-with-fleet-yaml 4 | chart: "" 5 | repo: "" 6 | version: "" 7 | force: false 8 | 9 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/deps-charts/simple-with-fleet-yaml/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/helm/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: http://localhost:3000/config-chart-0.1.0.tgz 4 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - secret.yaml -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/base/secret.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | type: Opaque 6 | data: 7 | username: YmxhaA== 8 | password: YmxhaA== -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/dev.yaml: -------------------------------------------------------------------------------- 1 | namespace: fleet-kustomize-example 2 | kustomize: 3 | dir: "overlays/dev" -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/overlays/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | nameSuffix: "-dev" 2 | resources: 3 | - ../../base 4 | patchesStrategicMerge: 5 | - secret.yaml -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/overlays/dev/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | data: 6 | username: YmxhaDE= 7 | password: YmxhaDE= -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/overlays/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | nameSuffix: "-prod" 2 | resources: 3 | - ../../base 4 | patchesStrategicMerge: 5 | - secret.yaml -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/overlays/prod/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | data: 6 | username: YmxhaDI= 7 | password: YmxhaDI= -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/kustomize/prod.yaml: -------------------------------------------------------------------------------- 1 | namespace: fleet-kustomize-example 2 | kustomize: 3 | dir: "overlays/prod" -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/simple/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven/simple/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - secret.yaml 
-------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/base/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | type: Opaque 6 | data: 7 | username: YmxhaA== 8 | password: YmxhaA== -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/fleetDev.yaml: -------------------------------------------------------------------------------- 1 | namespace: fleet-kustomize-example 2 | kustomize: 3 | dir: "overlays/dev" -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/fleetProd.yaml: -------------------------------------------------------------------------------- 1 | namespace: fleet-kustomize-example 2 | kustomize: 3 | dir: "overlays/prod" -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/overlays/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | nameSuffix: "-dev" 2 | resources: 3 | - ../../base 4 | patchesStrategicMerge: 5 | - secret.yaml -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/overlays/dev/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | data: 6 | username: YmxhaDE= 7 | password: YmxhaDE= -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/overlays/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | nameSuffix: "-prod" 2 | resources: 3 | - ../../base 4 | patchesStrategicMerge: 5 | - secret.yaml -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven2/kustomize/overlays/prod/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: supersecret 5 | data: 6 | username: YmxhaDI= 7 | password: YmxhaDI= -------------------------------------------------------------------------------- /integrationtests/cli/assets/driven_fleet_yaml_subfolder/helm/test/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: http://localhost:3000/config-chart-0.1.0.tgz 4 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_chart_url/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: http://localhost:3000/config-chart-0.1.0.tgz 4 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_disabled/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config 5 | data: 6 | config.json: | 7 | {} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_disabled/fleet.yaml: 
-------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: disabled 3 | takeOwnership: false 4 | atomic: false 5 | force: false 6 | waitForJobs: false 7 | disablePreProcess: false 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_enabled/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config 5 | data: 6 | config.json: | 7 | {} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_enabled/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: enabled 3 | takeOwnership: true 4 | atomic: true 5 | force: true 6 | waitForJobs: true 7 | disablePreProcess: true 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_kustomize/fleet.yaml: -------------------------------------------------------------------------------- 1 | # This file and all contents in it are OPTIONAL. 2 | 3 | # The default namespace to apply to resources that are namespaced 4 | # but do not specify a namespace. In this example no resources 5 | # are configured with a namespace so this default will apply 6 | namespace: fleet-kustomize-example 7 | 8 | 9 | kustomize: 10 | # To use a kustomization.yaml different from the one in the root folder 11 | dir: "" 12 | 13 | helm: 14 | releaseName: kustomize 15 | takeOwnership: true 16 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_options_kustomize/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - overlays/dev 3 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_path_credentials/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: config-chart 4 | repo: http://localhost:3000 5 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_path_credentials/subfolder/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: config-chart 4 | repo: http://localhost:3000 5 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helm_repo_url/fleet.yaml: -------------------------------------------------------------------------------- 1 | helm: 2 | releaseName: config-chart 3 | chart: config-chart 4 | repo: http://localhost:3000 5 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/config-chart-0.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/cli/assets/helmrepository/config-chart-0.1.0.tgz -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/config-chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 
3 | description: A test chart that verifies its config 4 | name: config-chart 5 | type: application 6 | version: 0.1.0 7 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/config-chart/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-chart-config 5 | data: 6 | test: "value123" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/config-chart/values.yaml: -------------------------------------------------------------------------------- 1 | name: default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/deps-chart-1.0.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/cli/assets/helmrepository/deps-chart-1.0.0.tgz -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/deps-chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | appVersion: 1.16.0 3 | description: A chart for testing dependencies 4 | name: deps-chart 5 | type: application 6 | version: 1.0.0 7 | dependencies: 8 | - name: config-chart 9 | version: "0.1.0" 10 | repository: "http://localhost:3000" 11 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/deps-chart/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: test-simple-deps-chart 5 | data: 6 | test: "valuedeps" 7 | name: {{ .Values.name }} 8 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/deps-chart/values.yaml: -------------------------------------------------------------------------------- 1 | name: deps-default-name 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/helmrepository/index.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | entries: 3 | config-chart: 4 | - apiVersion: v2 5 | appVersion: 1.16.0 6 | created: "2023-01-03T11:05:22.378628588+01:00" 7 | description: A Helm chart for Kubernetes 8 | digest: c82c9a7d29af7dc92a69284cb24900cc42c7cab2b5ff25cee378363073e69dd2 9 | name: testrepo 10 | type: application 11 | urls: 12 | - http://localhost:3000/config-chart-0.1.0.tgz 13 | version: 0.1.0 14 | deps-chart: 15 | - apiVersion: v2 16 | appVersion: 1.16.0 17 | created: "2023-01-03T11:05:22.378628588+01:00" 18 | description: A Helm chart for testing dependency update 19 | digest: 8bae7a819851c6d96bacd213288d7892985951ab3ae79f27c3300f6426aec759 20 | name: testrepo 21 | type: application 22 | urls: 23 | - http://localhost:3000/deps-chart-1.0.0.tgz 24 | version: 1.0.0 25 | generated: "2023-01-03T11:05:22.378100239+01:00" 26 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/keep_resources/fleet.yaml: 
-------------------------------------------------------------------------------- 1 | keepResources: true -------------------------------------------------------------------------------- /integrationtests/cli/assets/keep_resources/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/labels_update/cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cm3 5 | data: 6 | test: "value23" 7 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/labels_update/fleet.yaml: -------------------------------------------------------------------------------- 1 | labels: 2 | new: fleet-label2 -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentA/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginxA 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentA/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: deploymenta 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentB/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentC/cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cm1 5 | data: 6 | test: "value123" 7 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentC/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: deploymentb 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_mixed_two_levels/nested/deploymentD/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | 
metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/cli/assets/nested_multiple/README.md -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentA/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: deploymenta 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentA/svc/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service-a 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentB/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: deploymentb 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentB/svc/nested/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service-b 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentC/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_multiple/deploymentC/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: deploymentc 2 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_simple/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/cli/assets/nested_simple/README.md -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_simple/simple/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | 
selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_simple/simple/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_two_levels/nested/deployment/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/nested_two_levels/nested/svc/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/simple/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.14.2 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/simple/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-service 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - name: name-of-service-port 10 | protocol: TCP 11 | port: 80 12 | targetPort: http-web-svc 13 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/targets/override/cm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cm1 5 | data: 6 | test: "value123" 7 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/targets/override/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test 2 | overrideTargets: 3 | - clusterName: overridden -------------------------------------------------------------------------------- /integrationtests/cli/assets/targets/simple/cm.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: cm2 5 | data: 6 | test: "value1234" 7 | -------------------------------------------------------------------------------- /integrationtests/cli/assets/targets/simple/fleet.yaml: -------------------------------------------------------------------------------- 1 | namespace: test -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gitserver/git/.gitconfig: -------------------------------------------------------------------------------- 1 | [user] 2 | name = Gogs 3 | email = gogs@fake.local 4 | [core] 5 | quotepath = false 6 | -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gitserver/git/.ssh/environment: -------------------------------------------------------------------------------- 1 | GOGS_CUSTOM=/data/gogs 2 | -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gitserver/gogs/conf/app.ini: -------------------------------------------------------------------------------- 1 | BRAND_NAME = Gogs 2 | RUN_USER = git 3 | RUN_MODE = prod 4 | 5 | [database] 6 | TYPE = sqlite3 7 | HOST = 127.0.0.1:5432 8 | NAME = gogs 9 | SCHEMA = public 10 | USER = gogs 11 | PASSWORD = 12 | SSL_MODE = disable 13 | PATH = data/gogs.db 14 | 15 | [repository] 16 | ROOT = /data/git/gogs-repositories 17 | DEFAULT_BRANCH = master 18 | 19 | [server] 20 | PROTOCOL = https 21 | DOMAIN = localhost 22 | HTTP_PORT = 3000 23 | EXTERNAL_URL = https://localhost:3000/ 24 | DISABLE_SSH = false 25 | SSH_PORT = 22 26 | START_SSH_SERVER = false 27 | OFFLINE_MODE = false 28 | CERT_FILE = cert.pem 29 | KEY_FILE = key.pem 30 | 31 | [mailer] 32 | ENABLED = false 33 | 34 | [auth] 35 | REQUIRE_EMAIL_CONFIRMATION = false 36 | DISABLE_REGISTRATION = false 37 | ENABLE_REGISTRATION_CAPTCHA = false 38 | REQUIRE_SIGNIN_VIEW = false 39 | 40 | [user] 41 | ENABLE_EMAIL_NOTIFICATION = false 42 | 43 | [picture] 44 | DISABLE_GRAVATAR = false 45 | ENABLE_FEDERATED_AVATAR = false 46 | 47 | [session] 48 | PROVIDER = file 49 | 50 | [log] 51 | MODE = file 52 | LEVEL = Info 53 | ROOT_PATH = /app/gogs/log 54 | 55 | [security] 56 | INSTALL_LOCK = true 57 | SECRET_KEY = HoTWPQxTJ5dIY31 58 | -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gitserver/gogs/data/gogs.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/gitcloner/assets/gitserver/gogs/data/gogs.db -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gitserver/ssh/ssh_host_ecdsa_key.pub: -------------------------------------------------------------------------------- 1 | ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOLWGeeq/e1mK/zH47UeQeMtdh+NEz6j7xp5cAINcV2pPWgAsuyh5dumMv1RkC1rr0pmWekCoMnR2c4+PllRqrQ= root@944bc5922a28 2 | -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/gogs/password: -------------------------------------------------------------------------------- 1 | pass -------------------------------------------------------------------------------- /integrationtests/gitcloner/assets/repo/README.md: 
-------------------------------------------------------------------------------- 1 | Integration tests repo -------------------------------------------------------------------------------- /integrationtests/gitcloner/suite_test.go: -------------------------------------------------------------------------------- 1 | package apply 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestGitCloner(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Git cloner Suite") 13 | } 14 | -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/git/.gitconfig: -------------------------------------------------------------------------------- 1 | [user] 2 | name = Gogs 3 | email = gogs@fake.local 4 | [core] 5 | quotepath = false 6 | -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/git/.ssh/environment: -------------------------------------------------------------------------------- 1 | GOGS_CUSTOM=/data/gogs 2 | -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/gogs/conf/app.ini: -------------------------------------------------------------------------------- 1 | BRAND_NAME = Gogs 2 | RUN_USER = git 3 | RUN_MODE = prod 4 | 5 | [database] 6 | TYPE = sqlite3 7 | HOST = 127.0.0.1:5432 8 | NAME = gogs 9 | SCHEMA = public 10 | USER = gogs 11 | PASSWORD = 12 | SSL_MODE = disable 13 | PATH = data/gogs.db 14 | 15 | [repository] 16 | ROOT = /data/git/gogs-repositories 17 | DEFAULT_BRANCH = master 18 | 19 | [server] 20 | DOMAIN = localhost 21 | HTTP_PORT = 3000 22 | EXTERNAL_URL = http://localhost:3000/ 23 | DISABLE_SSH = false 24 | SSH_PORT = 22 25 | START_SSH_SERVER = false 26 | OFFLINE_MODE = false 27 | 28 | [mailer] 29 | ENABLED = false 30 | 31 | [auth] 32 | REQUIRE_EMAIL_CONFIRMATION = false 33 | DISABLE_REGISTRATION = false 34 | ENABLE_REGISTRATION_CAPTCHA = true 35 | REQUIRE_SIGNIN_VIEW = false 36 | 37 | [user] 38 | ENABLE_EMAIL_NOTIFICATION = false 39 | 40 | [picture] 41 | DISABLE_GRAVATAR = false 42 | ENABLE_FEDERATED_AVATAR = false 43 | 44 | [session] 45 | PROVIDER = file 46 | 47 | [log] 48 | MODE = file 49 | LEVEL = Info 50 | ROOT_PATH = /app/gogs/log 51 | 52 | [security] 53 | INSTALL_LOCK = true 54 | SECRET_KEY = HoTWPQxTJ5dIY31 55 | -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/gogs/data/gogs.db: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rancher/fleet/5ecc66a1e2cd75668656f6c3c651330a768c0134/integrationtests/gitjob/assets/gitserver/gogs/data/gogs.db -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/ssh/ssh_host_ecdsa_key: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAaAAAABNlY2RzYS 3 | 1zaGEyLW5pc3RwMjU2AAAACG5pc3RwMjU2AAAAQQTi1hnnqv3tZiv8x+O1HkHjLXYfjRM+ 4 | o+8aeXACDXFdqT1oALLsoeXbpjL9UZAta69KZlnpAqDJ0dnOPj5ZUaq0AAAAsLX33E2199 5 | xNAAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOLWGeeq/e1mK/zH 6 | 47UeQeMtdh+NEz6j7xp5cAINcV2pPWgAsuyh5dumMv1RkC1rr0pmWekCoMnR2c4+PllRqr 7 | QAAAAhAOZAKlM42hgAOsRnvRk/wp1mYy+raMO2p05D9BaLcD7oAAAAEXJvb3RAOTQ0YmM1 8 | 
OTIyYTI4AQIDBAUG 9 | -----END OPENSSH PRIVATE KEY----- 10 | -------------------------------------------------------------------------------- /integrationtests/gitjob/assets/gitserver/ssh/ssh_host_ecdsa_key.pub: -------------------------------------------------------------------------------- 1 | ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBOLWGeeq/e1mK/zH47UeQeMtdh+NEz6j7xp5cAINcV2pPWgAsuyh5dumMv1RkC1rr0pmWekCoMnR2c4+PllRqrQ= root@944bc5922a28 2 | -------------------------------------------------------------------------------- /integrationtests/utils/kubeconfig.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "k8s.io/client-go/rest" 8 | "k8s.io/client-go/tools/clientcmd" 9 | "k8s.io/client-go/tools/clientcmd/api" 10 | ) 11 | 12 | func WriteKubeConfig(cfg *rest.Config, path string) error { 13 | config := FromEnvTestConfig(cfg) 14 | if err := os.WriteFile(path, config, 0600); err != nil { 15 | return err 16 | } 17 | return nil 18 | } 19 | 20 | // FromEnvTestConfig returns a new Kubeconfig in byte form when running in envtest. 21 | func FromEnvTestConfig(cfg *rest.Config) []byte { 22 | name := "testenv-cluster" 23 | contextName := fmt.Sprintf("%s@%s", cfg.Username, name) 24 | c := api.Config{ 25 | Clusters: map[string]*api.Cluster{ 26 | name: { 27 | Server: cfg.Host, 28 | CertificateAuthorityData: cfg.CAData, 29 | }, 30 | }, 31 | Contexts: map[string]*api.Context{ 32 | contextName: { 33 | Cluster: name, 34 | AuthInfo: cfg.Username, 35 | }, 36 | }, 37 | AuthInfos: map[string]*api.AuthInfo{ 38 | cfg.Username: { 39 | ClientKeyData: cfg.KeyData, 40 | ClientCertificateData: cfg.CertData, 41 | }, 42 | }, 43 | CurrentContext: contextName, 44 | } 45 | data, _ := clientcmd.Write(c) 46 | return data 47 | } 48 | -------------------------------------------------------------------------------- /integrationtests/utils/namespace.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | cryptorand "crypto/rand" 5 | "encoding/hex" 6 | "fmt" 7 | ) 8 | 9 | func NewNamespaceName() (string, error) { 10 | p := make([]byte, 12) 11 | _, err := cryptorand.Read(p) 12 | if err != nil { 13 | return "", err 14 | } 15 | return fmt.Sprintf("test-%s", hex.EncodeToString(p))[:12], nil 16 | } 17 | -------------------------------------------------------------------------------- /internal/cmd/agent/clusterstatus/suite_test.go: -------------------------------------------------------------------------------- 1 | package clusterstatus_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . "github.com/onsi/gomega" 9 | ) 10 | 11 | const ( 12 | timeout = 30 * time.Second 13 | ) 14 | 15 | func TestFleet(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | RunSpecs(t, "ClusterStatus Suite") 18 | } 19 | 20 | var _ = BeforeSuite(func() { 21 | SetDefaultEventuallyTimeout(timeout) 22 | }) 23 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/data/data.go: -------------------------------------------------------------------------------- 1 | package data 2 | 3 | import "github.com/rancher/fleet/internal/cmd/agent/deployer/data/convert" 4 | 5 | type List []map[string]interface{} 6 | 7 | type Object map[string]interface{} 8 | 9 | func (o Object) Map(names ...string) Object { 10 | v := GetValueN(o, names...) 
11 | m := convert.ToMapInterface(v) 12 | return m 13 | } 14 | 15 | func (o Object) Slice(names ...string) (result []Object) { 16 | v := GetValueN(o, names...) 17 | for _, item := range convert.ToInterfaceSlice(v) { 18 | result = append(result, convert.ToMapInterface(item)) 19 | } 20 | return 21 | } 22 | 23 | func (o Object) String(names ...string) string { 24 | v := GetValueN(o, names...) 25 | return convert.ToString(v) 26 | } 27 | 28 | func (o Object) StringSlice(names ...string) []string { 29 | v := GetValueN(o, names...) 30 | return convert.ToStringSlice(v) 31 | } 32 | 33 | func (o Object) Bool(key ...string) bool { 34 | return convert.ToBool(GetValueN(o, key...)) 35 | } 36 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/data/values.go: -------------------------------------------------------------------------------- 1 | // Package data contains functions for working with unstructured values like []interface or map[string]interface{}. 2 | // It allows reading/writing to these values without having to convert to structured items. 3 | package data 4 | 5 | func GetValueN(data map[string]interface{}, keys ...string) interface{} { 6 | val, _ := getValue(data, keys...) 7 | return val 8 | } 9 | 10 | // getValue works similar to GetValueFromAny, but can only process maps. Kept this way to avoid breaking changes with 11 | // the previous interface, GetValueFromAny should be used in most cases since that can handle slices as well. 12 | func getValue(data map[string]interface{}, keys ...string) (interface{}, bool) { 13 | for i, key := range keys { 14 | if i == len(keys)-1 { 15 | val, ok := data[key] 16 | return val, ok 17 | } 18 | data, _ = data[key].(map[string]interface{}) 19 | } 20 | return nil, false 21 | } 22 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/internal/diff/diff_options.go: -------------------------------------------------------------------------------- 1 | // +vendored argoproj/gitops-engine/pkg/diff/diff_options.go 2 | package diff 3 | 4 | import ( 5 | "github.com/go-logr/logr" 6 | "k8s.io/klog/v2/textlogger" 7 | ) 8 | 9 | type Option func(*options) 10 | 11 | // Holds diffing settings 12 | type options struct { 13 | // If set to true then differences caused by aggregated roles in RBAC resources are ignored. 14 | ignoreAggregatedRoles bool 15 | normalizer Normalizer 16 | log logr.Logger 17 | } 18 | 19 | func applyOptions(opts []Option) options { 20 | o := options{ 21 | ignoreAggregatedRoles: false, 22 | normalizer: GetNoopNormalizer(), 23 | log: textlogger.NewLogger(textlogger.NewConfig()), 24 | } 25 | for _, opt := range opts { 26 | opt(&o) 27 | } 28 | return o 29 | } 30 | 31 | func IgnoreAggregatedRoles(ignore bool) Option { 32 | return func(o *options) { 33 | o.ignoreAggregatedRoles = ignore 34 | } 35 | } 36 | 37 | func WithNormalizer(normalizer Normalizer) Option { 38 | return func(o *options) { 39 | o.normalizer = normalizer 40 | } 41 | } 42 | 43 | func WithLogr(log logr.Logger) Option { 44 | return func(o *options) { 45 | o.log = log 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/internal/diff/kubernetes_vendor/pkg/util/hash/hash.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2015 The Kubernetes Authors. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package hash 18 | 19 | import ( 20 | "hash" 21 | 22 | "github.com/davecgh/go-spew/spew" 23 | ) 24 | 25 | // DeepHashObject writes specified object to hash using the spew library 26 | // which follows pointers and prints actual values of the nested objects 27 | // ensuring the hash does not change when a pointer changes. 28 | func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { 29 | hasher.Reset() 30 | printer := spew.ConfigState{ 31 | Indent: " ", 32 | SortKeys: true, 33 | DisableMethods: true, 34 | SpewKeys: true, 35 | } 36 | printer.Fprintf(hasher, "%#v", objectToWrite) 37 | } 38 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/internal/diff/scheme/scheme.go: -------------------------------------------------------------------------------- 1 | // +vendored https://github.com/argoproj/gitops-engine/blob/master/pkg/utils/kube/scheme/scheme.go 2 | package scheme 3 | 4 | import ( 5 | "k8s.io/kubernetes/pkg/api/legacyscheme" 6 | 7 | _ "k8s.io/kubernetes/pkg/apis/admission/install" 8 | _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" 9 | _ "k8s.io/kubernetes/pkg/apis/apps/install" 10 | _ "k8s.io/kubernetes/pkg/apis/authentication/install" 11 | _ "k8s.io/kubernetes/pkg/apis/authorization/install" 12 | _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" 13 | _ "k8s.io/kubernetes/pkg/apis/batch/install" 14 | _ "k8s.io/kubernetes/pkg/apis/certificates/install" 15 | _ "k8s.io/kubernetes/pkg/apis/coordination/install" 16 | _ "k8s.io/kubernetes/pkg/apis/core/install" 17 | _ "k8s.io/kubernetes/pkg/apis/discovery/install" 18 | _ "k8s.io/kubernetes/pkg/apis/events/install" 19 | _ "k8s.io/kubernetes/pkg/apis/extensions/install" 20 | _ "k8s.io/kubernetes/pkg/apis/flowcontrol/install" 21 | _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" 22 | _ "k8s.io/kubernetes/pkg/apis/networking/install" 23 | _ "k8s.io/kubernetes/pkg/apis/node/install" 24 | _ "k8s.io/kubernetes/pkg/apis/policy/install" 25 | _ "k8s.io/kubernetes/pkg/apis/rbac/install" 26 | _ "k8s.io/kubernetes/pkg/apis/scheduling/install" 27 | _ "k8s.io/kubernetes/pkg/apis/storage/install" 28 | ) 29 | 30 | var Scheme = legacyscheme.Scheme 31 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/internal/normalizers/glob/glob.go: -------------------------------------------------------------------------------- 1 | // +vendored argoproj/argo-cd/util/glob/glob.go 2 | package glob 3 | 4 | import ( 5 | "github.com/gobwas/glob" 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | func Match(pattern, text string, separators ...rune) bool { 10 | compiledGlob, err := glob.Compile(pattern, separators...) 
11 | if err != nil { 12 | log.Warnf("failed to compile pattern %s due to error %v", pattern, err) 13 | return false 14 | } 15 | return compiledGlob.Match(text) 16 | } 17 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/kv/split.go: -------------------------------------------------------------------------------- 1 | package kv 2 | 3 | import "strings" 4 | 5 | func Split(s, sep string) (string, string) { 6 | parts := strings.SplitN(s, sep, 2) 7 | return strings.TrimSpace(parts[0]), strings.TrimSpace(safeIndex(parts, 1)) 8 | } 9 | 10 | // Like split but if there is only one item return "", item 11 | func RSplit(s, sep string) (string, string) { 12 | parts := strings.SplitN(s, sep, 2) 13 | if len(parts) == 1 { 14 | return "", strings.TrimSpace(parts[0]) 15 | } 16 | return strings.TrimSpace(parts[0]), strings.TrimSpace(safeIndex(parts, 1)) 17 | } 18 | 19 | func safeIndex(parts []string, idx int) string { 20 | if len(parts) <= idx { 21 | return "" 22 | } 23 | return parts[idx] 24 | } 25 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/merr/error.go: -------------------------------------------------------------------------------- 1 | package merr 2 | 3 | import "bytes" 4 | 5 | type Errors []error 6 | 7 | func (e Errors) Err() error { 8 | return NewErrors(e...) 9 | } 10 | 11 | func (e Errors) Error() string { 12 | buf := bytes.NewBuffer(nil) 13 | for _, err := range e { 14 | if buf.Len() > 0 { 15 | buf.WriteString(", ") 16 | } 17 | buf.WriteString(err.Error()) 18 | } 19 | 20 | return buf.String() 21 | } 22 | 23 | func NewErrors(inErrors ...error) error { 24 | var errors []error 25 | for _, err := range inErrors { 26 | if err != nil { 27 | errors = append(errors, err) 28 | } 29 | } 30 | 31 | if len(errors) == 0 { 32 | return nil 33 | } else if len(errors) == 1 { 34 | return errors[0] 35 | } 36 | return Errors(errors) 37 | } 38 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/normalizers/norm.go: -------------------------------------------------------------------------------- 1 | package normalizers 2 | 3 | import ( 4 | "github.com/rancher/fleet/internal/cmd/agent/deployer/internal/diff" 5 | "github.com/rancher/fleet/internal/cmd/agent/deployer/objectset" 6 | 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | ) 9 | 10 | type Norm struct { 11 | normalizers []diff.Normalizer 12 | } 13 | 14 | func (n Norm) Normalize(un *unstructured.Unstructured) error { 15 | for _, normalizer := range n.normalizers { 16 | if err := normalizer.Normalize(un); err != nil { 17 | return err 18 | } 19 | } 20 | return nil 21 | } 22 | 23 | func New(lives objectset.ObjectByGVK, additions ...diff.Normalizer) Norm { 24 | n := Norm{ 25 | normalizers: []diff.Normalizer{ 26 | // Status fields are normally subresources which can't be influenced by resource updates 27 | &StatusNormalizer{}, 28 | &MutatingWebhookNormalizer{ 29 | Live: lives, 30 | }, 31 | &ValidatingWebhookNormalizer{ 32 | Live: lives, 33 | }, 34 | }, 35 | } 36 | 37 | n.normalizers = append(n.normalizers, additions...) 
38 | 39 | return n 40 | } 41 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/normalizers/status.go: -------------------------------------------------------------------------------- 1 | package normalizers 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | ) 6 | 7 | // StatusNormalizer removes a top-level "status" fields from the object, if present 8 | type StatusNormalizer struct{} 9 | 10 | func (StatusNormalizer) Normalize(un *unstructured.Unstructured) error { 11 | unstructured.RemoveNestedField(un.Object, "status") 12 | return nil 13 | } 14 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/objectset/stringset.go: -------------------------------------------------------------------------------- 1 | package objectset 2 | 3 | var empty struct{} 4 | 5 | // Set is an exceptionally simple `set` implementation for strings. 6 | // It is not threadsafe, but can be used in place of a simple `map[string]struct{}` 7 | // as long as you don't want to do too much with it. 8 | type Set struct { 9 | m map[string]struct{} 10 | } 11 | 12 | func (s *Set) Add(ss ...string) { 13 | if s.m == nil { 14 | s.m = make(map[string]struct{}, len(ss)) 15 | } 16 | for _, k := range ss { 17 | s.m[k] = empty 18 | } 19 | } 20 | 21 | func (s *Set) Values() []string { 22 | i := 0 23 | keys := make([]string, len(s.m)) 24 | for key := range s.m { 25 | keys[i] = key 26 | i++ 27 | } 28 | 29 | return keys 30 | } 31 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/summary/cattletypes.go: -------------------------------------------------------------------------------- 1 | package summary 2 | 3 | import ( 4 | "github.com/rancher/fleet/internal/cmd/agent/deployer/data" 5 | fleetv1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/summary" 6 | ) 7 | 8 | func checkCattleTypes(obj data.Object, condition []Condition, summary fleetv1.Summary) fleetv1.Summary { 9 | return checkRelease(obj, condition, summary) 10 | } 11 | 12 | func checkRelease(obj data.Object, _ []Condition, summary fleetv1.Summary) fleetv1.Summary { 13 | if !isKind(obj, "App", "catalog.cattle.io") { 14 | return summary 15 | } 16 | if obj.String("status", "summary", "state") != "deployed" { 17 | return summary 18 | } 19 | for _, resources := range obj.Slice("spec", "resources") { 20 | summary.Relationships = append(summary.Relationships, fleetv1.Relationship{ 21 | Name: resources.String("name"), 22 | Kind: resources.String("kind"), 23 | APIVersion: resources.String("apiVersion"), 24 | Type: "helmresource", 25 | }) 26 | } 27 | return summary 28 | } 29 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/summary/suite_test.go: -------------------------------------------------------------------------------- 1 | package summary_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestSummary(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Summary Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/cmd/agent/deployer/summary/summarized.go: -------------------------------------------------------------------------------- 1 | package summary 2 | 3 | import ( 4 | fleetv1 "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1/summary" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | type SummarizedObject struct { 10 | metav1.PartialObjectMetadata 11 | fleetv1.Summary 12 | } 13 | -------------------------------------------------------------------------------- /internal/cmd/agent/globals/globals.go: -------------------------------------------------------------------------------- 1 | package globals 2 | 3 | var ( 4 | InstallerImage = "ibuildthecloud/fleet" 5 | ) 6 | -------------------------------------------------------------------------------- /internal/cmd/cli/writer/writer.go: -------------------------------------------------------------------------------- 1 | // Package writer provides a writer that can be used to write to a file or stdout. 2 | package writer 3 | 4 | import ( 5 | "io" 6 | "os" 7 | "path/filepath" 8 | ) 9 | 10 | type nopCloser struct { 11 | io.Writer 12 | } 13 | 14 | func (nopCloser) Close() error { return nil } 15 | 16 | func NewDefaultNone(output string) io.WriteCloser { 17 | if output == "" { 18 | return nil 19 | } 20 | return New(output) 21 | } 22 | 23 | func New(output string) io.WriteCloser { 24 | switch output { 25 | case "": 26 | return nopCloser{Writer: io.Discard} 27 | case "-": 28 | return os.Stdout 29 | default: 30 | return &lazyFileWriter{ 31 | path: output, 32 | } 33 | } 34 | } 35 | 36 | type lazyFileWriter struct { 37 | path string 38 | file *os.File 39 | } 40 | 41 | func (l *lazyFileWriter) Write(data []byte) (int, error) { 42 | if l.file == nil { 43 | dir := filepath.Dir(l.path) 44 | if err := os.MkdirAll(dir, 0755); err != nil { 45 | return 0, err 46 | } 47 | f, err := os.Create(l.path) 48 | if err != nil { 49 | return 0, err 50 | } 51 | l.file = f 52 | } 53 | return l.file.Write(data) 54 | } 55 | 56 | func (l *lazyFileWriter) Close() error { 57 | if l.file == nil { 58 | return nil 59 | } 60 | return l.file.Close() 61 | } 62 | -------------------------------------------------------------------------------- /internal/cmd/controller/agentmanagement/connection/connection.go: -------------------------------------------------------------------------------- 1 | // Package connection provides a connection to a Kubernetes cluster, used when importing a cluster. 2 | package connection 3 | 4 | import ( 5 | "k8s.io/client-go/kubernetes" 6 | ) 7 | 8 | func SmokeTestKubeClientConnection(client *kubernetes.Clientset) error { 9 | _, err := client.Discovery().ServerVersion() 10 | return err 11 | } 12 | -------------------------------------------------------------------------------- /internal/cmd/controller/agentmanagement/controllers/clusterregistration/suite_test.go: -------------------------------------------------------------------------------- 1 | package clusterregistration_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . 
"github.com/onsi/gomega" 8 | "github.com/rancher/fleet/internal/config" 9 | ) 10 | 11 | func TestFleet(t *testing.T) { 12 | RegisterFailHandler(Fail) 13 | RunSpecs(t, "ClusterRegistration Controller Suite") 14 | } 15 | 16 | var _ = BeforeSuite(func() { 17 | _ = config.SetAndTrigger(&config.Config{IgnoreClusterRegistrationLabels: false}) 18 | }) 19 | -------------------------------------------------------------------------------- /internal/cmd/controller/cleanup/root.go: -------------------------------------------------------------------------------- 1 | package cleanup 2 | 3 | import ( 4 | "fmt" 5 | 6 | command "github.com/rancher/fleet/internal/cmd" 7 | "github.com/rancher/fleet/pkg/version" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | type CleanUp struct { 12 | Kubeconfig string `usage:"kubeconfig file"` 13 | Namespace string `usage:"namespace to watch" env:"NAMESPACE"` 14 | } 15 | 16 | // HelpFunc hides the global flags from the help output 17 | func (c *CleanUp) HelpFunc(cmd *cobra.Command, strings []string) { 18 | _ = cmd.Flags().MarkHidden("disable-metrics") 19 | _ = cmd.Flags().MarkHidden("shard-id") 20 | cmd.Parent().HelpFunc()(cmd, strings) 21 | } 22 | 23 | func (c *CleanUp) Run(cmd *cobra.Command, args []string) error { 24 | if c.Namespace == "" { 25 | return fmt.Errorf("--namespace or env NAMESPACE is required to be set") 26 | } 27 | return start(cmd.Context(), c.Kubeconfig, c.Namespace) 28 | } 29 | 30 | func App() *cobra.Command { 31 | return command.Command(&CleanUp{}, cobra.Command{ 32 | Version: version.FriendlyVersion(), 33 | Use: "cleanup", 34 | }) 35 | } 36 | -------------------------------------------------------------------------------- /internal/cmd/controller/cleanup/start.go: -------------------------------------------------------------------------------- 1 | package cleanup 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/sirupsen/logrus" 7 | 8 | "github.com/rancher/fleet/internal/cmd/controller/cleanup/controllers" 9 | "github.com/rancher/wrangler/v3/pkg/kubeconfig" 10 | "github.com/rancher/wrangler/v3/pkg/leader" 11 | "k8s.io/client-go/kubernetes" 12 | "k8s.io/client-go/rest" 13 | ) 14 | 15 | func start(ctx context.Context, kubeConfig, namespace string) error { 16 | clientConfig := kubeconfig.GetNonInteractiveClientConfig(kubeConfig) 17 | kc, err := clientConfig.ClientConfig() 18 | if err != nil { 19 | return err 20 | } 21 | 22 | // try to claim leadership lease without rate limiting 23 | localConfig := rest.CopyConfig(kc) 24 | localConfig.QPS = -1 25 | localConfig.RateLimiter = nil 26 | k8s, err := kubernetes.NewForConfig(localConfig) 27 | if err != nil { 28 | return err 29 | } 30 | 31 | leader.RunOrDie(ctx, namespace, "fleet-cleanup-lock", k8s, func(ctx context.Context) { 32 | appCtx, err := controllers.NewAppContext(clientConfig) 33 | if err != nil { 34 | logrus.Fatal(err) 35 | } 36 | if err := controllers.Register(ctx, appCtx); err != nil { 37 | logrus.Fatal(err) 38 | } 39 | }) 40 | 41 | return nil 42 | } 43 | -------------------------------------------------------------------------------- /internal/cmd/controller/errorutil/errorutil.go: -------------------------------------------------------------------------------- 1 | package errorutil 2 | 3 | import apierrors "k8s.io/apimachinery/pkg/api/errors" 4 | 5 | func IgnoreConflict(err error) error { 6 | if apierrors.IsConflict(err) { 7 | return nil 8 | } 9 | return err 10 | } 11 | -------------------------------------------------------------------------------- 
/internal/cmd/controller/gitops/reconciler/suite_test.go: -------------------------------------------------------------------------------- 1 | package reconciler 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestGitOpsReconciler(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "GitOps Reconciler Suite") 13 | } 14 | 15 | var _ = BeforeSuite(func() { 16 | }) 17 | -------------------------------------------------------------------------------- /internal/cmd/controller/imagescan/update/README.md: -------------------------------------------------------------------------------- 1 | # Credit 2 | 3 | This package is copied from https://github.com/fluxcd/image-automation-controller so giving credit to them -------------------------------------------------------------------------------- /internal/cmd/controller/namespace/util.go: -------------------------------------------------------------------------------- 1 | // Package namespace generates the name of the system registration namespace. 2 | // 3 | // Special namespaces in fleet: 4 | // * system namespace: cattle-fleet-system 5 | // * system registration namespace: cattle-fleet-clusters-system 6 | // * cluster registration namespace or "workspace": fleet-local 7 | // * cluster namespace: cluster-${namespace}-${cluster}-${random} 8 | 9 | package namespace 10 | 11 | import ( 12 | "strings" 13 | 14 | corev1 "k8s.io/api/core/v1" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | ) 17 | 18 | func GVK() schema.GroupVersionKind { 19 | return schema.GroupVersionKind{ 20 | Group: corev1.SchemeGroupVersion.Group, 21 | Version: corev1.SchemeGroupVersion.Version, 22 | Kind: "Namespace", 23 | } 24 | } 25 | 26 | // SystemRegistrationNamespace generates the name of the system registration 27 | // namespace from the configured system namespace, e.g.: 28 | // cattle-fleet-system -> cattle-fleet-clusters-system 29 | func SystemRegistrationNamespace(systemNamespace string) string { 30 | ns := strings.ReplaceAll(systemNamespace, "-system", "-clusters-system") 31 | if ns == systemNamespace { 32 | return systemNamespace + "-clusters-system" 33 | } 34 | return ns 35 | } 36 | -------------------------------------------------------------------------------- /internal/cmd/controller/reconciler/bundle_status.go: -------------------------------------------------------------------------------- 1 | package reconciler 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rancher/fleet/internal/cmd/controller/summary" 7 | "github.com/rancher/fleet/internal/cmd/controller/target" 8 | fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" 9 | ) 10 | 11 | const ( 12 | maxNew = 50 13 | ) 14 | 15 | func resetStatus(status *fleet.BundleStatus, allTargets []*target.Target) (err error) { 16 | status.MaxNew = maxNew 17 | status.Summary = fleet.BundleSummary{} 18 | status.PartitionStatus = nil 19 | status.Unavailable = 0 20 | status.NewlyCreated = 0 21 | status.Summary = target.Summary(allTargets) 22 | status.Unavailable = target.Unavailable(allTargets) 23 | status.MaxUnavailable, err = target.MaxUnavailable(allTargets) 24 | return err 25 | } 26 | 27 | func updateDisplay(status *fleet.BundleStatus) { 28 | status.Display.ReadyClusters = fmt.Sprintf("%d/%d", 29 | status.Summary.Ready, 30 | status.Summary.DesiredReady) 31 | status.Display.State = string(summary.GetSummaryState(status.Summary)) 32 | } 33 | -------------------------------------------------------------------------------- 
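Aside (not part of the repository dump): resetStatus and updateDisplay in bundle_status.go above are meant to run back to back, first recomputing counters from the current targets and then deriving the human-readable display fields. A hypothetical in-package sketch, assuming a prepared []*target.Target and a *fleet.BundleStatus:

// refreshBundleStatus is not part of Fleet; it only shows the call order.
func refreshBundleStatus(status *fleet.BundleStatus, targets []*target.Target) error {
	// Recompute summary, unavailable and max-unavailable counts from all targets.
	if err := resetStatus(status, targets); err != nil {
		return err
	}
	// Derive display fields such as ReadyClusters ("3/5") and State.
	updateDisplay(status)
	return nil
}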
/internal/cmd/debug.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | // Copied from https://github.com/rancher/wrangler-cli 4 | 5 | import ( 6 | "flag" 7 | "fmt" 8 | 9 | "github.com/sirupsen/logrus" 10 | "go.uber.org/zap/zapcore" 11 | 12 | "k8s.io/klog/v2" 13 | crzap "sigs.k8s.io/controller-runtime/pkg/log/zap" 14 | ) 15 | 16 | type DebugConfig struct { 17 | Debug bool `usage:"Turn on debug logging"` 18 | DebugLevel int `usage:"If debugging is enabled, set klog -v=X"` 19 | } 20 | 21 | func (c *DebugConfig) SetupDebug() error { 22 | logging := flag.NewFlagSet("", flag.PanicOnError) 23 | klog.InitFlags(logging) 24 | if c.Debug { 25 | logrus.SetLevel(logrus.DebugLevel) 26 | if err := logging.Parse([]string{ 27 | fmt.Sprintf("-v=%d", c.DebugLevel), 28 | }); err != nil { 29 | return err 30 | } 31 | } else { 32 | if err := logging.Parse([]string{ 33 | "-v=0", 34 | }); err != nil { 35 | return err 36 | } 37 | } 38 | 39 | return nil 40 | } 41 | 42 | // OverrideZapOpts, for compatibility override zap opts with legacy debug opts. 43 | func (c *DebugConfig) OverrideZapOpts(zopts *crzap.Options) *crzap.Options { 44 | if zopts == nil { 45 | zopts = &crzap.Options{} 46 | } 47 | 48 | zopts.Development = c.Debug 49 | 50 | if c.Debug && c.DebugLevel > 0 { 51 | zopts.Level = zapcore.Level(c.DebugLevel * -1) //nolint:gosec // no risk, just debug level 52 | } 53 | 54 | return zopts 55 | } 56 | -------------------------------------------------------------------------------- /internal/config/config_test.go: -------------------------------------------------------------------------------- 1 | package config_test 2 | 3 | import ( 4 | "time" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | 9 | v1 "k8s.io/api/core/v1" 10 | 11 | "github.com/rancher/fleet/internal/config" 12 | ) 13 | 14 | var _ = Describe("Config", func() { 15 | When("not having set a value for gitClientTimeout", func() { 16 | It("should return the default value", func() { 17 | cfg, err := config.ReadConfig(&v1.ConfigMap{Data: map[string]string{}}) 18 | Expect(err).ToNot(HaveOccurred()) 19 | Expect(cfg.GitClientTimeout.Duration).To(Equal(30 * time.Second)) 20 | }) 21 | }) 22 | When("having set a value for gitClientTimeout", func() { 23 | It("should return the set value", func() { 24 | jsonConfig := `{"gitClientTimeout": "20s"}` 25 | cfg, err := config.ReadConfig(&v1.ConfigMap{ 26 | Data: map[string]string{ 27 | "config": jsonConfig, 28 | }, 29 | }) 30 | Expect(err).ToNot(HaveOccurred()) 31 | Expect(cfg.GitClientTimeout.Duration).To(Equal(20 * time.Second)) 32 | }) 33 | }) 34 | }) 35 | -------------------------------------------------------------------------------- /internal/config/overrides.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/sirupsen/logrus" 7 | ) 8 | 9 | // BypassSystemCAStore is used to bypass the OS trust store in agents through env vars, see 10 | // https://pkg.go.dev/crypto/x509#SystemCertPool for more info. 11 | // We set values to paths belonging to the root filesystem, which is read-only, to prevent tampering. 12 | // Eventually, this should not be necessary, if/when we find a way to set client-go's API Config to achieve similar 13 | // effects. 14 | // Note: this will not work on Windows nor Mac OS. Agents are expected to run on Linux nodes. 
15 | func BypassSystemCAStore() { 16 | err := os.Setenv("SSL_CERT_FILE", "/dev/null") 17 | if err != nil { 18 | logrus.Errorf("failed to set env var SSL_CERT_FILE: %s", err.Error()) 19 | } 20 | 21 | err = os.Setenv("SSL_CERT_DIR", "/dev/null") 22 | if err != nil { 23 | logrus.Errorf("failed to set env var SSL_CERT_DIR: %s", err.Error()) 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /internal/config/suite_test.go: -------------------------------------------------------------------------------- 1 | package config_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . "github.com/onsi/gomega" 9 | ) 10 | 11 | const ( 12 | timeout = 30 * time.Second 13 | ) 14 | 15 | func TestFleet(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | RunSpecs(t, "Config Suite") 18 | } 19 | 20 | var _ = BeforeSuite(func() { 21 | SetDefaultEventuallyTimeout(timeout) 22 | }) 23 | -------------------------------------------------------------------------------- /internal/content/helpers.go: -------------------------------------------------------------------------------- 1 | package content 2 | 3 | import ( 4 | "bytes" 5 | "compress/gzip" 6 | "encoding/base64" 7 | "io" 8 | "strings" 9 | ) 10 | 11 | func GUnzip(content []byte) ([]byte, error) { 12 | r, err := gzip.NewReader(bytes.NewBuffer(content)) 13 | if err != nil { 14 | return nil, err 15 | } 16 | return io.ReadAll(r) 17 | } 18 | 19 | func Base64GZ(data []byte) (string, error) { 20 | gz, err := Gzip(data) 21 | if err != nil { 22 | return "", err 23 | } 24 | return base64.StdEncoding.EncodeToString(gz), nil 25 | } 26 | 27 | func Decode(content, encoding string) ([]byte, error) { 28 | var data []byte 29 | 30 | if encoding == "base64" || strings.HasPrefix(encoding, "base64+") { 31 | d, err := base64.StdEncoding.DecodeString(content) 32 | if err != nil { 33 | return nil, err 34 | } 35 | data = d 36 | encoding = strings.TrimPrefix(encoding, "base64") 37 | encoding = strings.TrimPrefix(encoding, "+") 38 | } else { 39 | data = []byte(content) 40 | } 41 | 42 | if encoding == "gz" { 43 | return GUnzip(data) 44 | } 45 | 46 | return data, nil 47 | } 48 | 49 | func Gzip(data []byte) ([]byte, error) { 50 | buf := &bytes.Buffer{} 51 | w := gzip.NewWriter(buf) 52 | defer w.Close() 53 | 54 | if _, err := w.Write(data); err != nil { 55 | return nil, err 56 | } 57 | if err := w.Close(); err != nil { 58 | return nil, err 59 | } 60 | return buf.Bytes(), nil 61 | } 62 | -------------------------------------------------------------------------------- /internal/fleetyaml/fleetyaml.go: -------------------------------------------------------------------------------- 1 | // Package fleetyaml provides utilities for working with fleet.yaml files, 2 | // which are the central yaml files for bundles. 
3 | package fleetyaml 4 | 5 | import ( 6 | "os" 7 | "path/filepath" 8 | "strings" 9 | ) 10 | 11 | const ( 12 | fleetYaml = "fleet.yaml" 13 | fallbackFleetYaml = "fleet.yml" 14 | ) 15 | 16 | func FoundFleetYamlInDirectory(baseDir string) bool { 17 | if _, err := os.Stat(GetFleetYamlPath(baseDir, false)); err != nil { 18 | if _, err := os.Stat(GetFleetYamlPath(baseDir, true)); err != nil { 19 | return false 20 | } 21 | } 22 | return true 23 | } 24 | 25 | func GetFleetYamlPath(baseDir string, useFallbackFileExtension bool) string { 26 | if useFallbackFileExtension { 27 | return filepath.Join(baseDir, fallbackFleetYaml) 28 | } 29 | return filepath.Join(baseDir, fleetYaml) 30 | } 31 | 32 | func IsFleetYaml(fileName string) bool { 33 | if fileName == fleetYaml || fileName == fallbackFleetYaml { 34 | return true 35 | } 36 | return false 37 | } 38 | 39 | func IsFleetYamlSuffix(filePath string) bool { 40 | return strings.HasSuffix(filePath, "/"+fleetYaml) || strings.HasSuffix(filePath, "/"+fallbackFleetYaml) 41 | } 42 | -------------------------------------------------------------------------------- /internal/fleetyaml/fleetyaml_test.go: -------------------------------------------------------------------------------- 1 | //go:build !windows 2 | // +build !windows 3 | 4 | package fleetyaml 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestBundleYaml(t *testing.T) { 14 | a := assert.New(t) 15 | for _, path := range []string{"/foo", "foo", "/foo/", "foo/", "../foo/bar"} { 16 | 17 | // Test both the primary extension and the fallback extension. 18 | for _, fullPath := range []string{GetFleetYamlPath(path, false), GetFleetYamlPath(path, true)} { 19 | a.True(IsFleetYaml(filepath.Base(fullPath))) 20 | a.True(IsFleetYamlSuffix(fullPath)) 21 | } 22 | } 23 | 24 | // Test expected failure payloads. 
25 | for _, fullPath := range []string{"fleet.yaaaaaaaaaml", "", ".", "weakmonkey.yaml", "../fleet.yaaaaml"} { 26 | a.False(IsFleetYaml(filepath.Base(fullPath))) 27 | a.False(IsFleetYamlSuffix(fullPath)) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /internal/helmdeployer/rawyaml/resources.go: -------------------------------------------------------------------------------- 1 | package rawyaml 2 | 3 | import ( 4 | "bytes" 5 | "strings" 6 | 7 | "helm.sh/helm/v3/pkg/chart" 8 | 9 | "github.com/rancher/wrangler/v3/pkg/yaml" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | ) 12 | 13 | const ( 14 | YAMLPrefix = "chart/raw-yaml/" 15 | inChartPrefix = "raw-yaml/" 16 | ) 17 | 18 | func ToObjects(c *chart.Chart) (result []runtime.Object, _ error) { 19 | for _, resource := range c.Files { 20 | if !strings.HasPrefix(resource.Name, inChartPrefix) { 21 | continue 22 | } 23 | objs, err := yaml.ToObjects(bytes.NewBuffer(resource.Data)) 24 | if err != nil { 25 | if runtime.IsMissingKind(err) { 26 | continue 27 | } 28 | return nil, err 29 | } 30 | for _, obj := range objs { 31 | apiVersion, kind := obj.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind() 32 | if apiVersion == "" || kind == "" { 33 | continue 34 | } 35 | result = append(result, obj) 36 | } 37 | } 38 | 39 | return result, nil 40 | } 41 | -------------------------------------------------------------------------------- /internal/helmvalues/hash.go: -------------------------------------------------------------------------------- 1 | package helmvalues 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/json" 6 | "fmt" 7 | ) 8 | 9 | const ( 10 | ValuesKey = "values" 11 | StagedValuesKey = "stagedValues" 12 | ) 13 | 14 | // HashValuesSecret hashes the data of a secret. This is used for the bundle 15 | // values secret created by fleet apply to detect changes and trigger updates. 16 | func HashValuesSecret(data map[string][]byte) (string, error) { 17 | hasher := sha256.New() 18 | b, err := json.Marshal(data) 19 | if err != nil { 20 | return "", err 21 | } 22 | hasher.Write(b) 23 | return fmt.Sprintf("%x", hasher.Sum(nil)), nil 24 | } 25 | 26 | // HashOptions hashes the bytes passed in. This is used to create a hash of the 27 | // bundledeployment's helm options and staged helm options. 
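//
// A minimal sketch (optionsJSON and stagedOptionsJSON are hypothetical, pre-marshalled inputs):
// the slices are hashed in order, so identical inputs always produce the same hex digest, and any
// change to either blob yields a different hash that can be compared against a stored value.
//
//	h := HashOptions(optionsJSON, stagedOptionsJSON)
//	// comparing h with the previously recorded hash tells whether the helm options changed
//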
28 | func HashOptions(bytes ...[]byte) string { 29 | hasher := sha256.New() 30 | for _, b := range bytes { 31 | hasher.Write(b) 32 | } 33 | return fmt.Sprintf("%x", hasher.Sum(nil)) 34 | } 35 | -------------------------------------------------------------------------------- /internal/manifest/lookup.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/fleet/internal/content" 7 | fleet "github.com/rancher/fleet/pkg/apis/fleet.cattle.io/v1alpha1" 8 | 9 | "k8s.io/apimachinery/pkg/types" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | func NewLookup() *Lookup { 14 | return &Lookup{} 15 | } 16 | 17 | type Lookup struct { 18 | } 19 | 20 | func (l *Lookup) Get(ctx context.Context, client client.Reader, id string) (*Manifest, error) { 21 | c := &fleet.Content{} 22 | err := client.Get(ctx, types.NamespacedName{Name: id}, c) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | data, err := content.GUnzip(c.Content) 28 | if err != nil { 29 | return nil, err 30 | } 31 | return FromJSON(data, c.SHA256Sum) 32 | } 33 | -------------------------------------------------------------------------------- /internal/manifest/output.go: -------------------------------------------------------------------------------- 1 | package manifest 2 | 3 | import ( 4 | "archive/tar" 5 | "bytes" 6 | "compress/gzip" 7 | "io" 8 | "time" 9 | 10 | "github.com/rancher/fleet/internal/content" 11 | ) 12 | 13 | func (m *Manifest) ToTarGZ() (io.Reader, error) { 14 | buf := &bytes.Buffer{} 15 | gz := gzip.NewWriter(buf) 16 | w := tar.NewWriter(gz) 17 | 18 | for _, resource := range m.Resources { 19 | bytes, err := content.Decode(resource.Content, resource.Encoding) 20 | if err != nil { 21 | return nil, err 22 | } 23 | 24 | if err := w.WriteHeader(&tar.Header{ 25 | Name: resource.Name, 26 | Mode: 0644, 27 | Typeflag: tar.TypeReg, 28 | ModTime: time.Unix(0, 0), 29 | Size: int64(len(bytes)), 30 | }); err != nil { 31 | return nil, err 32 | } 33 | _, err = w.Write(bytes) 34 | if err != nil { 35 | return nil, err 36 | } 37 | } 38 | 39 | if err := w.Close(); err != nil { 40 | return nil, err 41 | } 42 | 43 | return buf, gz.Close() 44 | } 45 | -------------------------------------------------------------------------------- /internal/names/keyhash.go: -------------------------------------------------------------------------------- 1 | package names 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | ) 7 | 8 | // KeyHash returns the first 12 hex characters of the hash of the first 100 chars 9 | // of the input string 10 | func KeyHash(s string) string { 11 | if len(s) > 100 { 12 | s = s[:100] 13 | } 14 | d := sha256.Sum256([]byte(s)) 15 | return hex.EncodeToString(d[:])[:12] 16 | } 17 | -------------------------------------------------------------------------------- /internal/names/safeconcat.go: -------------------------------------------------------------------------------- 1 | package names 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | "strings" 7 | ) 8 | 9 | // SafeConcatName concatenates the given strings and ensures the returned name is under 64 characters 10 | // by cutting the string off at 57 characters and setting the last 6 with an encoded version of the concatenated string. 
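//
// For example (hypothetical inputs): short results are returned unchanged, long ones are cut
// and suffixed with a short hash so the name stays stable, unique and below 64 characters.
//
//	SafeConcatName("fleet-agent", "local")          // "fleet-agent-local"
//	SafeConcatName(veryLongClusterName, bundleName) // 63 characters, ending in "-" plus a hash prefix
//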
11 | func SafeConcatName(name ...string) string { 12 | fullPath := strings.Join(name, "-") 13 | if len(fullPath) < 64 { 14 | return fullPath 15 | } 16 | digest := sha256.Sum256([]byte(fullPath)) 17 | // since we cut the string in the middle, the last char may not be compatible with what is expected in k8s 18 | // we are checking and if necessary removing the last char 19 | c := fullPath[56] 20 | if 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { 21 | return fullPath[0:57] + "-" + hex.EncodeToString(digest[0:])[0:5] 22 | } 23 | 24 | return fullPath[0:56] + "-" + hex.EncodeToString(digest[0:])[0:6] 25 | } 26 | -------------------------------------------------------------------------------- /internal/names/suite_test.go: -------------------------------------------------------------------------------- 1 | package names_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestNames(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Name Suite") 13 | } 14 | -------------------------------------------------------------------------------- /internal/ocistorage/suite_test.go: -------------------------------------------------------------------------------- 1 | package ocistorage_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . "github.com/onsi/gomega" 9 | ) 10 | 11 | const ( 12 | timeout = 30 * time.Second 13 | ) 14 | 15 | func TestFleet(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | RunSpecs(t, "OCI Utils Suite") 18 | } 19 | 20 | var _ = BeforeSuite(func() { 21 | SetDefaultEventuallyTimeout(timeout) 22 | }) 23 | -------------------------------------------------------------------------------- /internal/registration/secret.go: -------------------------------------------------------------------------------- 1 | package registration 2 | 3 | import ( 4 | "crypto/sha256" 5 | "encoding/hex" 6 | ) 7 | 8 | func SecretName(clientID, clientRandom string) string { 9 | d := sha256.New() 10 | d.Write([]byte(clientID)) 11 | d.Write([]byte(clientRandom)) 12 | return ("c-" + hex.EncodeToString(d.Sum(nil)))[:63] 13 | } 14 | -------------------------------------------------------------------------------- /internal/ssh/url.go: -------------------------------------------------------------------------------- 1 | package ssh 2 | 3 | import ( 4 | "strings" 5 | 6 | giturls "github.com/rancher/fleet/pkg/git-urls" 7 | ) 8 | 9 | // Is checks if the provided string s is a valid SSH URL, returning a boolean. 
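//
// For example (inputs mirroring the cases exercised in url_test.go):
//
//	Is("ssh://foo/bar")              // true
//	Is("git@github.com:foo/bar.git") // true
//	Is("git+ssh://foo/bar.git")      // true
//	Is("http://foo/bar")             // false
//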
10 | func Is(s string) bool { 11 | url, err := giturls.Parse(s) 12 | if err != nil { 13 | return false 14 | } 15 | 16 | return strings.HasSuffix(url.Scheme, "ssh") 17 | } 18 | -------------------------------------------------------------------------------- /internal/ssh/url_test.go: -------------------------------------------------------------------------------- 1 | package ssh_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/rancher/fleet/internal/ssh" 7 | ) 8 | 9 | func TestIs(t *testing.T) { 10 | tests := map[string]struct { 11 | url string 12 | expectSSH bool 13 | }{ 14 | "http": { 15 | url: "http://foo/bar", 16 | expectSSH: false, 17 | }, 18 | "ftp": { 19 | url: "ftp://foo/bar", 20 | expectSSH: false, 21 | }, 22 | "http with @": { 23 | url: "http://fleet-ci:foo@git-service.fleet-local.svc.cluster.local:8080/repo", 24 | expectSSH: false, 25 | }, 26 | "simple ssh": { 27 | url: "ssh://foo/bar", 28 | expectSSH: true, 29 | }, 30 | "git ssh with @": { 31 | url: "git@github.com:foo/bar.git", 32 | expectSSH: true, 33 | }, 34 | "git+ssh": { 35 | url: "git+ssh://foo/bar.git", 36 | expectSSH: true, 37 | }, 38 | "invalid with ssh": { 39 | url: "sshfoo://foo/bar.git", 40 | expectSSH: false, 41 | }, 42 | } 43 | 44 | for name, test := range tests { 45 | t.Run(name, func(t *testing.T) { 46 | isSSH := ssh.Is(test.url) 47 | 48 | if isSSH != test.expectSSH { 49 | t.Errorf("expected SSH match to be %t, got %t", test.expectSSH, isSSH) 50 | } 51 | }) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BUILD_ENV=dapper 2 | ARG ARCH 3 | 4 | FROM --platform=linux/$ARCH registry.suse.com/bci/bci-base:15.6 AS base 5 | COPY package/log.sh /usr/bin/ 6 | RUN zypper rm -y container-suseconnect && \ 7 | zypper ar --priority=500 https://download.opensuse.org/repositories/Virtualization:containers/5.5/Virtualization:containers.repo && \ 8 | zypper --gpg-auto-import-keys ref && \ 9 | zypper -n update && \ 10 | zypper -n install --no-recommends openssh-clients tini git-core && \ 11 | zypper -n clean -a && \ 12 | rm -fr /var/log/zypp* /usr/share/doc 13 | 14 | FROM base AS copy_dapper 15 | ONBUILD ARG ARCH 16 | ONBUILD COPY bin/fleetcontroller-linux-$ARCH /usr/bin/fleetcontroller 17 | ONBUILD COPY bin/fleet-linux-$ARCH /usr/bin/fleet 18 | 19 | FROM base AS copy_buildx 20 | ONBUILD ARG TARGETARCH 21 | ONBUILD COPY bin/fleetcontroller-linux-$TARGETARCH /usr/bin/fleetcontroller 22 | ONBUILD COPY bin/fleet-linux-$TARGETARCH /usr/bin/fleet 23 | 24 | FROM base AS copy_goreleaser 25 | ONBUILD ARG ARCH 26 | ONBUILD COPY fleetcontroller-linux-$ARCH /usr/bin/fleetcontroller 27 | ONBUILD COPY fleet-linux-$ARCH /usr/bin/fleet 28 | 29 | FROM copy_${BUILD_ENV} 30 | USER 1000 31 | ENTRYPOINT ["tini", "--"] 32 | CMD ["fleetcontroller"] 33 | -------------------------------------------------------------------------------- /package/Dockerfile.agent: -------------------------------------------------------------------------------- 1 | ARG BUILD_ENV=dapper 2 | ARG ARCH 3 | 4 | FROM --platform=linux/$ARCH registry.suse.com/bci/bci-busybox:15.6 AS base 5 | 6 | FROM base AS copy_dapper 7 | ONBUILD ARG ARCH 8 | ONBUILD COPY bin/fleetagent-linux-$ARCH /usr/bin/fleetagent 9 | 10 | FROM base AS copy_buildx 11 | ONBUILD ARG TARGETARCH 12 | ONBUILD COPY bin/fleetagent-linux-$TARGETARCH /usr/bin/fleetagent 13 | 14 | FROM base AS copy_goreleaser 15 | ONBUILD ARG ARCH 16 | ONBUILD COPY 
fleetagent-linux-$ARCH /usr/bin/fleetagent 17 | 18 | FROM copy_${BUILD_ENV} 19 | USER 1000 20 | CMD ["fleetagent"] 21 | -------------------------------------------------------------------------------- /package/log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o pipefail 3 | env 4 | "$@" 2>&1 | tee /dev/termination-log 5 | -------------------------------------------------------------------------------- /pkg/apis/fleet.cattle.io/v1alpha1/bundlenamespacemapping_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | func init() { 8 | InternalSchemeBuilder.Register(&BundleNamespaceMapping{}, &BundleNamespaceMappingList{}) 9 | } 10 | 11 | // +genclient 12 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 13 | // +kubebuilder:object:root=true 14 | // +kubebuilder:subresource:status 15 | 16 | // BundleNamespaceMapping maps bundles to clusters in other namespaces. 17 | type BundleNamespaceMapping struct { 18 | metav1.TypeMeta `json:",inline"` 19 | metav1.ObjectMeta `json:"metadata,omitempty"` 20 | 21 | // +nullable 22 | BundleSelector *metav1.LabelSelector `json:"bundleSelector,omitempty"` 23 | // +nullable 24 | NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` 25 | } 26 | 27 | // +kubebuilder:object:root=true 28 | 29 | // BundleNamespaceMappingList contains a list of BundleNamespaceMapping 30 | type BundleNamespaceMappingList struct { 31 | metav1.TypeMeta `json:",inline"` 32 | metav1.ListMeta `json:"metadata,omitempty"` 33 | Items []BundleNamespaceMapping `json:"items"` 34 | } 35 | -------------------------------------------------------------------------------- /pkg/apis/fleet.cattle.io/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2020 - 2024 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=fleet.cattle.io 21 | package v1alpha1 22 | -------------------------------------------------------------------------------- /pkg/apis/fleet.cattle.io/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2021-2023 SUSE LLC 2 | 3 | // Package v1alpha1 contains API Schema definitions for the fleet.cattle.io v1alpha1 API group 4 | // +kubebuilder:object:generate=true 5 | // +groupName=fleet.cattle.io 6 | package v1alpha1 7 | 8 | import ( 9 | scheme "github.com/rancher/fleet/pkg/apis/internal" 10 | 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | ) 13 | 14 | var ( 15 | // SchemeGroupVersion is group version used to register these objects 16 | SchemeGroupVersion = schema.GroupVersion{Group: "fleet.cattle.io", Version: "v1alpha1"} 17 | 18 | // InternalSchemeBuilder is used to add go types to the GroupVersionKind scheme 19 | InternalSchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} 20 | 21 | // Compatibility with k8s.io/apimachinery/pkg/runtime.Object 22 | SchemeBuilder = InternalSchemeBuilder.SchemeBuilder 23 | 24 | // AddToScheme adds the types in this group-version to the given scheme. 25 | AddToScheme = InternalSchemeBuilder.AddToScheme 26 | ) 27 | 28 | // GroupResource takes an unqualified resource and returns a Group qualified GroupResource 29 | func GroupResource(resource string) schema.GroupResource { 30 | return SchemeGroupVersion.WithResource(resource).GroupResource() 31 | } 32 | -------------------------------------------------------------------------------- /pkg/apis/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rancher/fleet/pkg/apis 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.3 6 | 7 | require ( 8 | github.com/rancher/wrangler/v3 v3.2.1 9 | k8s.io/api v0.33.1 10 | k8s.io/apimachinery v0.33.1 11 | ) 12 | 13 | require ( 14 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 15 | github.com/go-logr/logr v1.4.2 // indirect 16 | github.com/gogo/protobuf v1.3.2 // indirect 17 | github.com/json-iterator/go v1.1.12 // indirect 18 | github.com/kr/text v0.2.0 // indirect 19 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 20 | github.com/modern-go/reflect2 v1.0.2 // indirect 21 | github.com/x448/float16 v0.8.4 // indirect 22 | golang.org/x/net v0.38.0 // indirect 23 | golang.org/x/text v0.23.0 // indirect 24 | gopkg.in/inf.v0 v0.9.1 // indirect 25 | k8s.io/klog/v2 v2.130.1 // indirect 26 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect 27 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 28 | sigs.k8s.io/randfill v1.0.0 // indirect 29 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 30 | sigs.k8s.io/yaml v1.4.0 // indirect 31 | ) 32 | -------------------------------------------------------------------------------- /pkg/cert/cabundle.go: -------------------------------------------------------------------------------- 1 | package cert 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/types" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | const rancherNS = "cattle-system" 14 | 15 | func GetRancherCABundle(ctx context.Context, c client.Client) ([]byte, error) { 16 | secret := &corev1.Secret{} 17 | 18 | err := c.Get(ctx, types.NamespacedName{Namespace: rancherNS, Name: "tls-ca"}, secret) 19 | if 
client.IgnoreNotFound(err) != nil { 20 | return nil, err 21 | } 22 | 23 | caBundle, ok := secret.Data["cacerts.pem"] // TODO check that the path is right, with an actual Rancher install 24 | if !errors.IsNotFound(err) && !ok { 25 | return nil, fmt.Errorf("no field cacerts.pem found in secret tls-ca") 26 | } 27 | 28 | err = c.Get(ctx, types.NamespacedName{Namespace: rancherNS, Name: "tls-ca-additional"}, secret) 29 | if err != nil { 30 | if errors.IsNotFound(err) { 31 | return caBundle, nil 32 | } 33 | 34 | return nil, err 35 | } 36 | 37 | field, ok := secret.Data["ca-additional.pem"] // TODO check that the path is right, with an actual Rancher install 38 | if !ok { 39 | return nil, fmt.Errorf("no field ca-additional.pem found in secret tls-ca-additional") 40 | } 41 | caBundle = append(caBundle, field...) 42 | 43 | return caBundle, nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/event/event.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | const ( 4 | Normal = "Normal" 5 | Warning = "Warning" 6 | ) 7 | -------------------------------------------------------------------------------- /pkg/generated/controllers/fleet.cattle.io/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright (c) 2020 - 2025 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package fleet 20 | 21 | import ( 22 | v1alpha1 "github.com/rancher/fleet/pkg/generated/controllers/fleet.cattle.io/v1alpha1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | ) 25 | 26 | type Interface interface { 27 | V1alpha1() v1alpha1.Interface 28 | } 29 | 30 | type group struct { 31 | controllerFactory controller.SharedControllerFactory 32 | } 33 | 34 | // New returns a new Interface. 35 | func New(controllerFactory controller.SharedControllerFactory) Interface { 36 | return &group{ 37 | controllerFactory: controllerFactory, 38 | } 39 | } 40 | 41 | func (g *group) V1alpha1() v1alpha1.Interface { 42 | return v1alpha1.New(g.controllerFactory) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/git/suite_test.go: -------------------------------------------------------------------------------- 1 | package git 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | ) 10 | 11 | const ( 12 | timeout = 30 * time.Second 13 | ) 14 | 15 | func TestFleet(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | RunSpecs(t, "Git Suite") 18 | } 19 | 20 | var _ = BeforeSuite(func() { 21 | SetDefaultEventuallyTimeout(timeout) 22 | }) 23 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | var ( 8 | Version = "dev" 9 | GitCommit = "HEAD" 10 | ) 11 | 12 | func FriendlyVersion() string { 13 | return fmt.Sprintf("%s (%s)", Version, GitCommit) 14 | } 15 | -------------------------------------------------------------------------------- /updatecli/updatecli.d/known-hosts.yaml: -------------------------------------------------------------------------------- 1 | name: Update known_hosts config map 2 | 3 | {{ range $id, $scm := .scms }} 4 | 5 | --- 6 | name: 'Synchronise known-hosts for {{ $id }}' 7 | scms: 8 | fleet: 9 | kind: github 10 | spec: 11 | user: '{{ $scm.user }}' 12 | email: '{{ $scm.email }}' 13 | owner: '{{ $scm.owner }}' 14 | repository: '{{ $scm.repository }}' 15 | token: '{{ requiredEnv "UPDATECLI_GITHUB_TOKEN" }}' 16 | username: '{{ requiredEnv "UPDATECLI_GITHUB_ACTOR" }}' 17 | branch: '{{ $scm.branch }}' 18 | 19 | targets: 20 | configMapWithUpdatedEntries: 21 | name: 'synchronise config map from new entries' 22 | kind: 'shell' 23 | scmid: 'fleet' 24 | disablesourceinput: true 25 | spec: 26 | changedif: 27 | kind: 'file/checksum' 28 | spec: 29 | files: 30 | - charts/fleet/templates/configmap_known_hosts.yaml 31 | command: bash <(git show main:.github/scripts/update_known_hosts_configmap.sh) 32 | 33 | actions: 34 | default: 35 | name: "[{{ $id }}][updatecli] Update known-hosts config map with new entries" 36 | kind: github/pullrequest 37 | scmid: fleet 38 | spec: 39 | automerge: false 40 | mergemethod: squash 41 | labels: 42 | - kind/known-hosts # /!\ label must exist in the repo! 43 | 44 | {{ end }} 45 | -------------------------------------------------------------------------------- /updatecli/values.d/scm.yaml: -------------------------------------------------------------------------------- 1 | scms: 2 | main: &main 3 | enabled: true 4 | user: 'fleet-bot' 5 | email: 'fleet@suse.de' 6 | owner: rancher 7 | repository: fleet 8 | branch: main 9 | v12: 10 | <<: *main 11 | branch: 'release/v0.12' 12 | v11: 13 | <<: *main 14 | branch: 'release/v0.11' 15 | v10: 16 | <<: *main 17 | branch: 'release/v0.10' 18 | --------------------------------------------------------------------------------