├── .gitignore ├── codecov.yml ├── agent ├── manifests │ ├── base │ │ ├── gitops-agent-sa.yaml │ │ ├── kustomization.yaml │ │ └── gitops-agent-deploy.yaml │ ├── namespace-install │ │ ├── gitops-agent-deployment-overlay.yaml │ │ ├── gitops-agent-role.yaml │ │ ├── gitops-agent-role-binding.yaml │ │ └── kustomization.yaml │ ├── cluster-install │ │ ├── kustomization.yaml │ │ ├── gitops-agent-cluster-role.yaml │ │ └── gitops-agent-cluster-role-binding.yaml │ ├── install-namespaced.yaml │ └── install.yaml └── README.md ├── pkg ├── health │ ├── testdata │ │ ├── application-healthy.yaml │ │ ├── application-degraded.yaml │ │ ├── knative-service.yaml │ │ ├── hpa-v1-progressing-with-no-annotations.yaml │ │ ├── apiservice-v1-true.yaml │ │ ├── apiservice-v1beta1-true.yaml │ │ ├── hpa-v1-progressing.yaml │ │ ├── ingress-nonemptylist.yaml │ │ ├── svc-loadbalancer-unassigned.yaml │ │ ├── apiservice-v1-false.yaml │ │ ├── apiservice-v1beta1-false.yaml │ │ ├── ingress-unassigned.yaml │ │ ├── ingress.yaml │ │ ├── svc-clusterip.yaml │ │ ├── svc-loadbalancer-nonemptylist.yaml │ │ ├── svc-loadbalancer.yaml │ │ ├── hpa-v1-healthy.yaml │ │ ├── hpa-v1-degraded.yaml │ │ ├── pvc-pending.yaml │ │ ├── job-running.yaml │ │ ├── pvc-bound.yaml │ │ ├── job-succeeded.yaml │ │ ├── hpa-v2beta2-healthy.yaml │ │ ├── job-failed.yaml │ │ ├── daemonset-ondelete.yaml │ │ ├── pod-pending.yaml │ │ ├── pod-deletion.yaml │ │ ├── pod-running-restart-never.yaml │ │ ├── pod-running-restart-always.yaml │ │ ├── pod-succeeded.yaml │ │ ├── pod-failed.yaml │ │ ├── pod-running-not-ready.yaml │ │ ├── deployment-progressing.yaml │ │ ├── deployment-degraded.yaml │ │ ├── deployment-suspended.yaml │ │ ├── pod-crashloop.yaml │ │ ├── pod-running-restart-onfailure.yaml │ │ ├── pod-error.yaml │ │ ├── pod-imagepullbackoff.yaml │ │ └── hpa-v2beta1-healthy.yaml │ ├── health_service.go │ ├── health_pvc.go │ ├── health_argo.go │ ├── health_job.go │ ├── health_ingress.go │ └── health_apiservice.go ├── utils │ ├── kube │ │ ├── 
resource_filter.go │ │ ├── testdata │ │ │ ├── appsdeployment.yaml │ │ │ ├── extensionsdeployment.yaml │ │ │ ├── v1HPA.yaml │ │ │ ├── cr.yaml │ │ │ ├── v2beta1HPA.yaml │ │ │ └── nginx.yaml │ │ ├── convert.go │ │ ├── scheme │ │ │ └── scheme.go │ │ ├── ctl_test.go │ │ ├── convert_test.go │ │ └── kubetest │ │ │ └── mock.go │ ├── tracing │ │ ├── tracer_testing │ │ │ └── doc.go │ │ ├── api.go │ │ ├── nop.go │ │ ├── logging_test.go │ │ └── logging.go │ ├── text │ │ └── text.go │ ├── io │ │ └── io.go │ ├── testing │ │ ├── unstructured.go │ │ └── testdata.go │ └── json │ │ └── json.go ├── sync │ ├── ignore │ │ ├── ignore.go │ │ └── ignore_test.go │ ├── hook │ │ ├── helm │ │ │ ├── hook.go │ │ │ ├── weight_test.go │ │ │ ├── weight.go │ │ │ ├── hook_test.go │ │ │ ├── delete_policy_test.go │ │ │ ├── type.go │ │ │ ├── delete_policy.go │ │ │ └── type_test.go │ │ ├── delete_policy.go │ │ ├── hook.go │ │ ├── delete_policy_test.go │ │ └── hook_test.go │ ├── syncwaves │ │ ├── waves_test.go │ │ └── waves.go │ ├── resource │ │ ├── annotations.go │ │ └── annotations_test.go │ ├── sync_phase.go │ ├── common │ │ └── types_test.go │ ├── sync_phase_test.go │ ├── reconcile.go │ ├── sync_task.go │ ├── sync_task_test.go │ └── doc.go ├── diff │ ├── testdata │ │ ├── grafana-clusterrole-config.json │ │ ├── spinnaker-sa-config.json │ │ ├── wordpress-config.json │ │ ├── aggr-clusterrole-config.json │ │ ├── endpoints-config.json │ │ ├── grafana-clusterrole-live.json │ │ ├── sealedsecret-config.json │ │ ├── wordpress-live.json │ │ ├── deployment-config.json │ │ ├── spinnaker-sa-live.json │ │ ├── mutatingwebhookconfig-config.json │ │ ├── aggr-clusterrole-live.json │ │ ├── sealedsecret-live.json │ │ ├── endpoints-live.json │ │ └── mutatingwebhookconfig-live.json │ └── diff_options.go ├── cache │ ├── doc.go │ ├── predicates.go │ ├── settings_test.go │ ├── resource_test.go │ ├── resource.go │ ├── settings.go │ ├── references.go │ └── predicates_test.go └── engine │ ├── engine_options.go │ └── engine.go 
├── OWNERS ├── .github ├── dependabot.yml └── workflows │ └── ci.yaml ├── Dockerfile ├── docs └── releasing.md ├── Makefile ├── specs ├── design.md ├── image-update-monitoring.md ├── deployment-repo-update.md └── template.md ├── go.mod └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | *~ 3 | .vscode 4 | .idea 5 | coverage.out 6 | vendor/ -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | coverage: 2 | status: 3 | patch: off 4 | project: 5 | default: 6 | threshold: 2 -------------------------------------------------------------------------------- /agent/manifests/base/gitops-agent-sa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: gitops-agent 6 | -------------------------------------------------------------------------------- /pkg/health/testdata/application-healthy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | status: 4 | health: 5 | status: Healthy 6 | -------------------------------------------------------------------------------- /pkg/health/testdata/application-degraded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | status: 4 | health: 5 | status: Degraded 6 | -------------------------------------------------------------------------------- /pkg/utils/kube/resource_filter.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | type ResourceFilter interface { 4 | IsExcludedResource(group, kind, cluster string) bool 5 | } 6 | 
-------------------------------------------------------------------------------- /agent/manifests/namespace-install/gitops-agent-deployment-overlay.yaml: -------------------------------------------------------------------------------- 1 | - {op: add, path: /spec/template/spec/containers/0/command/-, value: --namespaced} 2 | 3 | -------------------------------------------------------------------------------- /agent/manifests/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ./gitops-agent-deploy.yaml 6 | - ./gitops-agent-sa.yaml 7 | -------------------------------------------------------------------------------- /pkg/utils/tracing/tracer_testing/doc.go: -------------------------------------------------------------------------------- 1 | package tracer_testing 2 | 3 | //go:generate go run github.com/golang/mock/mockgen -destination "logger.go" -package "tracer_testing" "github.com/go-logr/logr" "Logger" 4 | -------------------------------------------------------------------------------- /pkg/utils/text/text.go: -------------------------------------------------------------------------------- 1 | package text 2 | 3 | func FirstNonEmpty(args ...string) string { 4 | for _, value := range args { 5 | if len(value) > 0 { 6 | return value 7 | } 8 | } 9 | return "" 10 | } 11 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | owners: 2 | - alexmt 3 | - jessesuen 4 | 5 | approvers: 6 | - alexec 7 | - alexmt 8 | - dthomson25 9 | - jannfis 10 | - jessesuen 11 | - mayzhang2000 12 | - rachelwang20 13 | 14 | reviewers: 15 | - ash2k 16 | -------------------------------------------------------------------------------- /agent/manifests/cluster-install/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | bases: 5 | - ../base 6 | 7 | resources: 8 | - ./gitops-agent-cluster-role-binding.yaml 9 | - ./gitops-agent-cluster-role.yaml 10 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | - package-ecosystem: "github-actions" 8 | directory: "/" 9 | schedule: 10 | interval: "daily" 11 | -------------------------------------------------------------------------------- /agent/manifests/namespace-install/gitops-agent-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: gitops-agent 6 | rules: 7 | - apiGroups: 8 | - '*' 9 | resources: 10 | - '*' 11 | verbs: 12 | - '*' -------------------------------------------------------------------------------- /pkg/utils/kube/testdata/appsdeployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | name: nginx 10 | spec: 11 | containers: 12 | - name: nginx 13 | image: nginx 14 | -------------------------------------------------------------------------------- /pkg/utils/kube/testdata/extensionsdeployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | name: nginx 10 | spec: 11 | containers: 12 | - name: nginx 13 | image: nginx 14 | 
-------------------------------------------------------------------------------- /pkg/utils/kube/testdata/v1HPA.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: php-apache 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: php-apache 10 | minReplicas: 1 11 | maxReplicas: 10 12 | targetCPUUtilizationPercentage: 50 13 | -------------------------------------------------------------------------------- /pkg/utils/tracing/api.go: -------------------------------------------------------------------------------- 1 | package tracing 2 | 3 | /* 4 | Poor Mans OpenTracing. 5 | 6 | Standardizes logging of operation duration. 7 | */ 8 | 9 | type Tracer interface { 10 | StartSpan(operationName string) Span 11 | } 12 | 13 | type Span interface { 14 | SetBaggageItem(key string, value interface{}) 15 | Finish() 16 | } 17 | -------------------------------------------------------------------------------- /agent/manifests/namespace-install/gitops-agent-role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: gitops-agent 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: gitops-agent 10 | subjects: 11 | - kind: ServiceAccount 12 | name: gitops-agent 13 | -------------------------------------------------------------------------------- /pkg/sync/ignore/ignore.go: -------------------------------------------------------------------------------- 1 | package ignore 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/sync/hook" 7 | ) 8 | 9 | // should we Ignore this resource? 
10 | func Ignore(obj *unstructured.Unstructured) bool { 11 | return hook.IsHook(obj) && len(hook.Types(obj)) == 0 12 | } 13 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/hook.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 4 | 5 | func IsHook(obj *unstructured.Unstructured) bool { 6 | value, ok := obj.GetAnnotations()["helm.sh/hook"] 7 | // Helm use the same annotation to identify CRD as hooks, but they are not. 8 | return ok && value != "crd-install" 9 | } 10 | -------------------------------------------------------------------------------- /agent/manifests/cluster-install/gitops-agent-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: gitops-agent 6 | rules: 7 | - apiGroups: 8 | - '*' 9 | resources: 10 | - '*' 11 | verbs: 12 | - '*' 13 | - nonResourceURLs: 14 | - '*' 15 | verbs: 16 | - '*' 17 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/weight_test.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "testing" 5 | 6 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestWeight(t *testing.T) { 12 | assert.Equal(t, Weight(NewPod()), 0) 13 | assert.Equal(t, Weight(Annotate(NewPod(), "helm.sh/hook-weight", "1")), 1) 14 | } 15 | -------------------------------------------------------------------------------- /pkg/diff/testdata/grafana-clusterrole-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "ClusterRole", 3 | "apiVersion": "rbac.authorization.k8s.io/v1", 4 | "metadata": { 5 | "labels": { 6 | "app": "grafana", 7 | "chart": "grafana-1.21.2", 8 | "release": "grafana", 9 | "heritage": "Tiller" 10 | }, 11 | "name": "grafana-clusterrole" 12 | }, 13 | "rules": [] 14 | } -------------------------------------------------------------------------------- /pkg/utils/kube/testdata/cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: custom-resource 5 | namespace: default 6 | spec: 7 | destination: 8 | namespace: default 9 | server: https://kubernetes.default.svc 10 | project: default 11 | source: 12 | path: guestbook 13 | repoURL: https://github.com/argoproj/argocd-example-apps.git -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.14.3 as builder 2 | 3 | WORKDIR /src 4 | 5 | COPY go.mod /src/go.mod 6 | COPY go.sum /src/go.sum 7 | 8 | RUN go mod download 9 | 10 | # Perform the build 11 | COPY . . 
12 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o /dist/gitops ./agent 13 | 14 | 15 | FROM alpine/git:v2.24.3 16 | COPY --from=builder /dist/gitops /usr/local/bin/gitops -------------------------------------------------------------------------------- /pkg/health/testdata/knative-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: serving.knative.dev/v1alpha1 2 | kind: Service 3 | metadata: 4 | name: helloworld 5 | spec: 6 | runLatest: 7 | configuration: 8 | revisionTemplate: 9 | spec: 10 | container: 11 | env: 12 | - name: TARGET 13 | value: world 14 | image: helloworld:latest 15 | -------------------------------------------------------------------------------- /agent/manifests/cluster-install/gitops-agent-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: gitops-agent 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: gitops-agent 10 | subjects: 11 | - kind: ServiceAccount 12 | name: gitops-agent 13 | namespace: gitops-agent 14 | -------------------------------------------------------------------------------- /pkg/utils/kube/testdata/v2beta1HPA.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: php-apache 5 | spec: 6 | scaleTargetRef: 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | name: php-apache 10 | minReplicas: 1 11 | maxReplicas: 10 12 | metrics: 13 | - type: Resource 14 | resource: 15 | name: cpu 16 | targetAverageUtilization: 50 17 | -------------------------------------------------------------------------------- /pkg/utils/tracing/nop.go: -------------------------------------------------------------------------------- 1 | package tracing 2 | 3 | var 
( 4 | _ Tracer = NopTracer{} 5 | _ Span = nopSpan{} 6 | ) 7 | 8 | type NopTracer struct { 9 | } 10 | 11 | func (n NopTracer) StartSpan(operationName string) Span { 12 | return nopSpan{} 13 | } 14 | 15 | type nopSpan struct { 16 | } 17 | 18 | func (n nopSpan) SetBaggageItem(key string, value interface{}) { 19 | } 20 | 21 | func (n nopSpan) Finish() { 22 | } 23 | -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v1-progressing-with-no-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: sample 5 | namespace: argocd 6 | spec: 7 | maxReplicas: 2 8 | minReplicas: 1 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: sample 13 | targetCPUUtilizationPercentage: 2 14 | status: 15 | currentReplicas: 1 16 | desiredReplicas: 1 -------------------------------------------------------------------------------- /agent/manifests/namespace-install/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | bases: 5 | - ../base 6 | 7 | resources: 8 | - ./gitops-agent-role-binding.yaml 9 | - ./gitops-agent-role.yaml 10 | 11 | patchesJson6902: 12 | - target: 13 | group: apps 14 | version: v1 15 | kind: Deployment 16 | name: gitops-agent 17 | path: ./gitops-agent-deployment-overlay.yaml 18 | -------------------------------------------------------------------------------- /pkg/diff/testdata/spinnaker-sa-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ServiceAccount", 4 | "metadata": { 5 | "labels": { 6 | "app": "spinnaker-spinnaker", 7 | "app.kubernetes.io/instance": "spinnaker", 8 | "chart": "spinnaker-1.1.3", 9 | "heritage": "Tiller", 10 | "release": "spinnaker" 11 | 
}, 12 | "name": "spinnaker-spinnaker-halyard", 13 | "namespace": "spinnaker" 14 | } 15 | } -------------------------------------------------------------------------------- /pkg/sync/syncwaves/waves_test.go: -------------------------------------------------------------------------------- 1 | package syncwaves 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | . "github.com/argoproj/gitops-engine/pkg/utils/testing" 9 | ) 10 | 11 | func TestWave(t *testing.T) { 12 | assert.Equal(t, 0, Wave(NewPod())) 13 | assert.Equal(t, 1, Wave(Annotate(NewPod(), "argocd.argoproj.io/sync-wave", "1"))) 14 | assert.Equal(t, 1, Wave(Annotate(NewPod(), "helm.sh/hook-weight", "1"))) 15 | } 16 | -------------------------------------------------------------------------------- /pkg/utils/io/io.go: -------------------------------------------------------------------------------- 1 | package io 2 | 3 | import ( 4 | "os" 5 | ) 6 | 7 | var ( 8 | TempDir string 9 | ) 10 | 11 | func init() { 12 | fileInfo, err := os.Stat("/dev/shm") 13 | if err == nil && fileInfo.IsDir() { 14 | TempDir = "/dev/shm" 15 | } 16 | } 17 | 18 | // DeleteFile is best effort deletion of a file 19 | func DeleteFile(path string) { 20 | if _, err := os.Stat(path); os.IsNotExist(err) { 21 | return 22 | } 23 | _ = os.Remove(path) 24 | } 25 | -------------------------------------------------------------------------------- /pkg/diff/testdata/wordpress-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Secret", 4 | "metadata": { 5 | "name": "wordpress-wordpress", 6 | "labels": { 7 | "app": "wordpress-wordpress", 8 | "chart": "wordpress-5.0.1", 9 | "release": "wordpress", 10 | "heritage": "Tiller" 11 | } 12 | }, 13 | "type": "Opaque", 14 | "data": { 15 | "wordpress-password": "Skt2T0tjMk5PdQ==", 16 | "smtp-password": "" 17 | } 18 | } -------------------------------------------------------------------------------- 
/pkg/sync/hook/helm/weight.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "strconv" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | ) 8 | 9 | // note that we do not take into account if this is or is not a hook, caller should check 10 | func Weight(obj *unstructured.Unstructured) int { 11 | text, ok := obj.GetAnnotations()["helm.sh/hook-weight"] 12 | if ok { 13 | value, err := strconv.Atoi(text) 14 | if err == nil { 15 | return value 16 | } 17 | } 18 | return 0 19 | } 20 | -------------------------------------------------------------------------------- /pkg/cache/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package cache implements lightweight Kubernetes cluster caching that stores only resource references and ownership 3 | references. In addition to references cache might be configured to store custom metadata and whole body of selected 4 | resources. 5 | 6 | The library uses Kubernetes watch API to maintain cache up to date. This approach reduces number of Kubernetes 7 | API requests and provides instant access to the required Kubernetes resources. 
8 | */ 9 | package cache 10 | -------------------------------------------------------------------------------- /pkg/cache/predicates.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | // TopLevelResource returns true if resource has no parents 4 | func TopLevelResource(r *Resource) bool { 5 | return len(r.OwnerRefs) == 0 6 | } 7 | 8 | // ResourceOfGroupKind returns predicate that matches resource by specified group and kind 9 | func ResourceOfGroupKind(group string, kind string) func(r *Resource) bool { 10 | return func(r *Resource) bool { 11 | key := r.ResourceKey() 12 | return key.Group == group && key.Kind == kind 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/hook_test.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 9 | ) 10 | 11 | func TestIsHook(t *testing.T) { 12 | assert.False(t, IsHook(NewPod())) 13 | assert.True(t, IsHook(Annotate(NewPod(), "helm.sh/hook", "anything"))) 14 | // helm calls "crd-install" a hook, but it really can't be treated as such 15 | assert.False(t, IsHook(Annotate(NewCRD(), "helm.sh/hook", "crd-install"))) 16 | } 17 | -------------------------------------------------------------------------------- /pkg/diff/testdata/aggr-clusterrole-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "rbac.authorization.k8s.io/v1", 3 | "kind": "ClusterRole", 4 | "metadata": { 5 | "name": "test-clusterrole", 6 | "labels": { 7 | "app.kubernetes.io/instance": "clusterroles" 8 | } 9 | }, 10 | "aggregationRule": { 11 | "clusterRoleSelectors": [ 12 | { 13 | "matchLabels": { 14 | "rbac.example.com/aggregate-to-test": "true" 15 | } 16 | } 17 | ] 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /pkg/sync/syncwaves/waves.go: -------------------------------------------------------------------------------- 1 | package syncwaves 2 | 3 | import ( 4 | "strconv" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | 8 | "github.com/argoproj/gitops-engine/pkg/sync/common" 9 | helmhook "github.com/argoproj/gitops-engine/pkg/sync/hook/helm" 10 | ) 11 | 12 | func Wave(obj *unstructured.Unstructured) int { 13 | text, ok := obj.GetAnnotations()[common.AnnotationSyncWave] 14 | if ok { 15 | val, err := strconv.Atoi(text) 16 | if err == nil { 17 | return val 18 | } 19 | } 20 | return helmhook.Weight(obj) 21 | } 22 | -------------------------------------------------------------------------------- /pkg/health/testdata/apiservice-v1-true.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: 
v1beta1.admission.cert-manager.io 5 | labels: 6 | app: webhook 7 | app.kubernetes.io/instance: external-dns 8 | spec: 9 | group: admission.cert-manager.io 10 | groupPriorityMinimum: 1000 11 | versionPriority: 15 12 | service: 13 | name: cert-manager-webhook 14 | namespace: external-dns 15 | version: v1beta1 16 | status: 17 | conditions: 18 | - lastTransitionTime: "2019-07-09T14:48:15Z" 19 | message: all checks passed 20 | reason: Passed 21 | status: "True" 22 | type: Available 23 | -------------------------------------------------------------------------------- /pkg/health/testdata/apiservice-v1beta1-true.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1beta1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.admission.cert-manager.io 5 | labels: 6 | app: webhook 7 | app.kubernetes.io/instance: external-dns 8 | spec: 9 | group: admission.cert-manager.io 10 | groupPriorityMinimum: 1000 11 | versionPriority: 15 12 | service: 13 | name: cert-manager-webhook 14 | namespace: external-dns 15 | version: v1beta1 16 | status: 17 | conditions: 18 | - lastTransitionTime: "2019-07-09T14:48:15Z" 19 | message: all checks passed 20 | reason: Passed 21 | status: "True" 22 | type: Available 23 | -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v1-progressing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | annotations: 5 | autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededGetScale","message":"the HPA controller was able to get the target''s current scale"}]' 6 | name: sample 7 | namespace: argocd 8 | spec: 9 | maxReplicas: 1 10 | minReplicas: 1 11 | scaleTargetRef: 12 | apiVersion: apps/v1 13 | kind: Deployment 14 | name: sample 15 
| targetCPUUtilizationPercentage: 2 16 | status: 17 | currentReplicas: 1 18 | desiredReplicas: 0 -------------------------------------------------------------------------------- /pkg/health/testdata/ingress-nonemptylist.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | generation: 1 5 | labels: 6 | app: grafana 7 | app.kubernetes.io/instance: grafana 8 | chart: grafana-1.12.0 9 | heritage: Tiller 10 | release: grafana 11 | name: grafana 12 | namespace: test-ops 13 | spec: 14 | rules: 15 | - host: grafana.com 16 | http: 17 | paths: 18 | - backend: 19 | serviceName: grafana 20 | servicePort: 80 21 | path: / 22 | tls: 23 | - hosts: 24 | - grafana.com 25 | secretName: my-secret 26 | status: 27 | loadBalancer: 28 | ingress: 29 | - {} 30 | -------------------------------------------------------------------------------- /pkg/health/testdata/svc-loadbalancer-unassigned.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: 2018-11-06T01:07:35Z 5 | name: argo-artifacts 6 | namespace: argo 7 | resourceVersion: "346792" 8 | selfLink: /api/v1/namespaces/argo/services/argo-artifacts 9 | uid: 586f5e57-e160-11e8-b3c1-9ae2f452bd03 10 | spec: 11 | clusterIP: 10.105.70.181 12 | externalTrafficPolicy: Cluster 13 | ports: 14 | - name: service 15 | nodePort: 32667 16 | port: 9000 17 | protocol: TCP 18 | targetPort: 9000 19 | selector: 20 | app: minio 21 | release: argo-artifacts 22 | sessionAffinity: None 23 | type: LoadBalancer 24 | status: 25 | loadBalancer: {} 26 | -------------------------------------------------------------------------------- /pkg/utils/tracing/logging_test.go: -------------------------------------------------------------------------------- 1 | package tracing 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/golang/mock/gomock" 7 | 8 | 
"github.com/argoproj/gitops-engine/pkg/utils/tracing/tracer_testing" 9 | ) 10 | 11 | func TestLoggingTracer(t *testing.T) { 12 | c := gomock.NewController(t) 13 | l := tracer_testing.NewMockLogger(c) 14 | gomock.InOrder( 15 | l.EXPECT().WithValues("my-key", "my-value").Return(l), 16 | l.EXPECT().WithValues("operation_name", "my-operation", "time_ms", gomock.Any()).Return(l), 17 | l.EXPECT().Info("Trace"), 18 | ) 19 | 20 | tr := NewLoggingTracer(l) 21 | 22 | span := tr.StartSpan("my-operation") 23 | span.SetBaggageItem("my-key", "my-value") 24 | span.Finish() 25 | } 26 | -------------------------------------------------------------------------------- /pkg/health/testdata/apiservice-v1-false.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.admission.cert-manager.io 5 | labels: 6 | app: webhook 7 | app.kubernetes.io/instance: external-dns 8 | spec: 9 | group: admission.cert-manager.io 10 | groupPriorityMinimum: 1000 11 | versionPriority: 15 12 | service: 13 | name: cert-manager-webhook 14 | namespace: external-dns 15 | version: v1beta1 16 | status: 17 | conditions: 18 | - lastTransitionTime: "2019-06-26T07:17:09Z" 19 | message: endpoints for service/cert-manager-webhook in "external-dns" have no 20 | addresses 21 | reason: MissingEndpoints 22 | status: "False" 23 | type: Available 24 | -------------------------------------------------------------------------------- /pkg/health/testdata/apiservice-v1beta1-false.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiregistration.k8s.io/v1beta1 2 | kind: APIService 3 | metadata: 4 | name: v1beta1.admission.cert-manager.io 5 | labels: 6 | app: webhook 7 | app.kubernetes.io/instance: external-dns 8 | spec: 9 | group: admission.cert-manager.io 10 | groupPriorityMinimum: 1000 11 | versionPriority: 15 12 | service: 13 | name: 
cert-manager-webhook 14 | namespace: external-dns 15 | version: v1beta1 16 | status: 17 | conditions: 18 | - lastTransitionTime: "2019-06-26T07:17:09Z" 19 | message: endpoints for service/cert-manager-webhook in "external-dns" have no 20 | addresses 21 | reason: MissingEndpoints 22 | status: "False" 23 | type: Available 24 | -------------------------------------------------------------------------------- /pkg/utils/testing/unstructured.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "strings" 7 | 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "sigs.k8s.io/yaml" 10 | ) 11 | 12 | func UnstructuredFromFile(path string) *unstructured.Unstructured { 13 | file, err := ioutil.ReadFile(path) 14 | if err != nil { 15 | panic(err) 16 | } 17 | return Unstructured(string(file)) 18 | } 19 | 20 | func Unstructured(text string) *unstructured.Unstructured { 21 | un := &unstructured.Unstructured{} 22 | var err error 23 | if strings.HasPrefix(text, "{") { 24 | err = json.Unmarshal([]byte(text), &un) 25 | } else { 26 | err = yaml.Unmarshal([]byte(text), &un) 27 | } 28 | if err != nil { 29 | panic(err) 30 | } 31 | return un 32 | } 33 | -------------------------------------------------------------------------------- /pkg/health/testdata/ingress-unassigned.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | kubernetes.io/ingress.class: nginx 6 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 7 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 8 | creationTimestamp: 2018-09-20T06:47:27Z 9 | generation: 9 10 | name: argocd-server-ingress 11 | namespace: argocd 12 | resourceVersion: "23207680" 13 | selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress 14 | uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb 15 | 
spec: 16 | rules: 17 | - host: example.argoproj.io 18 | http: 19 | paths: 20 | - backend: 21 | serviceName: argocd-server 22 | servicePort: https 23 | status: 24 | loadBalancer: {} 25 | -------------------------------------------------------------------------------- /docs/releasing.md: -------------------------------------------------------------------------------- 1 | # Releasing 2 | 3 | This document describes the `gitops-engine` library releasing process. 4 | 5 | # Versioning 6 | 7 | * The library is versioned using the [semantic versioning](http://semver.org/): a new version will be backwards-compatible 8 | with earlier versions within a single major version. 9 | * The library has its own release cycle and is not tied to the Argo CD release cycle. 10 | * The first library release is v0.1.0. 11 | 12 | # Release process 13 | 14 | * Release branch is created for every minor release. 15 | * The branch name should use the following convention: `release-<major>.<minor>`. For example all v0.1 releases should 16 | be in `release-0.1` branch. 17 | * Actual release is a git tag which uses the following naming convention: `v<major>.<minor>.<patch>`. For example: `v0.1.0`, `v0.1.1` etc.
-------------------------------------------------------------------------------- /pkg/health/testdata/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | kubernetes.io/ingress.class: nginx 6 | nginx.ingress.kubernetes.io/force-ssl-redirect: "true" 7 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 8 | creationTimestamp: 2018-09-20T06:47:27Z 9 | generation: 9 10 | name: argocd-server-ingress 11 | namespace: argocd 12 | resourceVersion: "23207680" 13 | selfLink: /apis/extensions/v1beta1/namespaces/argocd/ingresses/argocd-server-ingress 14 | uid: 09927cae-bca1-11e8-bbd2-42010a8a00bb 15 | spec: 16 | rules: 17 | - host: example.argoproj.io 18 | http: 19 | paths: 20 | - backend: 21 | serviceName: argocd-server 22 | servicePort: https 23 | status: 24 | loadBalancer: 25 | ingress: 26 | - ip: 1.2.3.4 27 | -------------------------------------------------------------------------------- /pkg/sync/ignore/ignore_test.go: -------------------------------------------------------------------------------- 1 | package ignore 2 | 3 | import ( 4 | "testing" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | 8 | "github.com/argoproj/gitops-engine/pkg/sync/common" 9 | 10 | "github.com/stretchr/testify/assert" 11 | 12 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 13 | ) 14 | 15 | func newHook(obj *unstructured.Unstructured, hookType common.HookType) *unstructured.Unstructured { 16 | return Annotate(obj, "argocd.argoproj.io/hook", string(hookType)) 17 | } 18 | 19 | func TestIgnore(t *testing.T) { 20 | assert.False(t, Ignore(NewPod())) 21 | assert.False(t, Ignore(newHook(NewPod(), "Sync"))) 22 | assert.True(t, Ignore(newHook(NewPod(), "garbage"))) 23 | assert.False(t, Ignore(HelmHook(NewPod(), "pre-install"))) 24 | assert.True(t, Ignore(HelmHook(NewPod(), "garbage"))) 25 | } 26 | -------------------------------------------------------------------------------- /pkg/sync/resource/annotations.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "strings" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | ) 8 | 9 | func GetAnnotationCSVs(obj *unstructured.Unstructured, key string) []string { 10 | // may for de-duping 11 | valuesToBool := make(map[string]bool) 12 | for _, item := range strings.Split(obj.GetAnnotations()[key], ",") { 13 | val := strings.TrimSpace(item) 14 | if val != "" { 15 | valuesToBool[val] = true 16 | } 17 | } 18 | var values []string 19 | for val := range valuesToBool { 20 | values = append(values, val) 21 | } 22 | return values 23 | } 24 | 25 | func HasAnnotationOption(obj *unstructured.Unstructured, key, val string) bool { 26 | for _, item := range GetAnnotationCSVs(obj, key) { 27 | if item == val { 28 | return true 29 | } 30 | } 31 | return false 32 | } 33 | -------------------------------------------------------------------------------- /pkg/health/testdata/svc-clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: | 6 | 
{"apiVersion":"v1","kind":"Service","metadata":{"annotations":{},"name":"argocd-metrics","namespace":"argocd"},"spec":{"ports":[{"name":"http","port":8082,"protocol":"TCP","targetPort":8082}],"selector":{"app":"argocd-server"}}} 7 | creationTimestamp: 2018-10-27T06:36:27Z 8 | name: argocd-metrics 9 | namespace: argocd 10 | resourceVersion: "1131" 11 | selfLink: /api/v1/namespaces/argocd/services/argocd-metrics 12 | uid: a1f65069-d9b2-11e8-b3c1-9ae2f452bd03 13 | spec: 14 | clusterIP: 10.96.199.2 15 | ports: 16 | - name: http 17 | port: 8082 18 | protocol: TCP 19 | targetPort: 8082 20 | selector: 21 | app: argocd-server 22 | sessionAffinity: None 23 | type: ClusterIP 24 | status: 25 | loadBalancer: {} 26 | -------------------------------------------------------------------------------- /pkg/sync/sync_phase.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/sync/common" 7 | "github.com/argoproj/gitops-engine/pkg/sync/hook" 8 | ) 9 | 10 | func syncPhases(obj *unstructured.Unstructured) []common.SyncPhase { 11 | if hook.Skip(obj) { 12 | return nil 13 | } else if hook.IsHook(obj) { 14 | phasesMap := make(map[common.SyncPhase]bool) 15 | for _, hookType := range hook.Types(obj) { 16 | switch hookType { 17 | case common.HookTypePreSync, common.HookTypeSync, common.HookTypePostSync, common.HookTypeSyncFail: 18 | phasesMap[common.SyncPhase(hookType)] = true 19 | } 20 | } 21 | var phases []common.SyncPhase 22 | for phase := range phasesMap { 23 | phases = append(phases, phase) 24 | } 25 | return phases 26 | } else { 27 | return []common.SyncPhase{common.SyncPhaseSync} 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /pkg/sync/hook/delete_policy.go: -------------------------------------------------------------------------------- 1 | package hook 2 | 3 | import 
( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/sync/common" 7 | helmhook "github.com/argoproj/gitops-engine/pkg/sync/hook/helm" 8 | resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" 9 | ) 10 | 11 | func DeletePolicies(obj *unstructured.Unstructured) []common.HookDeletePolicy { 12 | var policies []common.HookDeletePolicy 13 | for _, text := range resourceutil.GetAnnotationCSVs(obj, common.AnnotationKeyHookDeletePolicy) { 14 | p, ok := common.NewHookDeletePolicy(text) 15 | if ok { 16 | policies = append(policies, p) 17 | } 18 | } 19 | for _, p := range helmhook.DeletePolicies(obj) { 20 | policies = append(policies, p.DeletePolicy()) 21 | } 22 | if len(policies) == 0 { 23 | policies = append(policies, common.HookDeletePolicyBeforeHookCreation) 24 | } 25 | return policies 26 | } 27 | -------------------------------------------------------------------------------- /pkg/utils/kube/convert.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "github.com/argoproj/gitops-engine/pkg/utils/kube/scheme" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | "k8s.io/apimachinery/pkg/runtime/schema" 9 | ) 10 | 11 | func convertToVersionWithScheme(obj *unstructured.Unstructured, group string, version string) (*unstructured.Unstructured, error) { 12 | s := scheme.Scheme 13 | object, err := s.ConvertToVersion(obj, runtime.InternalGroupVersioner) 14 | if err != nil { 15 | return nil, err 16 | } 17 | unmarshalledObj, err := s.ConvertToVersion(object, schema.GroupVersion{Group: group, Version: version}) 18 | if err != nil { 19 | return nil, err 20 | } 21 | unstrBody, err := runtime.DefaultUnstructuredConverter.ToUnstructured(unmarshalledObj) 22 | if err != nil { 23 | return nil, err 24 | } 25 | return &unstructured.Unstructured{Object: unstrBody}, nil 26 | } 27 | 
-------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - 'master' 7 | - 'release-*' 8 | pull_request: 9 | branches: 10 | - 'master' 11 | jobs: 12 | test: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@master 16 | - uses: actions/cache@v2 17 | with: 18 | path: ~/go/pkg/mod 19 | key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} 20 | restore-keys: | 21 | ${{ runner.os }}-go- 22 | - uses: actions/setup-go@v2.1.3 23 | with: 24 | go-version: '1.15.6' 25 | - run: make test 26 | - uses: actions-contrib/golangci-lint@v1 27 | with: 28 | args: run --timeout=5m 29 | env: 30 | GOROOT: "" 31 | - uses: codecov/codecov-action@v1.2.1 32 | with: 33 | token: ${{ secrets.CODECOV_TOKEN }} #required 34 | file: ./coverage.out 35 | -------------------------------------------------------------------------------- /pkg/diff/testdata/endpoints-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Endpoints", 4 | "metadata": { 5 | "annotations": { 6 | "description": "A workaround to support a set of backend IPs for solr", 7 | "linkerd.io/inject": "disabled" 8 | }, 9 | "labels": { 10 | "app.kubernetes.io/instance": "guestbook" 11 | }, 12 | "name": "solrcloud", 13 | "namespace": "default" 14 | }, 15 | "subsets": [ 16 | { 17 | "addresses": [ 18 | { 19 | "ip": "172.20.10.97" 20 | }, 21 | { 22 | "ip": "172.20.10.98" 23 | }, 24 | { 25 | "ip": "172.20.10.99" 26 | }, 27 | { 28 | "ip": "172.20.10.100" 29 | }, 30 | { 31 | "ip": "172.20.10.101" 32 | } 33 | ], 34 | "ports": [ 35 | { 36 | "name": "solr-http", 37 | "port": 8080 38 | } 39 | ] 40 | } 41 | ] 42 | } -------------------------------------------------------------------------------- /pkg/diff/testdata/grafana-clusterrole-live.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "rbac.authorization.k8s.io/v1", 3 | "kind": "ClusterRole", 4 | "metadata": { 5 | "annotations": { 6 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"rbac.authorization.k8s.io/v1\",\"kind\":\"ClusterRole\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"grafana\",\"chart\":\"grafana-1.21.2\",\"heritage\":\"Tiller\",\"release\":\"grafana\"},\"name\":\"grafana-clusterrole\"},\"rules\":[]}\n" 7 | }, 8 | "creationTimestamp": "2018-12-26T23:26:41Z", 9 | "labels": { 10 | "app": "grafana", 11 | "chart": "grafana-1.21.2", 12 | "heritage": "Tiller", 13 | "release": "grafana" 14 | }, 15 | "name": "grafana-clusterrole", 16 | "resourceVersion": "13174", 17 | "selfLink": "/apis/rbac.authorization.k8s.io/v1/clusterroles/grafana-clusterrole", 18 | "uid": "b30316d3-0965-11e9-9673-ae0a6e5594a2" 19 | }, 20 | "rules": null 21 | } -------------------------------------------------------------------------------- /pkg/health/testdata/svc-loadbalancer-nonemptylist.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600" 6 | creationTimestamp: 2018-06-05T23:34:58Z 7 | labels: 8 | app.kubernetes.io/instance: argocd-cdp 9 | name: argocd-server 10 | namespace: argocd 11 | resourceVersion: "32559487" 12 | selfLink: /api/v1/namespaces/argocd/services/argocd-server 13 | uid: 0f5885a9-6919-11e8-ad29-020124679688 14 | spec: 15 | clusterIP: 100.69.46.185 16 | externalTrafficPolicy: Cluster 17 | ports: 18 | - name: http 19 | nodePort: 30354 20 | port: 80 21 | protocol: TCP 22 | targetPort: 8080 23 | - name: https 24 | nodePort: 31866 25 | port: 443 26 | protocol: TCP 27 | targetPort: 8080 28 | selector: 29 | app: argocd-server 30 | sessionAffinity: None 31 | type: LoadBalancer 32 | status: 33 
| loadBalancer: 34 | ingress: 35 | - {} 36 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # docker image publishing options 2 | DOCKER_PUSH?=false 3 | IMAGE_NAMESPACE?= 4 | IMAGE_TAG?=latest 5 | ifdef IMAGE_NAMESPACE 6 | IMAGE_PREFIX=${IMAGE_NAMESPACE}/ 7 | endif 8 | 9 | .PHONY: generate 10 | generate: agent-manifests 11 | 12 | .PHONY: test 13 | test: 14 | go test -race ./... -coverprofile=coverage.out 15 | 16 | .PHONY: lint 17 | lint: 18 | golangci-lint run 19 | 20 | .PHONY: agent-image 21 | agent-image: 22 | docker build -t $(IMAGE_PREFIX)gitops-agent . -f Dockerfile 23 | @if [ "$(DOCKER_PUSH)" = "true" ] ; then docker push $(IMAGE_PREFIX)gitops-agent:$(IMAGE_TAG) ; fi 24 | 25 | .PHONY: agent-manifests 26 | agent-manifests: 27 | kustomize build ./agent/manifests/cluster-install > ./agent/manifests/install.yaml 28 | kustomize build ./agent/manifests/namespace-install > ./agent/manifests/install-namespaced.yaml 29 | 30 | .PHONY: generate-mocks 31 | generate-mocks: 32 | go generate -x -v "github.com/argoproj/gitops-engine/pkg/utils/tracing/tracer_testing" 33 | -------------------------------------------------------------------------------- /pkg/health/testdata/svc-loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "600" 6 | creationTimestamp: 2018-06-05T23:34:58Z 7 | labels: 8 | app.kubernetes.io/instance: argocd-cdp 9 | name: argocd-server 10 | namespace: argocd 11 | resourceVersion: "32559487" 12 | selfLink: /api/v1/namespaces/argocd/services/argocd-server 13 | uid: 0f5885a9-6919-11e8-ad29-020124679688 14 | spec: 15 | clusterIP: 100.69.46.185 16 | externalTrafficPolicy: Cluster 17 | ports: 18 | - name: http 19 | nodePort: 30354 20 | port: 80 21 | 
protocol: TCP 22 | targetPort: 8080 23 | - name: https 24 | nodePort: 31866 25 | port: 443 26 | protocol: TCP 27 | targetPort: 8080 28 | selector: 29 | app: argocd-server 30 | sessionAffinity: None 31 | type: LoadBalancer 32 | status: 33 | loadBalancer: 34 | ingress: 35 | - hostname: abc123.us-west-2.elb.amazonaws.com 36 | -------------------------------------------------------------------------------- /pkg/diff/testdata/sealedsecret-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "SealedSecret", 3 | "apiVersion": "bitnami.com/v1alpha1", 4 | "metadata": { 5 | "name": "mysecret", 6 | "namespace": "default", 7 | "creationTimestamp": null 8 | }, 9 | "spec": { 10 | "encryptedData": { 11 | "foo": "AgCW7b6fmehUc6MmdTfbAEpOx9zcYuXZGKID6llLUqLnXiLHfeF8yPY6wpecv+/GETeu+neCHZbonSRGWsK723rqD2S9GYUT6WT1TJW1+dYEJQrCYg3AYw9Awn7Tli5854oGfvKH2cfu++Yjq7EW7SfEA3E+12uAYM+ennQeib4HNmjxJUf3dVAjvYBrpyl/zMflla5Mi1mkBSLgqzlUitEyCNx63lFCdRw03BZAMOYuMVVRFDP5iobQFBl/NV9f04eMV1B8aTVvGoU81eX2FDZ6b+/DQx4TVdGuZTgViPcThiih4ahB2/9Qyk5OZlubmQeM7qMn8uo53kZy/3sOhqikr5TNGz52BJCoRFhw3HYw4Qm0NTxC/lYnuxnkHq8eXj1KMYVR3YrxnLy3sMH07I3OGsczwabUFvGyOp8SOOOICJ1Np7DmQSW/6U3bHey3sILAJYrZkeU8hjPZ1OU+7Ydr9En5sr048xuvfNblQCfhNGfAePNI0gD/Y6+ubmHM6s/vK4c3kDDVV2nY9suE84T6Zoxd0zEZXftay6+GaFz4lb3qKdFTxTMegzU5RxGxFm0bF75Y7EIWjulhNnC2PzQ6EK6sH5R6HwfZ5pbE3MUMYF4Ww3v3oo3z15EJ7l86//bIiKCQcuM7tFNsJYlubRAEhpcZFunUSKW8eqodjxstYnCwqNxHFKr8aybtsUouC93ZZ3M=" 12 | } 13 | } 14 | } -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v1-healthy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | annotations: 5 | autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"SucceededRescale","message":"the HPA controller was able 
to update the target scale to 1"},{"type":"ScalingActive","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetResourceMetric","message":"the 6 | HPA was unable to compute the replica count: unable to get metrics for resource 7 | cpu: unable to fetch metrics from resource metrics API: the server is currently 8 | unable to handle the request (get pods.metrics.k8s.io)"}]' 9 | name: sample 10 | namespace: argocd 11 | spec: 12 | maxReplicas: 2 13 | minReplicas: 1 14 | scaleTargetRef: 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | name: sample 18 | targetCPUUtilizationPercentage: 2 19 | status: 20 | currentReplicas: 1 21 | desiredReplicas: 1 -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v1-degraded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | annotations: 5 | autoscaling.alpha.kubernetes.io/conditions: '[{"type":"AbleToScale","status":"True","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetScale","message":"the HPA controller was unable to get the target''s current scale"},{"type":"ScalingActive","status":"False","lastTransitionTime":"2020-11-23T19:38:38Z","reason":"FailedGetResourceMetric","message":"the 6 | HPA was unable to compute the replica count: unable to get metrics for resource 7 | cpu: unable to fetch metrics from resource metrics API: the server is currently 8 | unable to handle the request (get pods.metrics.k8s.io)"}]' 9 | name: sample 10 | namespace: argocd 11 | spec: 12 | maxReplicas: 1 13 | minReplicas: 1 14 | scaleTargetRef: 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | name: sample 18 | targetCPUUtilizationPercentage: 2 19 | status: 20 | currentReplicas: 1 21 | desiredReplicas: 0 -------------------------------------------------------------------------------- /pkg/health/testdata/pvc-pending.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: | 6 | {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"working-pvc"},"name":"testpvc-2","namespace":"argocd"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"2Gi"}},"storageClassName":"slow"}} 7 | volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/aws-ebs 8 | creationTimestamp: 2018-08-27T23:00:54Z 9 | finalizers: 10 | - kubernetes.io/pvc-protection 11 | labels: 12 | app.kubernetes.io/instance: working-pvc 13 | name: testpvc-2 14 | namespace: argocd 15 | resourceVersion: "323141" 16 | selfLink: /api/v1/namespaces/argocd/persistentvolumeclaims/testpvc-2 17 | uid: 0cedfc44-aa4d-11e8-a271-025000000001 18 | spec: 19 | accessModes: 20 | - ReadWriteOnce 21 | resources: 22 | requests: 23 | storage: 2Gi 24 | storageClassName: slow 25 | status: 26 | phase: Pending 27 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/delete_policy_test.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/argoproj/gitops-engine/pkg/sync/common" 9 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 10 | ) 11 | 12 | func TestDeletePolicies(t *testing.T) { 13 | assert.Nil(t, DeletePolicies(NewPod())) 14 | assert.Equal(t, []DeletePolicy{BeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "before-hook-creation"))) 15 | assert.Equal(t, []DeletePolicy{HookSucceeded}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded"))) 16 | assert.Equal(t, []DeletePolicy{HookFailed}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-failed"))) 17 | } 18 | 19 | func TestDeletePolicy_DeletePolicy(t *testing.T) { 20 | assert.Equal(t, common.HookDeletePolicyBeforeHookCreation, BeforeHookCreation.DeletePolicy()) 21 | assert.Equal(t, common.HookDeletePolicyHookSucceeded, HookSucceeded.DeletePolicy()) 22 | assert.Equal(t, common.HookDeletePolicyHookFailed, HookFailed.DeletePolicy()) 23 | } 24 | -------------------------------------------------------------------------------- /pkg/diff/diff_options.go: -------------------------------------------------------------------------------- 1 | package diff 2 | 3 | import ( 4 | "github.com/go-logr/logr" 5 | "k8s.io/klog/v2/klogr" 6 | ) 7 | 8 | type Option func(*options) 9 | 10 | // Holds diffing settings 11 | type options struct { 12 | // If set to true then differences caused by aggregated roles in RBAC resources are ignored. 
13 | ignoreAggregatedRoles bool 14 | normalizer Normalizer 15 | log logr.Logger 16 | } 17 | 18 | func applyOptions(opts []Option) options { 19 | o := options{ 20 | ignoreAggregatedRoles: false, 21 | normalizer: GetNoopNormalizer(), 22 | log: klogr.New(), 23 | } 24 | for _, opt := range opts { 25 | opt(&o) 26 | } 27 | return o 28 | } 29 | 30 | func IgnoreAggregatedRoles(ignore bool) Option { 31 | return func(o *options) { 32 | o.ignoreAggregatedRoles = ignore 33 | } 34 | } 35 | 36 | func WithNormalizer(normalizer Normalizer) Option { 37 | return func(o *options) { 38 | o.normalizer = normalizer 39 | } 40 | } 41 | 42 | func WithLogr(log logr.Logger) Option { 43 | return func(o *options) { 44 | o.log = log 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /pkg/diff/testdata/wordpress-live.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "data": { 4 | "smtp-password": null, 5 | "wordpress-password": "Skt2T0tjMk5PdQ==" 6 | }, 7 | "kind": "Secret", 8 | "metadata": { 9 | "annotations": { 10 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"smtp-password\":\"\",\"wordpress-password\":\"Xkt2T0tjMk5PdQ==\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"wordpress-wordpress\",\"chart\":\"wordpress-5.0.1\",\"heritage\":\"Tiller\",\"release\":\"wordpress\"},\"name\":\"wordpress-wordpress\",\"namespace\":\"argocd\"},\"type\":\"Opaque\"}\n" 11 | }, 12 | "creationTimestamp": "2018-12-19T09:15:40Z", 13 | "labels": { 14 | "app": "wordpress-wordpress", 15 | "chart": "wordpress-5.0.1", 16 | "heritage": "Tiller", 17 | "release": "wordpress" 18 | }, 19 | "name": "wordpress-wordpress", 20 | "namespace": "argocd", 21 | "resourceVersion": "27442", 22 | "selfLink": "/api/v1/namespaces/argocd/secrets/wordpress-wordpress", 23 | "uid": "a782f882-036e-11e9-92c4-ba8ba592c12d" 24 | }, 25 | "type": "Opaque" 26 
| } -------------------------------------------------------------------------------- /pkg/diff/testdata/deployment-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "apps/v1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "labels": { 6 | "app.kubernetes.io/instance": "guestbook" 7 | }, 8 | "name": "guestbook-ui", 9 | "namespace": "default" 10 | }, 11 | "spec": { 12 | "replicas": 1, 13 | "revisionHistoryLimit": 3, 14 | "selector": { 15 | "matchLabels": { 16 | "app": "guestbook-ui" 17 | } 18 | }, 19 | "strategy": { 20 | "type": "RollingUpdate" 21 | }, 22 | "template": { 23 | "metadata": { 24 | "labels": { 25 | "app": "guestbook-ui" 26 | } 27 | }, 28 | "spec": { 29 | "containers": [ 30 | { 31 | "image": "gcr.io/heptio-images/ks-guestbook-demo:0.2", 32 | "name": "guestbook-ui", 33 | "ports": [ 34 | { 35 | "containerPort": 80 36 | } 37 | ], 38 | "env": [ 39 | { 40 | "name": "VAR1", 41 | "value": "something" 42 | } 43 | ] 44 | } 45 | ] 46 | } 47 | } 48 | } 49 | } -------------------------------------------------------------------------------- /pkg/sync/hook/hook.go: -------------------------------------------------------------------------------- 1 | package hook 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/sync/common" 7 | helmhook "github.com/argoproj/gitops-engine/pkg/sync/hook/helm" 8 | resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" 9 | ) 10 | 11 | func IsHook(obj *unstructured.Unstructured) bool { 12 | _, ok := obj.GetAnnotations()[common.AnnotationKeyHook] 13 | if ok { 14 | return !Skip(obj) 15 | } 16 | return helmhook.IsHook(obj) 17 | } 18 | 19 | func Skip(obj *unstructured.Unstructured) bool { 20 | for _, hookType := range Types(obj) { 21 | if hookType == common.HookTypeSkip { 22 | return len(Types(obj)) == 1 23 | } 24 | } 25 | return false 26 | } 27 | 28 | func Types(obj *unstructured.Unstructured) 
[]common.HookType { 29 | var types []common.HookType 30 | for _, text := range resourceutil.GetAnnotationCSVs(obj, common.AnnotationKeyHook) { 31 | t, ok := common.NewHookType(text) 32 | if ok { 33 | types = append(types, t) 34 | } 35 | } 36 | // we ignore Helm hooks if we have Argo hook 37 | if len(types) == 0 { 38 | for _, t := range helmhook.Types(obj) { 39 | types = append(types, t.HookType()) 40 | } 41 | } 42 | return types 43 | } 44 | -------------------------------------------------------------------------------- /pkg/diff/testdata/spinnaker-sa-live.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ServiceAccount", 4 | "metadata": { 5 | "annotations": { 6 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"ServiceAccount\",\"metadata\":{\"annotations\":{},\"labels\":{\"app\":\"spinnaker-spinnaker\",\"app.kubernetes.io/instance\":\"spinnaker\",\"chart\":\"spinnaker-1.1.3\",\"heritage\":\"Tiller\",\"release\":\"spinnaker\"},\"name\":\"spinnaker-spinnaker-halyard\",\"namespace\":\"spinnaker\"}}\n" 7 | }, 8 | "creationTimestamp": "2018-09-20T20:49:20Z", 9 | "labels": { 10 | "app": "spinnaker-spinnaker", 11 | "app.kubernetes.io/instance": "spinnaker", 12 | "chart": "spinnaker-1.1.3", 13 | "heritage": "Tiller", 14 | "release": "spinnaker" 15 | }, 16 | "name": "spinnaker-spinnaker-halyard", 17 | "namespace": "spinnaker", 18 | "resourceVersion": "12102423", 19 | "selfLink": "/api/v1/namespaces/spinnaker/serviceaccounts/spinnaker-spinnaker-halyard", 20 | "uid": "a5a9401b-bd16-11e8-bbd2-42010a8a00bb" 21 | }, 22 | "secrets": [ 23 | { 24 | "name": "spinnaker-spinnaker-halyard-token-7m6xs" 25 | } 26 | ] 27 | } -------------------------------------------------------------------------------- /pkg/health/health_service.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "fmt" 5 | 6 | 
"github.com/argoproj/gitops-engine/pkg/utils/kube" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | func getServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 13 | gvk := obj.GroupVersionKind() 14 | switch gvk { 15 | case corev1.SchemeGroupVersion.WithKind(kube.ServiceKind): 16 | var service corev1.Service 17 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &service) 18 | if err != nil { 19 | return nil, fmt.Errorf("failed to convert unstructured Service to typed: %v", err) 20 | } 21 | return getCorev1ServiceHealth(&service) 22 | default: 23 | return nil, fmt.Errorf("unsupported Service GVK: %s", gvk) 24 | } 25 | } 26 | 27 | func getCorev1ServiceHealth(service *corev1.Service) (*HealthStatus, error) { 28 | health := HealthStatus{Status: HealthStatusHealthy} 29 | if service.Spec.Type == corev1.ServiceTypeLoadBalancer { 30 | if len(service.Status.LoadBalancer.Ingress) > 0 { 31 | health.Status = HealthStatusHealthy 32 | } else { 33 | health.Status = HealthStatusProgressing 34 | } 35 | } 36 | return &health, nil 37 | } 38 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/type.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/sync/common" 7 | resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" 8 | ) 9 | 10 | type Type string 11 | 12 | const ( 13 | PreInstall Type = "pre-install" 14 | PreUpgrade Type = "pre-upgrade" 15 | PostUpgrade Type = "post-upgrade" 16 | PostInstall Type = "post-install" 17 | ) 18 | 19 | func NewType(t string) (Type, bool) { 20 | return Type(t), 21 | t == string(PreInstall) || 22 | t == string(PreUpgrade) || 23 | t == string(PostUpgrade) || 24 | t == string(PostInstall) 25 | } 
26 | 27 | var hookTypes = map[Type]common.HookType{ 28 | PreInstall: common.HookTypePreSync, 29 | PreUpgrade: common.HookTypePreSync, 30 | PostUpgrade: common.HookTypePostSync, 31 | PostInstall: common.HookTypePostSync, 32 | } 33 | 34 | func (t Type) HookType() common.HookType { 35 | return hookTypes[t] 36 | } 37 | 38 | func Types(obj *unstructured.Unstructured) []Type { 39 | var types []Type 40 | for _, text := range resourceutil.GetAnnotationCSVs(obj, "helm.sh/hook") { 41 | t, ok := NewType(text) 42 | if ok { 43 | types = append(types, t) 44 | } 45 | } 46 | return types 47 | } 48 | -------------------------------------------------------------------------------- /pkg/utils/kube/scheme/scheme.go: -------------------------------------------------------------------------------- 1 | package scheme 2 | 3 | import ( 4 | "k8s.io/kubernetes/pkg/api/legacyscheme" 5 | 6 | _ "k8s.io/kubernetes/pkg/apis/admission/install" 7 | _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" 8 | _ "k8s.io/kubernetes/pkg/apis/apps/install" 9 | _ "k8s.io/kubernetes/pkg/apis/authentication/install" 10 | _ "k8s.io/kubernetes/pkg/apis/authorization/install" 11 | _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" 12 | _ "k8s.io/kubernetes/pkg/apis/batch/install" 13 | _ "k8s.io/kubernetes/pkg/apis/certificates/install" 14 | _ "k8s.io/kubernetes/pkg/apis/coordination/install" 15 | _ "k8s.io/kubernetes/pkg/apis/core/install" 16 | _ "k8s.io/kubernetes/pkg/apis/discovery/install" 17 | _ "k8s.io/kubernetes/pkg/apis/events/install" 18 | _ "k8s.io/kubernetes/pkg/apis/extensions/install" 19 | _ "k8s.io/kubernetes/pkg/apis/flowcontrol/install" 20 | _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" 21 | _ "k8s.io/kubernetes/pkg/apis/networking/install" 22 | _ "k8s.io/kubernetes/pkg/apis/node/install" 23 | _ "k8s.io/kubernetes/pkg/apis/policy/install" 24 | _ "k8s.io/kubernetes/pkg/apis/rbac/install" 25 | _ "k8s.io/kubernetes/pkg/apis/scheduling/install" 26 | _ 
"k8s.io/kubernetes/pkg/apis/storage/install" 27 | ) 28 | 29 | var Scheme = legacyscheme.Scheme 30 | -------------------------------------------------------------------------------- /agent/manifests/base/gitops-agent-deploy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: gitops-agent 6 | spec: 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: gitops-agent 12 | template: 13 | metadata: 14 | labels: 15 | app.kubernetes.io/name: gitops-agent 16 | spec: 17 | containers: 18 | - name: gitops-agent 19 | command: 20 | - gitops 21 | - /tmp/git/repo 22 | - --path 23 | - guestbook 24 | image: argoproj/gitops-agent:latest 25 | volumeMounts: 26 | - name: git 27 | mountPath: /tmp/git 28 | 29 | - name: git-sync 30 | env: 31 | - name: GIT_SYNC_REPO 32 | value: https://github.com/argoproj/argocd-example-apps 33 | args: 34 | - --webhook-url 35 | - http://localhost:9001/api/v1/sync 36 | - --dest 37 | - repo 38 | image: k8s.gcr.io/git-sync:v3.1.6 39 | volumeMounts: 40 | - name: git 41 | mountPath: /tmp/git 42 | 43 | serviceAccountName: gitops-agent 44 | 45 | volumes: 46 | - emptyDir: {} 47 | name: git -------------------------------------------------------------------------------- /pkg/engine/engine_options.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "github.com/go-logr/logr" 5 | "k8s.io/klog/v2/klogr" 6 | 7 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 8 | "github.com/argoproj/gitops-engine/pkg/utils/tracing" 9 | ) 10 | 11 | type Option func(*options) 12 | 13 | type options struct { 14 | log logr.Logger 15 | kubectl kube.Kubectl 16 | } 17 | 18 | func applyOptions(opts []Option) options { 19 | log := klogr.New() 20 | o := options{ 21 | log: log, 22 | kubectl: &kube.KubectlCmd{ 23 | Log: log, 24 | Tracer: tracing.NopTracer{}, 25 | }, 26 | } 27 
| for _, opt := range opts { 28 | opt(&o) 29 | } 30 | return o 31 | } 32 | 33 | func WithLogr(log logr.Logger) Option { 34 | return func(o *options) { 35 | o.log = log 36 | if kcmd, ok := o.kubectl.(*kube.KubectlCmd); ok { 37 | kcmd.Log = log 38 | } 39 | } 40 | } 41 | 42 | // SetTracer sets the tracer to use. 43 | func SetTracer(tracer tracing.Tracer) Option { 44 | return func(o *options) { 45 | if kcmd, ok := o.kubectl.(*kube.KubectlCmd); ok { 46 | kcmd.Tracer = tracer 47 | } 48 | } 49 | } 50 | 51 | // WithKubectl allows to override kubectl wrapper implementation. 52 | func WithKubectl(kubectl kube.Kubectl) Option { 53 | return func(o *options) { 54 | o.kubectl = kubectl 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /pkg/sync/hook/delete_policy_test.go: -------------------------------------------------------------------------------- 1 | package hook 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/argoproj/gitops-engine/pkg/sync/common" 9 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 10 | ) 11 | 12 | func TestDeletePolicies(t *testing.T) { 13 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(NewPod())) 14 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "garbage"))) 15 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyBeforeHookCreation}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation"))) 16 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded"))) 17 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookFailed}, DeletePolicies(Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "HookFailed"))) 18 | // Helm test 19 | assert.Equal(t, []common.HookDeletePolicy{common.HookDeletePolicyHookSucceeded}, DeletePolicies(Annotate(NewPod(), "helm.sh/hook-delete-policy", "hook-succeeded"))) 20 | } 21 | -------------------------------------------------------------------------------- /pkg/health/health_pvc.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | func getPVCHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 13 | gvk := obj.GroupVersionKind() 14 | switch gvk { 15 | case corev1.SchemeGroupVersion.WithKind(kube.PersistentVolumeClaimKind): 16 | var pvc corev1.PersistentVolumeClaim 17 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &pvc) 18 | if err != nil { 19 | return nil, fmt.Errorf("failed to convert 
unstructured PersistentVolumeClaim to typed: %v", err) 20 | } 21 | return getCorev1PVCHealth(&pvc) 22 | default: 23 | return nil, fmt.Errorf("unsupported PersistentVolumeClaim GVK: %s", gvk) 24 | } 25 | } 26 | 27 | func getCorev1PVCHealth(pvc *corev1.PersistentVolumeClaim) (*HealthStatus, error) { 28 | var status HealthStatusCode 29 | switch pvc.Status.Phase { 30 | case corev1.ClaimLost: 31 | status = HealthStatusDegraded 32 | case corev1.ClaimPending: 33 | status = HealthStatusProgressing 34 | case corev1.ClaimBound: 35 | status = HealthStatusHealthy 36 | default: 37 | status = HealthStatusUnknown 38 | } 39 | return &HealthStatus{Status: status}, nil 40 | } 41 | -------------------------------------------------------------------------------- /pkg/sync/resource/annotations_test.go: -------------------------------------------------------------------------------- 1 | package resource 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | 9 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 10 | ) 11 | 12 | func TestHasAnnotationOption(t *testing.T) { 13 | type args struct { 14 | obj *unstructured.Unstructured 15 | key string 16 | val string 17 | } 18 | tests := []struct { 19 | name string 20 | args args 21 | wantVals []string 22 | want bool 23 | }{ 24 | {"Nil", args{NewPod(), "foo", "bar"}, nil, false}, 25 | {"Empty", args{example(""), "foo", "bar"}, nil, false}, 26 | {"Single", args{example("bar"), "foo", "bar"}, []string{"bar"}, true}, 27 | {"DeDup", args{example("bar,bar"), "foo", "bar"}, []string{"bar"}, true}, 28 | {"Double", args{example("bar,baz"), "foo", "baz"}, []string{"bar", "baz"}, true}, 29 | {"Spaces", args{example("bar "), "foo", "bar"}, []string{"bar"}, true}, 30 | } 31 | for _, tt := range tests { 32 | t.Run(tt.name, func(t *testing.T) { 33 | assert.ElementsMatch(t, tt.wantVals, GetAnnotationCSVs(tt.args.obj, tt.args.key)) 34 | assert.Equal(t, tt.want, HasAnnotationOption(tt.args.obj, tt.args.key, tt.args.val)) 35 | }) 36 | } 37 | } 38 | 39 | func example(val string) *unstructured.Unstructured { 40 | return Annotate(NewPod(), "foo", val) 41 | } 42 | -------------------------------------------------------------------------------- /pkg/health/testdata/job-running.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | creationTimestamp: 2018-12-02T08:19:13Z 5 | labels: 6 | controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 7 | job-name: succeed 8 | name: succeed 9 | namespace: argoci-workflows 10 | resourceVersion: "46535911" 11 | selfLink: /apis/batch/v1/namespaces/argoci-workflows/jobs/succeed 12 | uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 13 | spec: 14 | backoffLimit: 0 15 | completions: 1 16 | parallelism: 1 17 | selector: 18 | matchLabels: 19 | controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 20 | template: 21 | metadata: 22 | creationTimestamp: null 23 | labels: 24 | controller-uid: 
f3fe3a46-f60a-11e8-aa53-42010a80021b 25 | job-name: succeed 26 | spec: 27 | containers: 28 | - command: 29 | - sh 30 | - -c 31 | - sleep 10 32 | image: alpine:latest 33 | imagePullPolicy: Always 34 | name: succeed 35 | resources: {} 36 | terminationMessagePath: /dev/termination-log 37 | terminationMessagePolicy: File 38 | dnsPolicy: ClusterFirst 39 | restartPolicy: Never 40 | schedulerName: default-scheduler 41 | securityContext: {} 42 | terminationGracePeriodSeconds: 30 43 | status: 44 | active: 1 45 | startTime: 2018-12-02T08:19:14Z 46 | -------------------------------------------------------------------------------- /pkg/utils/tracing/logging.go: -------------------------------------------------------------------------------- 1 | package tracing 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/go-logr/logr" 7 | ) 8 | 9 | var ( 10 | _ Tracer = LoggingTracer{} 11 | _ Span = loggingSpan{} 12 | ) 13 | 14 | type LoggingTracer struct { 15 | logger logr.Logger 16 | } 17 | 18 | func NewLoggingTracer(logger logr.Logger) *LoggingTracer { 19 | return &LoggingTracer{ 20 | logger: logger, 21 | } 22 | } 23 | 24 | func (l LoggingTracer) StartSpan(operationName string) Span { 25 | return loggingSpan{ 26 | logger: l.logger, 27 | operationName: operationName, 28 | baggage: make(map[string]interface{}), 29 | start: time.Now(), 30 | } 31 | } 32 | 33 | type loggingSpan struct { 34 | logger logr.Logger 35 | operationName string 36 | baggage map[string]interface{} 37 | start time.Time 38 | } 39 | 40 | func (s loggingSpan) Finish() { 41 | s.logger.WithValues(baggageToVals(s.baggage)...). 42 | WithValues("operation_name", s.operationName, "time_ms", time.Since(s.start).Seconds()*1e3). 
43 | Info("Trace") 44 | } 45 | 46 | func (s loggingSpan) SetBaggageItem(key string, value interface{}) { 47 | s.baggage[key] = value 48 | } 49 | 50 | func baggageToVals(baggage map[string]interface{}) []interface{} { 51 | result := make([]interface{}, 0, len(baggage)*2) 52 | for k, v := range baggage { 53 | result = append(result, k, v) 54 | } 55 | return result 56 | } 57 | -------------------------------------------------------------------------------- /pkg/diff/testdata/mutatingwebhookconfig-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admissionregistration.k8s.io/v1beta1", 3 | "kind": "MutatingWebhookConfiguration", 4 | "metadata": { 5 | "annotations": { 6 | "cert-manager.io/inject-apiserver-ca": "true" 7 | }, 8 | "labels": { 9 | "app": "webhook", 10 | "app.kubernetes.io/instance": "cert-manager", 11 | "app.kubernetes.io/managed-by": "Tiller", 12 | "app.kubernetes.io/name": "webhook", 13 | "helm.sh/chart": "cert-manager-v0.11.0" 14 | }, 15 | "name": "cert-manager-webhook" 16 | }, 17 | "webhooks": [ 18 | { 19 | "clientConfig": { 20 | "service": { 21 | "name": "kubernetes", 22 | "namespace": "default", 23 | "path": "/apis/webhook.cert-manager.io/v1beta1/mutations" 24 | } 25 | }, 26 | "failurePolicy": "Fail", 27 | "name": "webhook.cert-manager.io", 28 | "rules": [ 29 | { 30 | "apiGroups": [ 31 | "cert-manager.io" 32 | ], 33 | "apiVersions": [ 34 | "v1alpha2" 35 | ], 36 | "operations": [ 37 | "CREATE", 38 | "UPDATE" 39 | ], 40 | "resources": [ 41 | "certificates", 42 | "issuers", 43 | "clusterissuers", 44 | "orders", 45 | "challenges", 46 | "certificaterequests" 47 | ] 48 | } 49 | ] 50 | } 51 | ] 52 | } -------------------------------------------------------------------------------- /pkg/sync/hook/helm/delete_policy.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "github.com/argoproj/gitops-engine/pkg/sync/common" 5 | 
resourceutil "github.com/argoproj/gitops-engine/pkg/sync/resource" 6 | 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | ) 9 | 10 | type DeletePolicy string 11 | 12 | const ( 13 | BeforeHookCreation DeletePolicy = "before-hook-creation" 14 | HookSucceeded DeletePolicy = "hook-succeeded" 15 | HookFailed DeletePolicy = "hook-failed" 16 | ) 17 | 18 | // note that we do not take into account if this is or is not a hook, caller should check 19 | func NewDeletePolicy(p string) (DeletePolicy, bool) { 20 | return DeletePolicy(p), p == string(BeforeHookCreation) || p == string(HookSucceeded) || p == string(HookFailed) 21 | } 22 | 23 | var hookDeletePolicies = map[DeletePolicy]common.HookDeletePolicy{ 24 | BeforeHookCreation: common.HookDeletePolicyBeforeHookCreation, 25 | HookSucceeded: common.HookDeletePolicyHookSucceeded, 26 | HookFailed: common.HookDeletePolicyHookFailed, 27 | } 28 | 29 | func (p DeletePolicy) DeletePolicy() common.HookDeletePolicy { 30 | return hookDeletePolicies[p] 31 | } 32 | 33 | func DeletePolicies(obj *unstructured.Unstructured) []DeletePolicy { 34 | var policies []DeletePolicy 35 | for _, text := range resourceutil.GetAnnotationCSVs(obj, "helm.sh/hook-delete-policy") { 36 | p, ok := NewDeletePolicy(text) 37 | if ok { 38 | policies = append(policies, p) 39 | } 40 | } 41 | return policies 42 | } 43 | -------------------------------------------------------------------------------- /pkg/health/health_argo.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | "k8s.io/apimachinery/pkg/runtime" 6 | ) 7 | 8 | type nodePhase string 9 | 10 | // Workflow and node statuses 11 | const ( 12 | nodePending nodePhase = "Pending" 13 | nodeRunning nodePhase = "Running" 14 | nodeSucceeded nodePhase = "Succeeded" 15 | // nodeSkipped nodePhase = "Skipped" 16 | nodeFailed nodePhase = "Failed" 17 | nodeError nodePhase = "Error" 18 | ) 
19 | 20 | // An agnostic workflow object only considers Status.Phase and Status.Message. It is agnostic to the API version or any 21 | // other fields. 22 | type argoWorkflow struct { 23 | Status struct { 24 | Phase nodePhase 25 | Message string 26 | } 27 | } 28 | 29 | func getArgoWorkflowHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 30 | var wf argoWorkflow 31 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &wf) 32 | if err != nil { 33 | return nil, err 34 | } 35 | switch wf.Status.Phase { 36 | case nodePending, nodeRunning: 37 | return &HealthStatus{Status: HealthStatusProgressing, Message: wf.Status.Message}, nil 38 | case nodeSucceeded: 39 | return &HealthStatus{Status: HealthStatusHealthy, Message: wf.Status.Message}, nil 40 | case nodeFailed, nodeError: 41 | return &HealthStatus{Status: HealthStatusDegraded, Message: wf.Status.Message}, nil 42 | } 43 | return &HealthStatus{Status: HealthStatusHealthy, Message: wf.Status.Message}, nil 44 | } 45 | -------------------------------------------------------------------------------- /pkg/health/testdata/pvc-bound.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | annotations: 5 | control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"e57a9040-a984-11e8-836b-c4b301c4d0d1","leaseDurationSeconds":15,"acquireTime":"2018-08-27T23:00:54Z","renewTime":"2018-08-27T23:00:56Z","leaderTransitions":0}' 6 | kubectl.kubernetes.io/last-applied-configuration: | 7 | {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"working-pvc"},"name":"testpvc","namespace":"argocd"},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"2Gi"}}}} 8 | pv.kubernetes.io/bind-completed: "yes" 9 | pv.kubernetes.io/bound-by-controller: "yes" 10 | volume.beta.kubernetes.io/storage-provisioner: docker.io/hostpath 11 
| creationTimestamp: 2018-08-27T23:00:54Z 12 | finalizers: 13 | - kubernetes.io/pvc-protection 14 | labels: 15 | app.kubernetes.io/instance: working-pvc 16 | name: testpvc 17 | namespace: argocd 18 | resourceVersion: "323170" 19 | selfLink: /api/v1/namespaces/argocd/persistentvolumeclaims/testpvc 20 | uid: 0cedda2c-aa4d-11e8-a271-025000000001 21 | spec: 22 | accessModes: 23 | - ReadWriteOnce 24 | resources: 25 | requests: 26 | storage: 2Gi 27 | storageClassName: hostpath 28 | volumeName: pvc-0cedda2c-aa4d-11e8-a271-025000000001 29 | status: 30 | accessModes: 31 | - ReadWriteOnce 32 | capacity: 33 | storage: 2Gi 34 | phase: Bound 35 | -------------------------------------------------------------------------------- /specs/design.md: -------------------------------------------------------------------------------- 1 | # GitOps Engine Design 2 | 3 | ## Summary 4 | 5 | Flux and ArgoCD are two popular open-source GitOps implementations. They currently offer different user experiences but, at their core, Flux and ArgoCD have a lot in common. 6 | Therefore, the Flux and ArgoCD maintainers have decided to join forces, with the hypothesis that working on a single project will be more effective, avoiding duplicate work and 7 | ultimately bringing more and better value to the end-user. 8 | 9 | Effectively merging Flux and ArgoCD into a single solution is a long term goal. As a first step, both the ArgoCD and Flux teams are going to work on designing and implementing 10 | the GitOps Engine. 11 | 12 | ![](https://user-images.githubusercontent.com/426437/66851601-ea9a6880-ef2f-11e9-807d-0c5f09fcc384.png) 13 | 14 | The maintenance and support of the GitOps Engine will be a joined effort by the Flux and ArgoCD teams. 15 | 16 | ## Goals 17 | 18 | The GitOps Engine: 19 | * should contain core functionality pre-existing in ArgoCD and Flux. 20 | 21 | ## Non-Goals 22 | 23 | * is not intended as a general framework for implementing GitOps services. 
24 | 25 | ## Proposals 26 | 27 | Teams have considered two ways to extract common functionality into the GitOps engine: 28 | 1. [Bottom-up](./design-bottom-up.md). Identify components that are used in both projects and move them one by one into GitOps engine repository. 29 | 1. [Top-down](./design-top-down.md). Take a whole sub-system of one project and make it customizable enough to be suitable for both Argo CD and Flux. 30 | -------------------------------------------------------------------------------- /pkg/utils/json/json.go: -------------------------------------------------------------------------------- 1 | package json 2 | 3 | // https://github.com/ksonnet/ksonnet/blob/master/pkg/kubecfg/diff.go 4 | func removeFields(config, live interface{}) interface{} { 5 | switch c := config.(type) { 6 | case map[string]interface{}: 7 | l, ok := live.(map[string]interface{}) 8 | if ok { 9 | return RemoveMapFields(c, l) 10 | } else { 11 | return live 12 | } 13 | case []interface{}: 14 | l, ok := live.([]interface{}) 15 | if ok { 16 | return RemoveListFields(c, l) 17 | } else { 18 | return live 19 | } 20 | default: 21 | return live 22 | } 23 | 24 | } 25 | 26 | // RemoveMapFields remove all non-existent fields in the live that don't exist in the config 27 | func RemoveMapFields(config, live map[string]interface{}) map[string]interface{} { 28 | result := map[string]interface{}{} 29 | for k, v1 := range config { 30 | v2, ok := live[k] 31 | if !ok { 32 | continue 33 | } 34 | if v2 != nil { 35 | v2 = removeFields(v1, v2) 36 | } 37 | result[k] = v2 38 | } 39 | return result 40 | } 41 | 42 | func RemoveListFields(config, live []interface{}) []interface{} { 43 | // If live is longer than config, then the extra elements at the end of the 44 | // list will be returned as-is so they appear in the diff. 
45 | result := make([]interface{}, 0, len(live)) 46 | for i, v2 := range live { 47 | if len(config) > i { 48 | if v2 != nil { 49 | v2 = removeFields(config[i], v2) 50 | } 51 | result = append(result, v2) 52 | } else { 53 | result = append(result, v2) 54 | } 55 | } 56 | return result 57 | } 58 | -------------------------------------------------------------------------------- /pkg/sync/common/types_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestNewHookType(t *testing.T) { 10 | t.Run("Garbage", func(t *testing.T) { 11 | _, ok := NewHookType("Garbage") 12 | assert.False(t, ok) 13 | }) 14 | t.Run("PreSync", func(t *testing.T) { 15 | hookType, ok := NewHookType("PreSync") 16 | assert.True(t, ok) 17 | assert.Equal(t, HookTypePreSync, hookType) 18 | }) 19 | t.Run("Sync", func(t *testing.T) { 20 | hookType, ok := NewHookType("Sync") 21 | assert.True(t, ok) 22 | assert.Equal(t, HookTypeSync, hookType) 23 | }) 24 | t.Run("PostSync", func(t *testing.T) { 25 | hookType, ok := NewHookType("PostSync") 26 | assert.True(t, ok) 27 | assert.Equal(t, HookTypePostSync, hookType) 28 | }) 29 | } 30 | 31 | func TestNewHookDeletePolicy(t *testing.T) { 32 | t.Run("Garbage", func(t *testing.T) { 33 | _, ok := NewHookDeletePolicy("Garbage") 34 | assert.False(t, ok) 35 | }) 36 | t.Run("HookSucceeded", func(t *testing.T) { 37 | p, ok := NewHookDeletePolicy("HookSucceeded") 38 | assert.True(t, ok) 39 | assert.Equal(t, HookDeletePolicyHookSucceeded, p) 40 | }) 41 | t.Run("HookFailed", func(t *testing.T) { 42 | p, ok := NewHookDeletePolicy("HookFailed") 43 | assert.True(t, ok) 44 | assert.Equal(t, HookDeletePolicyHookFailed, p) 45 | }) 46 | t.Run("BeforeHookCreation", func(t *testing.T) { 47 | p, ok := NewHookDeletePolicy("BeforeHookCreation") 48 | assert.True(t, ok) 49 | assert.Equal(t, HookDeletePolicyBeforeHookCreation, p) 50 | }) 51 | 
} 52 | -------------------------------------------------------------------------------- /pkg/sync/hook/helm/type_test.go: -------------------------------------------------------------------------------- 1 | package helm 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 8 | "github.com/argoproj/gitops-engine/pkg/sync/common" 9 | . "github.com/argoproj/gitops-engine/pkg/utils/testing" 10 | ) 11 | 12 | func TestTypes(t *testing.T) { 13 | assert.Nil(t, Types(NewPod())) 14 | assert.Equal(t, []Type{PreInstall}, Types(Annotate(NewPod(), "helm.sh/hook", "pre-install"))) 15 | assert.Equal(t, []Type{PreUpgrade}, Types(Annotate(NewPod(), "helm.sh/hook", "pre-upgrade"))) 16 | assert.Equal(t, []Type{PostUpgrade}, Types(Annotate(NewPod(), "helm.sh/hook", "post-upgrade"))) 17 | assert.Equal(t, []Type{PostInstall}, Types(Annotate(NewPod(), "helm.sh/hook", "post-install"))) 18 | // helm calls "crd-install" a hook, but it really can't be treated as such 19 | assert.Empty(t, Types(Annotate(NewPod(), "helm.sh/hook", "crd-install"))) 20 | // we do not consider these supported hooks 21 | assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "pre-rollback"))) 22 | assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "post-rollback"))) 23 | assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "test-success"))) 24 | assert.Nil(t, Types(Annotate(NewPod(), "helm.sh/hook", "test-failure"))) 25 | } 26 | 27 | func TestType_HookType(t *testing.T) { 28 | assert.Equal(t, common.HookTypePreSync, PreInstall.HookType()) 29 | assert.Equal(t, common.HookTypePreSync, PreUpgrade.HookType()) 30 | assert.Equal(t, common.HookTypePostSync, PostUpgrade.HookType()) 31 | assert.Equal(t, common.HookTypePostSync, PostInstall.HookType()) 32 | } 33 | -------------------------------------------------------------------------------- /pkg/health/testdata/job-succeeded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | 
kind: Job 3 | metadata: 4 | creationTimestamp: 2018-12-02T08:19:13Z 5 | labels: 6 | controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 7 | job-name: succeed 8 | name: succeed 9 | namespace: argoci-workflows 10 | resourceVersion: "46535949" 11 | selfLink: /apis/batch/v1/namespaces/argoci-workflows/jobs/succeed 12 | uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 13 | spec: 14 | backoffLimit: 0 15 | completions: 1 16 | parallelism: 1 17 | selector: 18 | matchLabels: 19 | controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 20 | template: 21 | metadata: 22 | creationTimestamp: null 23 | labels: 24 | controller-uid: f3fe3a46-f60a-11e8-aa53-42010a80021b 25 | job-name: succeed 26 | spec: 27 | containers: 28 | - command: 29 | - sh 30 | - -c 31 | - sleep 10 32 | image: alpine:latest 33 | imagePullPolicy: Always 34 | name: succeed 35 | resources: {} 36 | terminationMessagePath: /dev/termination-log 37 | terminationMessagePolicy: File 38 | dnsPolicy: ClusterFirst 39 | restartPolicy: Never 40 | schedulerName: default-scheduler 41 | securityContext: {} 42 | terminationGracePeriodSeconds: 30 43 | status: 44 | completionTime: 2018-12-02T08:19:26Z 45 | conditions: 46 | - lastProbeTime: 2018-12-02T08:19:26Z 47 | lastTransitionTime: 2018-12-02T08:19:26Z 48 | status: "True" 49 | type: Complete 50 | startTime: 2018-12-02T08:19:14Z 51 | succeeded: 1 52 | -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v2beta2-healthy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta2 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | creationTimestamp: '2020-08-07T00:13:31Z' 5 | name: credential-hpa 6 | uid: 04d9992e-a849-4cce-9e1e-121a62d5c001 7 | spec: 8 | maxReplicas: 1 9 | metrics: 10 | - resource: 11 | name: cpu 12 | target: 13 | averageUtilization: 65 14 | type: Utilization 15 | type: Resource 16 | minReplicas: 1 17 | scaleTargetRef: 18 | apiVersion: argoproj.io/v1alpha1 
19 | kind: Rollout 20 | name: credential-rollout 21 | status: 22 | conditions: 23 | - lastTransitionTime: '2020-08-07T00:13:46Z' 24 | message: recommended size matches current size 25 | reason: ReadyForNewScale 26 | status: 'True' 27 | type: AbleToScale 28 | - lastTransitionTime: '2020-12-12T07:13:46Z' 29 | message: >- 30 | the HPA was able to successfully calculate a replica count from cpu 31 | resource utilization (percentage of request) 32 | reason: ValidMetricFound 33 | status: 'True' 34 | type: ScalingActive 35 | - lastTransitionTime: '2020-12-09T23:28:43Z' 36 | message: the desired count is within the acceptable range 37 | reason: DesiredWithinRange 38 | status: 'False' 39 | type: ScalingLimited 40 | currentMetrics: 41 | - resource: 42 | current: 43 | averageUtilization: 27 44 | averageValue: 195m 45 | name: cpu 46 | type: Resource 47 | currentReplicas: 1 48 | desiredReplicas: 1 49 | lastScaleTime: '2020-08-07T00:13:46Z' 50 | -------------------------------------------------------------------------------- /pkg/health/testdata/job-failed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | creationTimestamp: 2018-12-02T08:09:25Z 5 | labels: 6 | controller-uid: 95052288-f609-11e8-aa53-42010a80021b 7 | job-name: fail 8 | name: fail 9 | namespace: argoci-workflows 10 | resourceVersion: "46534173" 11 | selfLink: /apis/batch/v1/namespaces/argoci-workflows/jobs/fail 12 | uid: 95052288-f609-11e8-aa53-42010a80021b 13 | spec: 14 | backoffLimit: 0 15 | completions: 1 16 | parallelism: 1 17 | selector: 18 | matchLabels: 19 | controller-uid: 95052288-f609-11e8-aa53-42010a80021b 20 | template: 21 | metadata: 22 | creationTimestamp: null 23 | labels: 24 | controller-uid: 95052288-f609-11e8-aa53-42010a80021b 25 | job-name: fail 26 | spec: 27 | containers: 28 | - command: 29 | - sh 30 | - -c 31 | - exit 1 32 | image: alpine:latest 33 | imagePullPolicy: Always 34 | name: fail 35 | 
resources: {} 36 | terminationMessagePath: /dev/termination-log 37 | terminationMessagePolicy: File 38 | dnsPolicy: ClusterFirst 39 | restartPolicy: Never 40 | schedulerName: default-scheduler 41 | securityContext: {} 42 | terminationGracePeriodSeconds: 30 43 | status: 44 | conditions: 45 | - lastProbeTime: 2018-12-02T08:09:27Z 46 | lastTransitionTime: 2018-12-02T08:09:27Z 47 | message: Job has reached the specified backoff limit 48 | reason: BackoffLimitExceeded 49 | status: "True" 50 | type: Failed 51 | failed: 1 52 | startTime: 2018-12-02T08:09:25Z 53 | -------------------------------------------------------------------------------- /pkg/health/testdata/daemonset-ondelete.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd-elasticsearch 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-logging 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd-elasticsearch 12 | updateStrategy: 13 | type: OnDelete 14 | template: 15 | metadata: 16 | labels: 17 | name: fluentd-elasticsearch 18 | spec: 19 | tolerations: 20 | # this toleration is to have the daemonset runnable on master nodes 21 | # remove it if your masters can't run pods 22 | - key: node-role.kubernetes.io/master 23 | effect: NoSchedule 24 | containers: 25 | - name: fluentd-elasticsearch 26 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.1 27 | resources: 28 | limits: 29 | memory: 200Mi 30 | requests: 31 | cpu: 100m 32 | memory: 200Mi 33 | volumeMounts: 34 | - name: varlog 35 | mountPath: /var/log 36 | - name: varlibdockercontainers 37 | mountPath: /var/lib/docker/containers 38 | readOnly: true 39 | terminationGracePeriodSeconds: 30 40 | volumes: 41 | - name: varlog 42 | hostPath: 43 | path: /var/log 44 | - name: varlibdockercontainers 45 | hostPath: 46 | path: /var/lib/docker/containers 47 | status: 48 | currentNumberScheduled: 1 49 | desiredNumberScheduled: 1 50 | 
numberAvailable: 1 51 | numberMisscheduled: 0 52 | numberReady: 1 53 | observedGeneration: 4 -------------------------------------------------------------------------------- /pkg/health/health_job.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 7 | batchv1 "k8s.io/api/batch/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | func getJobHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 13 | gvk := obj.GroupVersionKind() 14 | switch gvk { 15 | case batchv1.SchemeGroupVersion.WithKind(kube.JobKind): 16 | var job batchv1.Job 17 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &job) 18 | if err != nil { 19 | return nil, fmt.Errorf("failed to convert unstructured Job to typed: %v", err) 20 | } 21 | return getBatchv1JobHealth(&job) 22 | default: 23 | return nil, fmt.Errorf("unsupported Job GVK: %s", gvk) 24 | } 25 | } 26 | 27 | func getBatchv1JobHealth(job *batchv1.Job) (*HealthStatus, error) { 28 | failed := false 29 | var failMsg string 30 | complete := false 31 | var message string 32 | for _, condition := range job.Status.Conditions { 33 | switch condition.Type { 34 | case batchv1.JobFailed: 35 | failed = true 36 | complete = true 37 | failMsg = condition.Message 38 | case batchv1.JobComplete: 39 | complete = true 40 | message = condition.Message 41 | } 42 | } 43 | if !complete { 44 | return &HealthStatus{ 45 | Status: HealthStatusProgressing, 46 | Message: message, 47 | }, nil 48 | } else if failed { 49 | return &HealthStatus{ 50 | Status: HealthStatusDegraded, 51 | Message: failMsg, 52 | }, nil 53 | } else { 54 | return &HealthStatus{ 55 | Status: HealthStatusHealthy, 56 | Message: message, 57 | }, nil 58 | } 59 | } 60 | -------------------------------------------------------------------------------- 
/pkg/cache/settings_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | "k8s.io/client-go/rest" 9 | 10 | "github.com/argoproj/gitops-engine/pkg/utils/kube/kubetest" 11 | ) 12 | 13 | func TestSetSettings(t *testing.T) { 14 | cache := NewClusterCache(&rest.Config{}, SetKubectl(&kubetest.MockKubectlCmd{})) 15 | updatedHealth := &noopSettings{} 16 | updatedFilter := &noopSettings{} 17 | cache.Invalidate(SetSettings(Settings{ResourceHealthOverride: updatedHealth, ResourcesFilter: updatedFilter})) 18 | 19 | assert.Equal(t, updatedFilter, cache.settings.ResourcesFilter) 20 | assert.Equal(t, updatedHealth, cache.settings.ResourceHealthOverride) 21 | } 22 | 23 | func TestSetConfig(t *testing.T) { 24 | cache := NewClusterCache(&rest.Config{}, SetKubectl(&kubetest.MockKubectlCmd{})) 25 | updatedConfig := &rest.Config{Host: "http://newhost"} 26 | cache.Invalidate(SetConfig(updatedConfig)) 27 | 28 | assert.Equal(t, updatedConfig, cache.config) 29 | } 30 | 31 | func TestSetNamespaces(t *testing.T) { 32 | cache := NewClusterCache(&rest.Config{}, SetKubectl(&kubetest.MockKubectlCmd{}), SetNamespaces([]string{"default"})) 33 | 34 | updatedNamespaces := []string{"updated"} 35 | cache.Invalidate(SetNamespaces(updatedNamespaces)) 36 | 37 | assert.ElementsMatch(t, updatedNamespaces, cache.namespaces) 38 | } 39 | 40 | func TestSetResyncTimeout(t *testing.T) { 41 | cache := NewClusterCache(&rest.Config{}) 42 | assert.Equal(t, clusterResyncTimeout, cache.syncStatus.resyncTimeout) 43 | 44 | timeout := 1 * time.Hour 45 | cache.Invalidate(SetResyncTimeout(timeout)) 46 | 47 | assert.Equal(t, timeout, cache.syncStatus.resyncTimeout) 48 | } 49 | -------------------------------------------------------------------------------- /agent/manifests/install-namespaced.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: gitops-agent 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | kind: Role 8 | metadata: 9 | name: gitops-agent 10 | rules: 11 | - apiGroups: 12 | - '*' 13 | resources: 14 | - '*' 15 | verbs: 16 | - '*' 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: RoleBinding 20 | metadata: 21 | name: gitops-agent 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: Role 25 | name: gitops-agent 26 | subjects: 27 | - kind: ServiceAccount 28 | name: gitops-agent 29 | --- 30 | apiVersion: apps/v1 31 | kind: Deployment 32 | metadata: 33 | name: gitops-agent 34 | spec: 35 | selector: 36 | matchLabels: 37 | app.kubernetes.io/name: gitops-agent 38 | strategy: 39 | type: Recreate 40 | template: 41 | metadata: 42 | labels: 43 | app.kubernetes.io/name: gitops-agent 44 | spec: 45 | containers: 46 | - command: 47 | - gitops 48 | - /tmp/git/repo 49 | - --path 50 | - guestbook 51 | - --namespaced 52 | image: argoproj/gitops-agent:latest 53 | name: gitops-agent 54 | volumeMounts: 55 | - mountPath: /tmp/git 56 | name: git 57 | - args: 58 | - --webhook-url 59 | - http://localhost:9001/api/v1/sync 60 | - --dest 61 | - repo 62 | env: 63 | - name: GIT_SYNC_REPO 64 | value: https://github.com/argoproj/argocd-example-apps 65 | image: k8s.gcr.io/git-sync:v3.1.6 66 | name: git-sync 67 | volumeMounts: 68 | - mountPath: /tmp/git 69 | name: git 70 | serviceAccountName: gitops-agent 71 | volumes: 72 | - emptyDir: {} 73 | name: git 74 | -------------------------------------------------------------------------------- /specs/image-update-monitoring.md: -------------------------------------------------------------------------------- 1 | # Docker Image Update Monitoring 2 | 3 | ## Summary 4 | 5 | Many GitOps users would like to automate Kubernetes manifest changes in the deployment repository 6 | (see [Deployment Repo Update Automation](./deployment-repo-update.md)). 
The changes might be triggered by 7 | the CI pipeline run or a new image in the Docker registry. Flux provides docker registry monitoring as part of the 8 | [Automated Image Update](https://docs.fluxcd.io/en/latest/references/automated-image-update.html) feature. 9 | 10 | This document is meant to collect requirements for a component that provides docker registry monitoring functionality and 11 | can be used by Argo CD and potentially Flux users. 12 | 13 | ## Requirements 14 | 15 | ### Configurable Event Handler 16 | 17 | When a new docker image is discovered the component should execute an event handler and pass the docker image name/version as a parameter. 18 | The event handler is a shell script. The user should be able to specify the handler in the component configuration. 19 | 20 | ### Docker Registry WebHooks 21 | 22 | Some Docker Registries send a webhook when a new image gets pushed. The component should provide a webhook handler which, when invoked, executes an event handler. 23 | 24 | ### Image Pulling 25 | 26 | In addition to the webhook, the component should support image metadata pulling. The pulling should detect the new images and invoke an event handler for each new image. 27 | 28 | ### Image Credentials Auto-Discovering 29 | 30 | If a component is running inside of a Kubernetes cluster together with the deployments then it already has access to the Docker registry credentials. The Auto-Discovering functionality should
32 | -------------------------------------------------------------------------------- /agent/manifests/install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: gitops-agent 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | kind: ClusterRole 8 | metadata: 9 | name: gitops-agent 10 | rules: 11 | - apiGroups: 12 | - '*' 13 | resources: 14 | - '*' 15 | verbs: 16 | - '*' 17 | - nonResourceURLs: 18 | - '*' 19 | verbs: 20 | - '*' 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: gitops-agent 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: gitops-agent 30 | subjects: 31 | - kind: ServiceAccount 32 | name: gitops-agent 33 | namespace: gitops-agent 34 | --- 35 | apiVersion: apps/v1 36 | kind: Deployment 37 | metadata: 38 | name: gitops-agent 39 | spec: 40 | selector: 41 | matchLabels: 42 | app.kubernetes.io/name: gitops-agent 43 | strategy: 44 | type: Recreate 45 | template: 46 | metadata: 47 | labels: 48 | app.kubernetes.io/name: gitops-agent 49 | spec: 50 | containers: 51 | - command: 52 | - gitops 53 | - /tmp/git/repo 54 | - --path 55 | - guestbook 56 | image: argoproj/gitops-agent:latest 57 | name: gitops-agent 58 | volumeMounts: 59 | - mountPath: /tmp/git 60 | name: git 61 | - args: 62 | - --webhook-url 63 | - http://localhost:9001/api/v1/sync 64 | - --dest 65 | - repo 66 | env: 67 | - name: GIT_SYNC_REPO 68 | value: https://github.com/argoproj/argocd-example-apps 69 | image: k8s.gcr.io/git-sync:v3.1.6 70 | name: git-sync 71 | volumeMounts: 72 | - mountPath: /tmp/git 73 | name: git 74 | serviceAccountName: gitops-agent 75 | volumes: 76 | - emptyDir: {} 77 | name: git 78 | -------------------------------------------------------------------------------- /pkg/diff/testdata/aggr-clusterrole-live.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "aggregationRule": { 3 | "clusterRoleSelectors": [ 4 | { 5 | "matchLabels": { 6 | "rbac.example.com/aggregate-to-test": "true" 7 | } 8 | } 9 | ] 10 | }, 11 | "apiVersion": "rbac.authorization.k8s.io/v1", 12 | "kind": "ClusterRole", 13 | "metadata": { 14 | "annotations": { 15 | "kubectl.kubernetes.io/last-applied-configuration": "{\"aggregationRule\":{\"clusterRoleSelectors\":[{\"matchLabels\":{\"rbac.example.com/aggregate-to-test\":\"true\"}}]},\"apiVersion\":\"rbac.authorization.k8s.io/v1\",\"kind\":\"ClusterRole\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"clusterroles\"},\"name\":\"test-clusterrole\"},\"rules\":[{\"apiGroups\":[\"\"],\"resources\":[\"deployments\"],\"verbs\":[\"get\",\"list\"]}]}\n" 16 | }, 17 | "creationTimestamp": "2020-02-02T17:18:54Z", 18 | "labels": { 19 | "app.kubernetes.io/instance": "clusterroles" 20 | }, 21 | "name": "test-clusterrole", 22 | "resourceVersion": "5108751", 23 | "selfLink": "/apis/rbac.authorization.k8s.io/v1/clusterroles/test-clusterrole", 24 | "uid": "418e7818-ec49-49f6-ada0-d1fccf679bf6" 25 | }, 26 | "rules": [ 27 | { 28 | "apiGroups": [ 29 | "" 30 | ], 31 | "resources": [ 32 | "services", 33 | "endpoints" 34 | ], 35 | "verbs": [ 36 | "get", 37 | "list", 38 | "watch" 39 | ] 40 | }, 41 | { 42 | "apiGroups": [ 43 | "" 44 | ], 45 | "resources": [ 46 | "pods" 47 | ], 48 | "verbs": [ 49 | "get", 50 | "list", 51 | "watch" 52 | ] 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /pkg/utils/kube/testdata/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "1" 6 | creationTimestamp: 2018-07-05T09:30:00Z 7 | generation: 1 8 | labels: 9 | app: nginx 10 | name: nginx-deployment 11 | 
namespace: default 12 | resourceVersion: "5140192" 13 | selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/nginx-deployment 14 | uid: fd131d5c-8035-11e8-a525-42010a8a006c 15 | spec: 16 | progressDeadlineSeconds: 600 17 | replicas: 1 18 | revisionHistoryLimit: 2 19 | selector: 20 | matchLabels: 21 | app: nginx 22 | strategy: 23 | rollingUpdate: 24 | maxSurge: 25% 25 | maxUnavailable: 25% 26 | type: RollingUpdate 27 | template: 28 | metadata: 29 | creationTimestamp: null 30 | labels: 31 | app: nginx 32 | spec: 33 | containers: 34 | - image: nginx:1.7.9 35 | imagePullPolicy: IfNotPresent 36 | name: nginx 37 | ports: 38 | - containerPort: 80 39 | protocol: TCP 40 | resources: {} 41 | terminationMessagePath: /dev/termination-log 42 | terminationMessagePolicy: File 43 | dnsPolicy: ClusterFirst 44 | restartPolicy: Always 45 | schedulerName: default-scheduler 46 | securityContext: {} 47 | terminationGracePeriodSeconds: 30 48 | status: 49 | availableReplicas: 1 50 | conditions: 51 | - lastTransitionTime: 2018-07-05T09:30:00Z 52 | lastUpdateTime: 2018-07-05T09:30:09Z 53 | message: ReplicaSet "nginx-deployment-648fdd98d4" has successfully progressed. 54 | reason: NewReplicaSetAvailable 55 | status: "True" 56 | type: Progressing 57 | - lastTransitionTime: 2018-07-06T15:23:51Z 58 | lastUpdateTime: 2018-07-06T15:23:51Z 59 | message: Deployment has minimum availability. 
60 | reason: MinimumReplicasAvailable 61 | status: "True" 62 | type: Available 63 | observedGeneration: 1 64 | readyReplicas: 1 65 | replicas: 1 66 | updatedReplicas: 1 67 | -------------------------------------------------------------------------------- /pkg/utils/kube/ctl_test.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/klog/v2/klogr" 8 | 9 | testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing" 10 | "github.com/argoproj/gitops-engine/pkg/utils/tracing" 11 | ) 12 | 13 | var ( 14 | _ Kubectl = &KubectlCmd{} 15 | ) 16 | 17 | func TestConvertToVersion(t *testing.T) { 18 | kubectl := KubectlCmd{ 19 | Log: klogr.New(), 20 | Tracer: tracing.NopTracer{}, 21 | } 22 | t.Run("AppsDeployment", func(t *testing.T) { 23 | newObj, err := kubectl.ConvertToVersion(testingutils.UnstructuredFromFile("testdata/appsdeployment.yaml"), "extensions", "v1beta1") 24 | if assert.NoError(t, err) { 25 | gvk := newObj.GroupVersionKind() 26 | assert.Equal(t, "extensions", gvk.Group) 27 | assert.Equal(t, "v1beta1", gvk.Version) 28 | } 29 | }) 30 | t.Run("CustomResource", func(t *testing.T) { 31 | _, err := kubectl.ConvertToVersion(testingutils.UnstructuredFromFile("testdata/cr.yaml"), "argoproj.io", "v1") 32 | assert.Error(t, err) 33 | }) 34 | t.Run("ExtensionsDeployment", func(t *testing.T) { 35 | obj := testingutils.UnstructuredFromFile("testdata/nginx.yaml") 36 | 37 | // convert an extensions/v1beta1 object into itself 38 | newObj, err := kubectl.ConvertToVersion(obj, "extensions", "v1beta1") 39 | if assert.NoError(t, err) { 40 | gvk := newObj.GroupVersionKind() 41 | assert.Equal(t, "extensions", gvk.Group) 42 | assert.Equal(t, "v1beta1", gvk.Version) 43 | } 44 | 45 | // convert an extensions/v1beta1 object into an apps/v1 46 | newObj, err = kubectl.ConvertToVersion(obj, "apps", "v1") 47 | if assert.NoError(t, err) { 48 | gvk := 
newObj.GroupVersionKind() 49 | assert.Equal(t, "apps", gvk.Group) 50 | assert.Equal(t, "v1", gvk.Version) 51 | } 52 | 53 | // converting it again should not have any affect 54 | newObj, err = kubectl.ConvertToVersion(obj, "apps", "v1") 55 | if assert.NoError(t, err) { 56 | gvk := newObj.GroupVersionKind() 57 | assert.Equal(t, "apps", gvk.Group) 58 | assert.Equal(t, "v1", gvk.Version) 59 | } 60 | }) 61 | } 62 | -------------------------------------------------------------------------------- /pkg/sync/sync_phase_test.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | 9 | "github.com/argoproj/gitops-engine/pkg/sync/common" 10 | . "github.com/argoproj/gitops-engine/pkg/utils/testing" 11 | ) 12 | 13 | func TestSyncPhaseNone(t *testing.T) { 14 | assert.Equal(t, []common.SyncPhase{common.SyncPhaseSync}, syncPhases(&unstructured.Unstructured{})) 15 | } 16 | 17 | func TestSyncPhasePreSync(t *testing.T) { 18 | assert.Equal(t, []common.SyncPhase{common.SyncPhasePreSync}, syncPhases(pod("PreSync"))) 19 | } 20 | 21 | func TestSyncPhaseSync(t *testing.T) { 22 | assert.Equal(t, []common.SyncPhase{common.SyncPhaseSync}, syncPhases(pod("Sync"))) 23 | } 24 | 25 | func TestSyncPhaseSkip(t *testing.T) { 26 | assert.Nil(t, syncPhases(pod("Skip"))) 27 | } 28 | 29 | // garbage hooks are still hooks, but have no phases, because some user spelled something wrong 30 | func TestSyncPhaseGarbage(t *testing.T) { 31 | assert.Nil(t, syncPhases(pod("Garbage"))) 32 | } 33 | 34 | func TestSyncPhasePost(t *testing.T) { 35 | assert.Equal(t, []common.SyncPhase{common.SyncPhasePostSync}, syncPhases(pod("PostSync"))) 36 | } 37 | 38 | func TestSyncPhaseFail(t *testing.T) { 39 | assert.Equal(t, []common.SyncPhase{common.SyncPhaseSyncFail}, syncPhases(pod("SyncFail"))) 40 | } 41 | 42 | func TestSyncPhaseTwoPhases(t 
*testing.T) { 43 | assert.ElementsMatch(t, []common.SyncPhase{common.SyncPhasePreSync, common.SyncPhasePostSync}, syncPhases(pod("PreSync,PostSync"))) 44 | } 45 | 46 | func TestSyncDuplicatedPhases(t *testing.T) { 47 | assert.ElementsMatch(t, []common.SyncPhase{common.SyncPhasePreSync}, syncPhases(pod("PreSync,PreSync"))) 48 | assert.ElementsMatch(t, []common.SyncPhase{common.SyncPhasePreSync}, syncPhases(podWithHelmHook("pre-install,pre-upgrade"))) 49 | } 50 | 51 | func pod(hookType string) *unstructured.Unstructured { 52 | return Annotate(NewPod(), "argocd.argoproj.io/hook", hookType) 53 | } 54 | 55 | func podWithHelmHook(hookType string) *unstructured.Unstructured { 56 | return Annotate(NewPod(), "helm.sh/hook", hookType) 57 | } 58 | -------------------------------------------------------------------------------- /specs/deployment-repo-update.md: -------------------------------------------------------------------------------- 1 | # Deployment Repo Update Automation 2 | 3 | ## Summary 4 | 5 | The GitOps driven continuous deployment cycle starts with a change in the Git repository that contains resource manifests. Flux provides the 6 | [Automated Image Update](https://docs.fluxcd.io/en/latest/references/automated-image-update.html) feature that continuously monitors the docker registry and automatically 7 | updates deployment repo when a new image is released. This functionality is not available for Argo CD users. Also, some Argo CD users need only functionality related to the 8 | Git repository updating and don't need docker registry monitoring. 9 | 10 | This document is meant to collect requirements for the Git repository update functionality. As a next step, we could discuss if it is possible to implement a Golang library or 11 | a service that can be used in combination with Argo CD and Flux. 12 | 13 | > Note: Flux already plans to split out the docker registry monitor and image updating feature into a separate component. 
We should consider re-using the extracted component. 14 | 15 | ## Requirements 16 | 17 | ### Manifests updating 18 | 19 | When updates are discovered for any image referenced in resource manifests in the configuration repository, new manifests that refer to the updated image tags/versions must be generated. 20 | The manifests might be stored as raw YAML files or as a templating tool package such as Kustomize or Helm. The manifest updating functionality should take a new image 21 | set as an input and update manifest files or templating tool configs to use the provided set of images. 22 | 23 | ### Commit signing 24 | 25 | The user might want to use GPG signing for each commit in the deployment repo. The commit signing feature should make it possible to optionally 26 | sign the commit with the image changes. 27 | 28 | ### Interaction with Git 29 | 30 | The feature provides the following basic functionalities: 31 | * Clone Git repo or update the local copy of a previously cloned copy 32 | * Configure local Git user name and email. 33 | * Push changes back to Git remote repo. 34 | * Rebase remote changes in case of concurrent repository update.
35 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/argoproj/gitops-engine 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/evanphx/json-patch v4.9.0+incompatible 7 | github.com/go-logr/logr v0.3.0 8 | github.com/golang/mock v1.4.4 9 | github.com/spf13/cobra v1.1.1 10 | github.com/stretchr/testify v1.6.1 11 | golang.org/x/sync v0.0.0-20201207232520-09787c993a3a 12 | k8s.io/api v0.20.1 13 | k8s.io/apiextensions-apiserver v0.20.1 14 | k8s.io/apimachinery v0.20.1 15 | k8s.io/cli-runtime v0.20.1 16 | k8s.io/client-go v0.20.1 17 | k8s.io/klog/v2 v2.4.0 18 | k8s.io/kube-aggregator v0.20.1 19 | k8s.io/kubectl v0.20.1 20 | k8s.io/kubernetes v1.20.1 21 | sigs.k8s.io/yaml v1.2.0 22 | ) 23 | 24 | replace ( 25 | // https://github.com/kubernetes/kubernetes/issues/79384#issuecomment-505627280 26 | k8s.io/api => k8s.io/api v0.20.1 27 | k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.20.1 // indirect 28 | k8s.io/apimachinery => k8s.io/apimachinery v0.20.1 // indirect 29 | k8s.io/apiserver => k8s.io/apiserver v0.20.1 30 | k8s.io/cli-runtime => k8s.io/cli-runtime v0.20.1 31 | k8s.io/client-go => k8s.io/client-go v0.20.1 32 | k8s.io/cloud-provider => k8s.io/cloud-provider v0.20.1 33 | k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.20.1 34 | k8s.io/code-generator => k8s.io/code-generator v0.20.1 35 | k8s.io/component-base => k8s.io/component-base v0.20.1 36 | k8s.io/component-helpers => k8s.io/component-helpers v0.20.1 37 | k8s.io/controller-manager => k8s.io/controller-manager v0.20.1 38 | k8s.io/cri-api => k8s.io/cri-api v0.20.1 39 | k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.20.1 40 | k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.20.1 41 | k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.20.1 42 | k8s.io/kube-proxy => k8s.io/kube-proxy v0.20.1 43 | 
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.20.1 44 | k8s.io/kubectl => k8s.io/kubectl v0.20.1 45 | k8s.io/kubelet => k8s.io/kubelet v0.20.1 46 | k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.20.1 47 | k8s.io/metrics => k8s.io/metrics v0.20.1 48 | k8s.io/mount-utils => k8s.io/mount-utils v0.20.1 49 | k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.20.1 50 | ) 51 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-pending.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T10:16:04Z 5 | name: image-pull-backoff 6 | namespace: argocd 7 | resourceVersion: "155333" 8 | selfLink: /api/v1/namespaces/argocd/pods/image-pull-backoff 9 | uid: 46c1e8de-f61b-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - image: doesnt-exist 13 | imagePullPolicy: Always 14 | name: main 15 | resources: {} 16 | terminationMessagePath: /dev/termination-log 17 | terminationMessagePolicy: File 18 | volumeMounts: 19 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 20 | name: default-token-f9jvj 21 | readOnly: true 22 | dnsPolicy: ClusterFirst 23 | nodeName: minikube 24 | restartPolicy: Always 25 | schedulerName: default-scheduler 26 | securityContext: {} 27 | serviceAccount: default 28 | serviceAccountName: default 29 | terminationGracePeriodSeconds: 30 30 | tolerations: 31 | - effect: NoExecute 32 | key: node.kubernetes.io/not-ready 33 | operator: Exists 34 | tolerationSeconds: 300 35 | - effect: NoExecute 36 | key: node.kubernetes.io/unreachable 37 | operator: Exists 38 | tolerationSeconds: 300 39 | volumes: 40 | - name: default-token-f9jvj 41 | secret: 42 | defaultMode: 420 43 | secretName: default-token-f9jvj 44 | status: 45 | conditions: 46 | - lastProbeTime: null 47 | lastTransitionTime: 2018-12-02T10:16:04Z 48 | status: "True" 49 | type: Initialized 50 | - lastProbeTime: 
null 51 | lastTransitionTime: 2018-12-02T10:16:04Z 52 | message: 'containers with unready status: [main]' 53 | reason: ContainersNotReady 54 | status: "False" 55 | type: Ready 56 | - lastProbeTime: null 57 | lastTransitionTime: 2018-12-02T10:16:04Z 58 | status: "True" 59 | type: PodScheduled 60 | containerStatuses: 61 | - image: doesnt-exist 62 | imageID: "" 63 | lastState: {} 64 | name: main 65 | ready: false 66 | restartCount: 0 67 | state: 68 | waiting: 69 | reason: PodInitializing 70 | hostIP: 192.168.64.41 71 | phase: Pending 72 | podIP: 172.17.0.9 73 | qosClass: BestEffort 74 | startTime: 2018-12-02T10:16:04Z 75 | -------------------------------------------------------------------------------- /pkg/diff/testdata/sealedsecret-live.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "bitnami.com/v1alpha1", 3 | "kind": "SealedSecret", 4 | "metadata": { 5 | "annotations": { 6 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"bitnami.com/v1alpha1\",\"kind\":\"SealedSecret\",\"metadata\":{\"annotations\":{},\"creationTimestamp\":null,\"name\":\"mysecret\",\"namespace\":\"default\"},\"spec\":{\"encryptedData\":{\"foo\":\"AgCW7b6fmehUc6MmdTfbAEpOx9zcYuXZGKID6llLUqLnXiLHfeF8yPY6wpecv+/GETeu+neCHZbonSRGWsK723rqD2S9GYUT6WT1TJW1+dYEJQrCYg3AYw9Awn7Tli5854oGfvKH2cfu++Yjq7EW7SfEA3E+12uAYM+ennQeib4HNmjxJUf3dVAjvYBrpyl/zMflla5Mi1mkBSLgqzlUitEyCNx63lFCdRw03BZAMOYuMVVRFDP5iobQFBl/NV9f04eMV1B8aTVvGoU81eX2FDZ6b+/DQx4TVdGuZTgViPcThiih4ahB2/9Qyk5OZlubmQeM7qMn8uo53kZy/3sOhqikr5TNGz52BJCoRFhw3HYw4Qm0NTxC/lYnuxnkHq8eXj1KMYVR3YrxnLy3sMH07I3OGsczwabUFvGyOp8SOOOICJ1Np7DmQSW/6U3bHey3sILAJYrZkeU8hjPZ1OU+7Ydr9En5sr048xuvfNblQCfhNGfAePNI0gD/Y6+ubmHM6s/vK4c3kDDVV2nY9suE84T6Zoxd0zEZXftay6+GaFz4lb3qKdFTxTMegzU5RxGxFm0bF75Y7EIWjulhNnC2PzQ6EK6sH5R6HwfZ5pbE3MUMYF4Ww3v3oo3z15EJ7l86//bIiKCQcuM7tFNsJYlubRAEhpcZFunUSKW8eqodjxstYnCwqNxHFKr8aybtsUouC93ZZ3M=\"}}}\n" 7 | }, 8 | "creationTimestamp": "2019-01-15T16:37:05Z", 9 
| "generation": 1, 10 | "name": "mysecret", 11 | "namespace": "default", 12 | "resourceVersion": "102734", 13 | "selfLink": "/apis/bitnami.com/v1alpha1/namespaces/default/sealedsecrets/mysecret", 14 | "uid": "cb3c8363-18e3-11e9-afc1-a6a55e696d25" 15 | }, 16 | "spec": { 17 | "encryptedData": { 18 | "foo": "AgCW7b6fmehUc6MmdTfbAEpOx9zcYuXZGKID6llLUqLnXiLHfeF8yPY6wpecv+/GETeu+neCHZbonSRGWsK723rqD2S9GYUT6WT1TJW1+dYEJQrCYg3AYw9Awn7Tli5854oGfvKH2cfu++Yjq7EW7SfEA3E+12uAYM+ennQeib4HNmjxJUf3dVAjvYBrpyl/zMflla5Mi1mkBSLgqzlUitEyCNx63lFCdRw03BZAMOYuMVVRFDP5iobQFBl/NV9f04eMV1B8aTVvGoU81eX2FDZ6b+/DQx4TVdGuZTgViPcThiih4ahB2/9Qyk5OZlubmQeM7qMn8uo53kZy/3sOhqikr5TNGz52BJCoRFhw3HYw4Qm0NTxC/lYnuxnkHq8eXj1KMYVR3YrxnLy3sMH07I3OGsczwabUFvGyOp8SOOOICJ1Np7DmQSW/6U3bHey3sILAJYrZkeU8hjPZ1OU+7Ydr9En5sr048xuvfNblQCfhNGfAePNI0gD/Y6+ubmHM6s/vK4c3kDDVV2nY9suE84T6Zoxd0zEZXftay6+GaFz4lb3qKdFTxTMegzU5RxGxFm0bF75Y7EIWjulhNnC2PzQ6EK6sH5R6HwfZ5pbE3MUMYF4Ww3v3oo3z15EJ7l86//bIiKCQcuM7tFNsJYlubRAEhpcZFunUSKW8eqodjxstYnCwqNxHFKr8aybtsUouC93ZZ3M=" 19 | } 20 | } 21 | } -------------------------------------------------------------------------------- /pkg/cache/resource_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/client-go/rest" 8 | ) 9 | 10 | var c = NewClusterCache(&rest.Config{}) 11 | 12 | func TestIsParentOf(t *testing.T) { 13 | child := c.newResource(mustToUnstructured(testPod())) 14 | parent := c.newResource(mustToUnstructured(testRS())) 15 | grandParent := c.newResource(mustToUnstructured(testDeploy())) 16 | 17 | assert.True(t, parent.isParentOf(child)) 18 | assert.False(t, grandParent.isParentOf(child)) 19 | } 20 | 21 | func TestIsParentOfSameKindDifferentGroupAndUID(t *testing.T) { 22 | rs := testRS() 23 | rs.APIVersion = "somecrd.io/v1" 24 | rs.SetUID("123") 25 | child := c.newResource(mustToUnstructured(testPod())) 26 | invalidParent := 
c.newResource(mustToUnstructured(rs)) 27 | 28 | assert.False(t, invalidParent.isParentOf(child)) 29 | } 30 | 31 | func TestIsServiceParentOfEndPointWithTheSameName(t *testing.T) { 32 | nonMatchingNameEndPoint := c.newResource(strToUnstructured(` 33 | apiVersion: v1 34 | kind: Endpoints 35 | metadata: 36 | name: not-matching-name 37 | namespace: default 38 | `)) 39 | 40 | matchingNameEndPoint := c.newResource(strToUnstructured(` 41 | apiVersion: v1 42 | kind: Endpoints 43 | metadata: 44 | name: helm-guestbook 45 | namespace: default 46 | `)) 47 | 48 | parent := c.newResource(testService) 49 | 50 | assert.True(t, parent.isParentOf(matchingNameEndPoint)) 51 | assert.Equal(t, parent.Ref.UID, matchingNameEndPoint.OwnerRefs[0].UID) 52 | assert.False(t, parent.isParentOf(nonMatchingNameEndPoint)) 53 | } 54 | 55 | func TestIsServiceAccoountParentOfSecret(t *testing.T) { 56 | serviceAccount := c.newResource(strToUnstructured(` 57 | apiVersion: v1 58 | kind: ServiceAccount 59 | metadata: 60 | name: default 61 | namespace: default 62 | uid: '123' 63 | secrets: 64 | - name: default-token-123 65 | `)) 66 | tokenSecret := c.newResource(strToUnstructured(` 67 | apiVersion: v1 68 | kind: Secret 69 | metadata: 70 | annotations: 71 | kubernetes.io/service-account.name: default 72 | kubernetes.io/service-account.uid: '123' 73 | name: default-token-123 74 | namespace: default 75 | uid: '345' 76 | type: kubernetes.io/service-account-token 77 | `)) 78 | 79 | assert.True(t, serviceAccount.isParentOf(tokenSecret)) 80 | } 81 | -------------------------------------------------------------------------------- /pkg/utils/testing/testdata.go: -------------------------------------------------------------------------------- 1 | package testing 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | ) 6 | 7 | const ( 8 | FakeArgoCDNamespace = "fake-argocd-ns" 9 | ) 10 | 11 | func HelmHook(obj *unstructured.Unstructured, hookType string) *unstructured.Unstructured { 12 | return 
Annotate(obj, "helm.sh/hook", hookType) 13 | } 14 | 15 | func Annotate(obj *unstructured.Unstructured, key, val string) *unstructured.Unstructured { 16 | annotations := obj.GetAnnotations() 17 | if annotations == nil { 18 | annotations = map[string]string{} 19 | } 20 | annotations[key] = val 21 | obj.SetAnnotations(annotations) 22 | return obj 23 | } 24 | 25 | var PodManifest = ` 26 | { 27 | "apiVersion": "v1", 28 | "kind": "Pod", 29 | "metadata": { 30 | "name": "my-pod" 31 | }, 32 | "spec": { 33 | "containers": [ 34 | { 35 | "image": "nginx:1.7.9", 36 | "name": "nginx", 37 | "resources": { 38 | "requests": { 39 | "cpu": 0.2 40 | } 41 | } 42 | } 43 | ] 44 | } 45 | } 46 | ` 47 | 48 | func NewPod() *unstructured.Unstructured { 49 | return Unstructured(PodManifest) 50 | } 51 | 52 | var ServiceManifest = ` 53 | { 54 | "apiVersion": "v1", 55 | "kind": "Service", 56 | "metadata": { 57 | "name": "my-service" 58 | }, 59 | "spec": { 60 | "ports": [ 61 | { 62 | "name": "http", 63 | "protocol": "TCP", 64 | "port": 80, 65 | "targetPort": 8080 66 | } 67 | ], 68 | "selector": { 69 | "app": "my-service" 70 | } 71 | } 72 | } 73 | ` 74 | 75 | func NewService() *unstructured.Unstructured { 76 | return Unstructured(ServiceManifest) 77 | } 78 | 79 | func NewCRD() *unstructured.Unstructured { 80 | return Unstructured(`apiVersion: apiextensions.k8s.io/v1beta1 81 | kind: CustomResourceDefinition 82 | metadata: 83 | name: testcrds.argoproj.io 84 | spec: 85 | group: argoproj.io 86 | version: v1 87 | scope: Namespaced 88 | names: 89 | plural: testcrds 90 | kind: TestCrd`) 91 | } 92 | 93 | func NewNamespace() *unstructured.Unstructured { 94 | return Unstructured(`apiVersion: v1 95 | kind: Namespace 96 | metadata: 97 | name: testnamespace 98 | spec:`) 99 | } 100 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-deletion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T10:16:04Z 5 | name: image-pull-backoff 6 | namespace: argocd 7 | resourceVersion: "155333" 8 | selfLink: /api/v1/namespaces/argocd/pods/image-pull-backoff 9 | uid: 46c1e8de-f61b-11e8-a057-fe5f49266390 10 | deletionTimestamp: 2018-12-03T10:16:04Z 11 | spec: 12 | containers: 13 | - image: doesnt-exist 14 | imagePullPolicy: Always 15 | name: main 16 | resources: {} 17 | terminationMessagePath: /dev/termination-log 18 | terminationMessagePolicy: File 19 | volumeMounts: 20 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 21 | name: default-token-f9jvj 22 | readOnly: true 23 | dnsPolicy: ClusterFirst 24 | nodeName: minikube 25 | restartPolicy: Always 26 | schedulerName: default-scheduler 27 | securityContext: {} 28 | serviceAccount: default 29 | serviceAccountName: default 30 | terminationGracePeriodSeconds: 30 31 | tolerations: 32 | - effect: NoExecute 33 | key: node.kubernetes.io/not-ready 34 | operator: Exists 35 | tolerationSeconds: 300 36 | - effect: NoExecute 37 | key: node.kubernetes.io/unreachable 38 | operator: Exists 39 | tolerationSeconds: 300 40 | volumes: 41 | - name: default-token-f9jvj 42 | secret: 43 | defaultMode: 420 44 | secretName: default-token-f9jvj 45 | status: 46 | conditions: 47 | - lastProbeTime: null 48 | lastTransitionTime: 2018-12-02T10:16:04Z 49 | status: "True" 50 | type: Initialized 51 | - lastProbeTime: null 52 | lastTransitionTime: 2018-12-02T10:16:04Z 53 | message: 'containers with unready status: [main]' 54 | reason: ContainersNotReady 55 | status: "False" 56 | type: Ready 57 | - lastProbeTime: null 58 | lastTransitionTime: 2018-12-02T10:16:04Z 59 | status: "True" 60 | type: PodScheduled 61 | containerStatuses: 62 | - image: doesnt-exist 63 | imageID: "" 64 | lastState: {} 65 | name: main 66 | ready: false 67 | restartCount: 0 68 | state: 69 | waiting: 70 | reason: PodInitializing 71 | hostIP: 192.168.64.41 72 | phase: Pending 73 | podIP: 172.17.0.9 74 | qosClass: 
BestEffort 75 | startTime: 2018-12-02T10:16:04Z 76 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-running-restart-never.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:15:16Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151053" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: c86e909c-f612-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - sleep 10 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Never 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:15:16Z 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:15:19Z 56 | status: "True" 57 | type: Ready 58 | - lastProbeTime: null 59 | lastTransitionTime: 2018-12-02T09:15:16Z 60 | status: "True" 61 | type: PodScheduled 62 | containerStatuses: 63 | - containerID: 
docker://acfb261d6c1fe8c543438a202de62cb06c137fa93a2d59262d764470e96f3195 64 | image: alpine:latest 65 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 66 | lastState: {} 67 | name: main 68 | ready: true 69 | restartCount: 0 70 | state: 71 | running: 72 | startedAt: 2018-12-02T09:15:19Z 73 | hostIP: 192.168.64.41 74 | phase: Running 75 | podIP: 172.17.0.9 76 | qosClass: BestEffort 77 | startTime: 2018-12-02T09:15:16Z 78 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-running-restart-always.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:24:46Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151753" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: 1c3943ee-f614-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - sleep 99999 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Always 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - 
lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:24:46Z 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:24:50Z 56 | status: "True" 57 | type: Ready 58 | - lastProbeTime: null 59 | lastTransitionTime: 2018-12-02T09:24:46Z 60 | status: "True" 61 | type: PodScheduled 62 | containerStatuses: 63 | - containerID: docker://be00d86c48878b352f0ae0cae5dd4ba78025726a62893c768a0fd5754f45e93a 64 | image: alpine:latest 65 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 66 | lastState: {} 67 | name: main 68 | ready: true 69 | restartCount: 0 70 | state: 71 | running: 72 | startedAt: 2018-12-02T09:24:49Z 73 | hostIP: 192.168.64.41 74 | phase: Running 75 | podIP: 172.17.0.9 76 | qosClass: BestEffort 77 | startTime: 2018-12-02T09:24:46Z 78 | -------------------------------------------------------------------------------- /agent/README.md: -------------------------------------------------------------------------------- 1 | # GitOps Agent 2 | 3 | The GitOps Agent leverages the GitOps Engine and provides access to many engine features via a simple CLI interface. 4 | The agent provides the same set of core features as Argo CD including basic reconciliation, syncing as well as sync hooks and sync waves. 5 | 6 | The main difference is that the agent is syncing one Git repository into the same cluster where it is installed. 7 | 8 | ## Quick Start 9 | 10 | By default the agent is configured to use manifests from [guestbook](https://github.com/argoproj/argocd-example-apps/tree/master/guestbook) 11 | directory in https://github.com/argoproj/argocd-example-apps repository. 12 | 13 | The agent supports two modes: 14 | 15 | * namespaced mode - agent manages the same namespace where it is installed 16 | * full cluster mode - agent manages the whole cluster 17 | 18 | ### Namespaced Mode 19 | 20 | Install the agent with the default settings using the command below. 
21 | 22 | ```bash 23 | kubectl apply -f https://raw.githubusercontent.com/argoproj/gitops-engine/master/agent/manifests/install-namespaced.yaml 24 | kubectl rollout status deploy/gitops-agent 25 | ``` 26 | 27 | Tail the agent logs: 28 | 29 | ```bash 30 | kubectl logs -f deploy/gitops-agent gitops-agent 31 | ``` 32 | 33 | Find the guestbook deployment in the current K8S namespace: 34 | 35 | ```bash 36 | kubectl get deployment 37 | ``` 38 | 39 | ### Cluster Mode 40 | 41 | The cluster mode grants full cluster access to the GitOps Agent. Use the following command to install an agent into the 42 | `gitops-agent` namespace and use it to manage resources in any cluster namespace. 43 | 44 | > Note. In cluster mode the agent gets **full** cluster access. 45 | > See [gitops-agent-cluster-role.yaml](./manifests/cluster-install/gitops-agent-cluster-role.yaml) definition for more information. 46 | 47 | ```bash 48 | kubectl create ns gitops-agent 49 | kubectl apply -f https://raw.githubusercontent.com/argoproj/gitops-engine/master/agent/manifests/install.yaml -n gitops-agent 50 | ``` 51 | 52 | ### Customize Git Repository 53 | 54 | The agent runs [git-sync](https://github.com/kubernetes/git-sync) as a sidecar container to access the repository. 55 | Update the container env [variables](https://github.com/kubernetes/git-sync#parameters) to change the repository. 
56 | 57 | ### Demo Recording 58 | 59 | [![asciicast](https://asciinema.org/a/FWbvVAiSsiI87wQx2TJbRMlxN.svg)](https://asciinema.org/a/FWbvVAiSsiI87wQx2TJbRMlxN) -------------------------------------------------------------------------------- /pkg/diff/testdata/endpoints-live.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Endpoints", 4 | "metadata": { 5 | "annotations": { 6 | "description": "A workaround to support a set of backend IPs for solr", 7 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Endpoints\",\"metadata\":{\"annotations\":{\"description\":\"A workaround to support a set of backend IPs for solr\",\"linkerd.io/inject\":\"disabled\"},\"labels\":{\"app.kubernetes.io/instance\":\"guestbook\"},\"name\":\"solrcloud\",\"namespace\":\"default\"},\"subsets\":[{\"addresses\":[{\"ip\":\"172.20.10.97\"},{\"ip\":\"172.20.10.98\"},{\"ip\":\"172.20.10.99\"},{\"ip\":\"172.20.10.100\"},{\"ip\":\"172.20.10.101\"}],\"ports\":[{\"name\":\"solr-http\",\"port\":8080}]}]}\n", 8 | "linkerd.io/inject": "disabled" 9 | }, 10 | "creationTimestamp": null, 11 | "labels": { 12 | "app.kubernetes.io/instance": "guestbook" 13 | }, 14 | "managedFields": [ 15 | { 16 | "apiVersion": "v1", 17 | "fieldsType": "FieldsV1", 18 | "fieldsV1": { 19 | "f:metadata": { 20 | "f:annotations": { 21 | ".": {}, 22 | "f:description": {}, 23 | "f:kubectl.kubernetes.io/last-applied-configuration": {}, 24 | "f:linkerd.io/inject": {} 25 | }, 26 | "f:labels": { 27 | ".": {}, 28 | "f:app.kubernetes.io/instance": {} 29 | } 30 | }, 31 | "f:subsets": {} 32 | }, 33 | "manager": "main", 34 | "operation": "Update", 35 | "time": "2020-10-09T17:26:49Z" 36 | } 37 | ], 38 | "name": "solrcloud", 39 | "namespace": "default", 40 | "resourceVersion": "139834", 41 | "selfLink": "/api/v1/namespaces/default/endpoints/solrcloud", 42 | "uid": "f11285f4-987b-4194-bda8-6372b3f3f08f" 43 | }, 44 | "subsets": [ 45 
| { 46 | "addresses": [ 47 | { 48 | "ip": "172.20.10.100" 49 | }, 50 | { 51 | "ip": "172.20.10.101" 52 | }, 53 | { 54 | "ip": "172.20.10.97" 55 | }, 56 | { 57 | "ip": "172.20.10.98" 58 | }, 59 | { 60 | "ip": "172.20.10.99" 61 | } 62 | ], 63 | "ports": [ 64 | { 65 | "name": "solr-http", 66 | "port": 8080, 67 | "protocol": "TCP" 68 | } 69 | ] 70 | } 71 | ] 72 | } -------------------------------------------------------------------------------- /pkg/utils/kube/convert_test.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "testing" 5 | 6 | testingutils "github.com/argoproj/gitops-engine/pkg/utils/testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/yaml" 11 | ) 12 | 13 | type testcase struct { 14 | name string 15 | file string 16 | outputVersion string 17 | fields []checkField 18 | } 19 | 20 | type checkField struct { 21 | expected string 22 | } 23 | 24 | func Test_convertToVersionWithScheme(t *testing.T) { 25 | for _, tt := range []testcase{ 26 | { 27 | name: "apps deployment to extensions deployment", 28 | file: "appsdeployment.yaml", 29 | outputVersion: "extensions/v1beta1", 30 | fields: []checkField{ 31 | { 32 | expected: "apiVersion: extensions/v1beta1", 33 | }, 34 | }, 35 | }, 36 | { 37 | name: "extensions deployment to apps deployment", 38 | file: "extensionsdeployment.yaml", 39 | outputVersion: "apps/v1beta2", 40 | fields: []checkField{ 41 | { 42 | expected: "apiVersion: apps/v1beta2", 43 | }, 44 | }, 45 | }, 46 | { 47 | name: "v1 HPA to v2beta1 HPA", 48 | file: "v1HPA.yaml", 49 | outputVersion: "autoscaling/v2beta1", 50 | fields: []checkField{ 51 | { 52 | expected: "apiVersion: autoscaling/v2beta1", 53 | }, 54 | { 55 | expected: "name: cpu", 56 | }, 57 | { 58 | expected: "targetAverageUtilization: 50", 59 | }, 60 | }, 61 | }, 62 | { 63 | name: "v2beta1 HPA to v1 HPA", 64 | file: "v2beta1HPA.yaml", 65 | outputVersion: 
"autoscaling/v1", 66 | fields: []checkField{ 67 | { 68 | expected: "apiVersion: autoscaling/v1", 69 | }, 70 | { 71 | expected: "targetCPUUtilizationPercentage: 50", 72 | }, 73 | }, 74 | }, 75 | } { 76 | t.Run(tt.name, func(t *testing.T) { 77 | obj := testingutils.UnstructuredFromFile("testdata/" + tt.file) 78 | target, err := schema.ParseGroupVersion(tt.outputVersion) 79 | assert.NoError(t, err) 80 | out, err := convertToVersionWithScheme(obj, target.Group, target.Version) 81 | if assert.NoError(t, err) { 82 | assert.NotNil(t, out) 83 | assert.Equal(t, target.Group, out.GroupVersionKind().Group) 84 | assert.Equal(t, target.Version, out.GroupVersionKind().Version) 85 | bytes, err := yaml.Marshal(out) 86 | assert.NoError(t, err) 87 | for _, field := range tt.fields { 88 | assert.Contains(t, string(bytes), field.expected) 89 | } 90 | } 91 | }) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GitOps Engine 2 |
3 | 4 | ![image](https://user-images.githubusercontent.com/426437/82109570-f6c7ed80-96eb-11ea-849c-2bd5fe89b571.png) 5 | 6 |
7 | 8 | Various GitOps operators address different use-cases and provide different user experiences but all have a similar set of core features. The team behind 9 | [Argo CD](https://github.com/argoproj/argo-cd) has implemented a reusable library that implements core GitOps features: 10 | 11 | - Kubernetes resource cache ✅ 12 | - Resources reconciliation ✅ 13 | - Sync Planning ✅ 14 | - Access to Git repositories 15 | - Manifest Generation 16 | 17 | ## Proposals, specifications and ideas 18 | 19 | Do you want to propose a new feature or enhance an existing one? 20 | Proposals and ideas are in markdown docs in the [`specs/`](specs/) directory. 21 | To create a new proposal, simply copy the spec [`template`](specs/template.md), 22 | name the file corresponding to the title of your proposal, and place it in the 23 | `specs/` directory. 24 | 25 | A good starting point to understand the structure is the [GitOps Engine Design spec](specs/design.md). 26 | 27 | We tried to answer frequently asked questions in a [separate FAQ document](docs/faq.md). 28 | 29 | ## Governance 30 | 31 | This project is licensed under the [Apache 2 license](LICENSE). 32 | 33 | The GitOps Engine follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). 34 | 35 | ## Get involved 36 | 37 | If you are as excited about GitOps and one common engine for it as we are, please get in touch. If you want to write code that's great, if you want to share feedback, ideas and use-cases, that's great too. 38 | 39 | Find us on the [#gitops channel][gitops-slack] on Kubernetes Slack (get an [invite here][kube-slack]). 40 | 41 | [gitops-slack]: https://kubernetes.slack.com/archives/CBT6N1ASG 42 | [kube-slack]: https://slack.k8s.io/ 43 | 44 | ### Meetings 45 | 46 | The developer team meets regularly, every 1st and 3rd Tuesday of the month, [16:00 UTC](http://time.unitarium.com/utc/16). 
Instructions, agenda and minutes can be found in [the meeting doc](https://docs.google.com/document/d/17AEZgv6yVuD4HS7_oNPiMKmS7Q6vjkhk6jH0YCELpRk/edit#). The meetings will be recorded and added to this [Youtube playlist](https://www.youtube.com/playlist?list=PLbx4FZ4kOKnvSQP394o5UdF9wL7FaQd-R). 47 | 48 | We look forward to seeing you at our meetings and hearing about your feedback and ideas there! 49 | 50 | ### Contributing to the effort 51 | 52 | At this stage we are interested in feedback, use-cases and help on the GitOps Engine. 53 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-succeeded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:15:16Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151066" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: c86e909c-f612-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - sleep 10 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Never 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: 
default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:15:16Z 52 | reason: PodCompleted 53 | status: "True" 54 | type: Initialized 55 | - lastProbeTime: null 56 | lastTransitionTime: 2018-12-02T09:15:30Z 57 | reason: PodCompleted 58 | status: "False" 59 | type: Ready 60 | - lastProbeTime: null 61 | lastTransitionTime: 2018-12-02T09:15:16Z 62 | status: "True" 63 | type: PodScheduled 64 | containerStatuses: 65 | - containerID: docker://acfb261d6c1fe8c543438a202de62cb06c137fa93a2d59262d764470e96f3195 66 | image: alpine:latest 67 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 68 | lastState: {} 69 | name: main 70 | ready: false 71 | restartCount: 0 72 | state: 73 | terminated: 74 | containerID: docker://acfb261d6c1fe8c543438a202de62cb06c137fa93a2d59262d764470e96f3195 75 | exitCode: 0 76 | finishedAt: 2018-12-02T09:15:29Z 77 | reason: Completed 78 | startedAt: 2018-12-02T09:15:19Z 79 | hostIP: 192.168.64.41 80 | phase: Succeeded 81 | podIP: 172.17.0.9 82 | qosClass: BestEffort 83 | startTime: 2018-12-02T09:15:16Z 84 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-failed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:17:56Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151243" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: 27c0fdf5-f613-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - exit 1 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 
| dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Never 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:17:56Z 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:17:56Z 56 | message: 'containers with unready status: [main]' 57 | reason: ContainersNotReady 58 | status: "False" 59 | type: Ready 60 | - lastProbeTime: null 61 | lastTransitionTime: 2018-12-02T09:17:56Z 62 | status: "True" 63 | type: PodScheduled 64 | containerStatuses: 65 | - containerID: docker://e5f5ce03ecf6d5ad7e131aac09053e5ef6212b067b3d10f3e39ef6e176a3ed60 66 | image: alpine:latest 67 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 68 | lastState: {} 69 | name: main 70 | ready: false 71 | restartCount: 0 72 | state: 73 | terminated: 74 | containerID: docker://e5f5ce03ecf6d5ad7e131aac09053e5ef6212b067b3d10f3e39ef6e176a3ed60 75 | exitCode: 1 76 | finishedAt: 2018-12-02T09:17:59Z 77 | reason: Error 78 | startedAt: 2018-12-02T09:17:59Z 79 | hostIP: 192.168.64.41 80 | phase: Failed 81 | podIP: 172.17.0.9 82 | qosClass: BestEffort 83 | startTime: 2018-12-02T09:17:56Z 84 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-running-not-ready.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | 
kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T10:30:57Z 5 | name: never-ready 6 | namespace: argocd 7 | resourceVersion: "156420" 8 | selfLink: /api/v1/namespaces/argocd/pods/never-ready 9 | uid: 5aa62a14-f61d-11e8-a058-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - sleep 9999 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | readinessProbe: 20 | failureThreshold: 3 21 | initialDelaySeconds: 10 22 | periodSeconds: 10 23 | successThreshold: 1 24 | tcpSocket: 25 | port: 8080 26 | timeoutSeconds: 1 27 | resources: {} 28 | terminationMessagePath: /dev/termination-log 29 | terminationMessagePolicy: File 30 | volumeMounts: 31 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 32 | name: default-token-f9jvj 33 | readOnly: true 34 | dnsPolicy: ClusterFirst 35 | nodeName: minikube 36 | restartPolicy: Always 37 | schedulerName: default-scheduler 38 | securityContext: {} 39 | serviceAccount: default 40 | serviceAccountName: default 41 | terminationGracePeriodSeconds: 30 42 | tolerations: 43 | - effect: NoExecute 44 | key: node.kubernetes.io/not-ready 45 | operator: Exists 46 | tolerationSeconds: 300 47 | - effect: NoExecute 48 | key: node.kubernetes.io/unreachable 49 | operator: Exists 50 | tolerationSeconds: 300 51 | volumes: 52 | - name: default-token-f9jvj 53 | secret: 54 | defaultMode: 420 55 | secretName: default-token-f9jvj 56 | status: 57 | conditions: 58 | - lastProbeTime: null 59 | lastTransitionTime: 2018-12-02T10:30:57Z 60 | status: "True" 61 | type: Initialized 62 | - lastProbeTime: null 63 | lastTransitionTime: 2018-12-02T10:30:57Z 64 | message: 'containers with unready status: [main]' 65 | reason: ContainersNotReady 66 | status: "False" 67 | type: Ready 68 | - lastProbeTime: null 69 | lastTransitionTime: 2018-12-02T10:30:57Z 70 | status: "True" 71 | type: PodScheduled 72 | containerStatuses: 73 | - containerID: 
docker://29bc9e85f48af23d5fdcd55fe347350245b584bc11d2b27ebce64d69f26d749a 74 | image: alpine:latest 75 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 76 | lastState: {} 77 | name: main 78 | ready: false 79 | restartCount: 0 80 | state: 81 | running: 82 | startedAt: 2018-12-02T10:30:59Z 83 | hostIP: 192.168.64.41 84 | phase: Running 85 | podIP: 172.17.0.9 86 | qosClass: BestEffort 87 | startTime: 2018-12-02T10:30:57Z 88 | -------------------------------------------------------------------------------- /pkg/health/health_ingress.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 7 | extv1beta1 "k8s.io/api/extensions/v1beta1" 8 | networkingv1 "k8s.io/api/networking/v1" 9 | networkingv1beta1 "k8s.io/api/networking/v1beta1" 10 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | ) 13 | 14 | func getIngressHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 15 | gvk := obj.GroupVersionKind() 16 | switch gvk { 17 | case networkingv1.SchemeGroupVersion.WithKind(kube.IngressKind): 18 | var ingress networkingv1.Ingress 19 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress) 20 | if err != nil { 21 | return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err) 22 | } 23 | return getNetworkingv1IngressHealth(&ingress) 24 | case networkingv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind): 25 | var ingress networkingv1beta1.Ingress 26 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress) 27 | if err != nil { 28 | return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err) 29 | } 30 | return getNetworkingv1beta1IngressHealth(&ingress) 31 | case extv1beta1.SchemeGroupVersion.WithKind(kube.IngressKind): 32 | var ingress 
extv1beta1.Ingress 33 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &ingress) 34 | if err != nil { 35 | return nil, fmt.Errorf("failed to convert unstructured Ingress to typed: %v", err) 36 | } 37 | return getExtv1beta1IngressHealth(&ingress) 38 | default: 39 | return nil, fmt.Errorf("unsupported Ingress GVK: %s", gvk) 40 | } 41 | } 42 | 43 | func getNetworkingv1IngressHealth(ingress *networkingv1.Ingress) (*HealthStatus, error) { 44 | health := HealthStatus{} 45 | if len(ingress.Status.LoadBalancer.Ingress) > 0 { 46 | health.Status = HealthStatusHealthy 47 | } else { 48 | health.Status = HealthStatusProgressing 49 | } 50 | return &health, nil 51 | } 52 | 53 | func getNetworkingv1beta1IngressHealth(ingress *networkingv1beta1.Ingress) (*HealthStatus, error) { 54 | health := HealthStatus{} 55 | if len(ingress.Status.LoadBalancer.Ingress) > 0 { 56 | health.Status = HealthStatusHealthy 57 | } else { 58 | health.Status = HealthStatusProgressing 59 | } 60 | return &health, nil 61 | } 62 | 63 | func getExtv1beta1IngressHealth(ingress *extv1beta1.Ingress) (*HealthStatus, error) { 64 | health := HealthStatus{} 65 | if len(ingress.Status.LoadBalancer.Ingress) > 0 { 66 | health.Status = HealthStatusHealthy 67 | } else { 68 | health.Status = HealthStatusProgressing 69 | } 70 | return &health, nil 71 | } 72 | -------------------------------------------------------------------------------- /pkg/health/testdata/deployment-progressing.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "4" 6 | kubectl.kubernetes.io/last-applied-configuration: | 7 | 
{"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}} 8 | creationTimestamp: 2018-07-18T04:40:44Z 9 | generation: 4 10 | labels: 11 | app.kubernetes.io/instance: guestbook-default 12 | name: guestbook-ui 13 | namespace: default 14 | resourceVersion: "12819" 15 | selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui 16 | uid: bb9af0c7-8a44-11e8-9e23-42010aa80010 17 | spec: 18 | progressDeadlineSeconds: 600 19 | replicas: 1 20 | revisionHistoryLimit: 10 21 | selector: 22 | matchLabels: 23 | app: guestbook-ui 24 | strategy: 25 | rollingUpdate: 26 | maxSurge: 25% 27 | maxUnavailable: 25% 28 | type: RollingUpdate 29 | template: 30 | metadata: 31 | creationTimestamp: null 32 | labels: 33 | app: guestbook-ui 34 | app.kubernetes.io/instance: guestbook-default 35 | spec: 36 | containers: 37 | - image: gcr.io/heptio-images/ks-guestbook-demo:0.3 38 | imagePullPolicy: IfNotPresent 39 | name: guestbook-ui 40 | ports: 41 | - containerPort: 80 42 | protocol: TCP 43 | resources: {} 44 | terminationMessagePath: /dev/termination-log 45 | terminationMessagePolicy: File 46 | dnsPolicy: ClusterFirst 47 | restartPolicy: Always 48 | schedulerName: default-scheduler 49 | securityContext: {} 50 | terminationGracePeriodSeconds: 30 51 | status: 52 | availableReplicas: 1 53 | conditions: 54 | - lastTransitionTime: 2018-07-18T04:48:48Z 55 | lastUpdateTime: 2018-07-18T04:48:48Z 56 | message: Deployment has minimum availability. 
57 | reason: MinimumReplicasAvailable 58 | status: "True" 59 | type: Available 60 | - lastTransitionTime: 2018-07-18T04:40:44Z 61 | lastUpdateTime: 2018-07-18T06:19:22Z 62 | message: ReplicaSet "guestbook-ui-75dd4d49d5" is progressing. 63 | reason: ReplicaSetUpdated 64 | status: "True" 65 | type: Progressing 66 | observedGeneration: 4 67 | readyReplicas: 1 68 | replicas: 2 69 | unavailableReplicas: 1 70 | updatedReplicas: 1 71 | -------------------------------------------------------------------------------- /pkg/health/testdata/deployment-degraded.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "4" 6 | kubectl.kubernetes.io/last-applied-configuration: | 7 | {"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}} 8 | creationTimestamp: 2018-07-18T04:40:44Z 9 | generation: 4 10 | labels: 11 | app.kubernetes.io/instance: guestbook-default 12 | name: guestbook-ui 13 | namespace: default 14 | resourceVersion: "13660" 15 | selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui 16 | uid: bb9af0c7-8a44-11e8-9e23-42010aa80010 17 | spec: 18 | progressDeadlineSeconds: 600 19 | replicas: 1 20 | revisionHistoryLimit: 10 21 | selector: 22 | matchLabels: 23 | app: guestbook-ui 24 | strategy: 25 | rollingUpdate: 26 | maxSurge: 25% 27 | maxUnavailable: 25% 28 | type: RollingUpdate 29 | template: 30 | metadata: 31 | creationTimestamp: null 32 | labels: 33 | app: 
guestbook-ui 34 | app.kubernetes.io/instance: guestbook-default 35 | spec: 36 | containers: 37 | - image: gcr.io/heptio-images/ks-guestbook-demo:0.3 38 | imagePullPolicy: IfNotPresent 39 | name: guestbook-ui 40 | ports: 41 | - containerPort: 80 42 | protocol: TCP 43 | resources: {} 44 | terminationMessagePath: /dev/termination-log 45 | terminationMessagePolicy: File 46 | dnsPolicy: ClusterFirst 47 | restartPolicy: Always 48 | schedulerName: default-scheduler 49 | securityContext: {} 50 | terminationGracePeriodSeconds: 30 51 | status: 52 | availableReplicas: 1 53 | conditions: 54 | - lastTransitionTime: 2018-07-18T04:48:48Z 55 | lastUpdateTime: 2018-07-18T04:48:48Z 56 | message: Deployment has minimum availability. 57 | reason: MinimumReplicasAvailable 58 | status: "True" 59 | type: Available 60 | - lastTransitionTime: 2018-07-18T06:29:23Z 61 | lastUpdateTime: 2018-07-18T06:29:23Z 62 | message: ReplicaSet "guestbook-ui-75dd4d49d5" has timed out progressing. 63 | reason: ProgressDeadlineExceeded 64 | status: "False" 65 | type: Progressing 66 | observedGeneration: 4 67 | readyReplicas: 1 68 | replicas: 2 69 | unavailableReplicas: 1 70 | updatedReplicas: 1 71 | -------------------------------------------------------------------------------- /pkg/health/testdata/deployment-suspended.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | deployment.kubernetes.io/revision: "4" 6 | kubectl.kubernetes.io/last-applied-configuration: | 7 | 
{"apiVersion":"apps/v1beta2","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"guestbook-default"},"name":"guestbook-ui","namespace":"default"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"guestbook-ui"}},"template":{"metadata":{"labels":{"app":"guestbook-ui","app.kubernetes.io/instance":"guestbook-default"}},"spec":{"containers":[{"image":"gcr.io/heptio-images/ks-guestbook-demo:0.3","name":"guestbook-ui","ports":[{"containerPort":80}]}]}}}} 8 | creationTimestamp: 2018-07-18T04:40:44Z 9 | generation: 4 10 | labels: 11 | app.kubernetes.io/instance: guestbook-default 12 | name: guestbook-ui 13 | namespace: default 14 | resourceVersion: "12819" 15 | selfLink: /apis/extensions/v1beta1/namespaces/default/deployments/guestbook-ui 16 | uid: bb9af0c7-8a44-11e8-9e23-42010aa80010 17 | spec: 18 | progressDeadlineSeconds: 600 19 | replicas: 1 20 | revisionHistoryLimit: 10 21 | selector: 22 | matchLabels: 23 | app: guestbook-ui 24 | strategy: 25 | rollingUpdate: 26 | maxSurge: 25% 27 | maxUnavailable: 25% 28 | type: RollingUpdate 29 | paused: true 30 | template: 31 | metadata: 32 | creationTimestamp: null 33 | labels: 34 | app: guestbook-ui 35 | app.kubernetes.io/instance: guestbook-default 36 | spec: 37 | containers: 38 | - image: gcr.io/heptio-images/ks-guestbook-demo:0.3 39 | imagePullPolicy: IfNotPresent 40 | name: guestbook-ui 41 | ports: 42 | - containerPort: 80 43 | protocol: TCP 44 | resources: {} 45 | terminationMessagePath: /dev/termination-log 46 | terminationMessagePolicy: File 47 | dnsPolicy: ClusterFirst 48 | restartPolicy: Always 49 | schedulerName: default-scheduler 50 | securityContext: {} 51 | terminationGracePeriodSeconds: 30 52 | status: 53 | availableReplicas: 1 54 | conditions: 55 | - lastTransitionTime: 2018-07-18T04:48:48Z 56 | lastUpdateTime: 2018-07-18T04:48:48Z 57 | message: Deployment has minimum availability. 
58 | reason: MinimumReplicasAvailable 59 | status: "True" 60 | type: Available 61 | - lastTransitionTime: 2018-07-18T04:40:44Z 62 | lastUpdateTime: 2018-07-18T06:19:22Z 63 | message: ReplicaSet "guestbook-ui-75dd4d49d5" is progressing. 64 | reason: ReplicaSetUpdated 65 | status: "True" 66 | type: Progressing 67 | observedGeneration: 4 68 | readyReplicas: 1 69 | replicas: 2 70 | unavailableReplicas: 1 71 | updatedReplicas: 1 72 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-crashloop.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:19:36Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151454" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: 63674389-f613-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - exit 1 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Always 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:19:36Z 52 
| status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:19:36Z 56 | message: 'containers with unready status: [main]' 57 | reason: ContainersNotReady 58 | status: "False" 59 | type: Ready 60 | - lastProbeTime: null 61 | lastTransitionTime: 2018-12-02T09:19:36Z 62 | status: "True" 63 | type: PodScheduled 64 | containerStatuses: 65 | - containerID: docker://c3aa0064b95a26045999b99c268e715a1c64201e816f1279ac06638778547bb8 66 | image: alpine:latest 67 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 68 | lastState: 69 | terminated: 70 | containerID: docker://c3aa0064b95a26045999b99c268e715a1c64201e816f1279ac06638778547bb8 71 | exitCode: 1 72 | finishedAt: 2018-12-02T09:20:25Z 73 | reason: Error 74 | startedAt: 2018-12-02T09:20:25Z 75 | name: main 76 | ready: false 77 | restartCount: 3 78 | state: 79 | waiting: 80 | message: Back-off 40s restarting failed container=main pod=my-pod_argocd(63674389-f613-11e8-a057-fe5f49266390) 81 | reason: CrashLoopBackOff 82 | hostIP: 192.168.64.41 83 | phase: Running 84 | podIP: 172.17.0.9 85 | qosClass: BestEffort 86 | startTime: 2018-12-02T09:19:36Z 87 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-running-restart-onfailure.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:47:10Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "153419" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: 3cf9325e-f617-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - exit 1 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: 
/var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: OnFailure 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:47:10Z 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:47:10Z 56 | message: 'containers with unready status: [main]' 57 | reason: ContainersNotReady 58 | status: "False" 59 | type: Ready 60 | - lastProbeTime: null 61 | lastTransitionTime: 2018-12-02T09:47:10Z 62 | status: "True" 63 | type: PodScheduled 64 | containerStatuses: 65 | - containerID: docker://977dcb5c66325385f6df86276a3bcc2419e6aecc0b6682ab90853bd30f21fa51 66 | image: alpine:latest 67 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 68 | lastState: 69 | terminated: 70 | containerID: docker://977dcb5c66325385f6df86276a3bcc2419e6aecc0b6682ab90853bd30f21fa51 71 | exitCode: 1 72 | finishedAt: 2018-12-02T09:48:54Z 73 | reason: Error 74 | startedAt: 2018-12-02T09:48:54Z 75 | name: main 76 | ready: false 77 | restartCount: 4 78 | state: 79 | waiting: 80 | message: Back-off 1m20s restarting failed container=main pod=my-pod_argocd(3cf9325e-f617-11e8-a057-fe5f49266390) 81 | reason: CrashLoopBackOff 82 | hostIP: 192.168.64.41 83 | phase: Running 84 | podIP: 172.17.0.9 85 | qosClass: BestEffort 86 | 
startTime: 2018-12-02T09:47:10Z 87 | -------------------------------------------------------------------------------- /pkg/health/testdata/pod-error.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: 2018-12-02T09:19:36Z 5 | name: my-pod 6 | namespace: argocd 7 | resourceVersion: "151396" 8 | selfLink: /api/v1/namespaces/argocd/pods/my-pod 9 | uid: 63674389-f613-11e8-a057-fe5f49266390 10 | spec: 11 | containers: 12 | - command: 13 | - sh 14 | - -c 15 | - exit 1 16 | image: alpine:latest 17 | imagePullPolicy: Always 18 | name: main 19 | resources: {} 20 | terminationMessagePath: /dev/termination-log 21 | terminationMessagePolicy: File 22 | volumeMounts: 23 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 24 | name: default-token-f9jvj 25 | readOnly: true 26 | dnsPolicy: ClusterFirst 27 | nodeName: minikube 28 | restartPolicy: Always 29 | schedulerName: default-scheduler 30 | securityContext: {} 31 | serviceAccount: default 32 | serviceAccountName: default 33 | terminationGracePeriodSeconds: 30 34 | tolerations: 35 | - effect: NoExecute 36 | key: node.kubernetes.io/not-ready 37 | operator: Exists 38 | tolerationSeconds: 300 39 | - effect: NoExecute 40 | key: node.kubernetes.io/unreachable 41 | operator: Exists 42 | tolerationSeconds: 300 43 | volumes: 44 | - name: default-token-f9jvj 45 | secret: 46 | defaultMode: 420 47 | secretName: default-token-f9jvj 48 | status: 49 | conditions: 50 | - lastProbeTime: null 51 | lastTransitionTime: 2018-12-02T09:19:36Z 52 | status: "True" 53 | type: Initialized 54 | - lastProbeTime: null 55 | lastTransitionTime: 2018-12-02T09:19:36Z 56 | message: 'containers with unready status: [main]' 57 | reason: ContainersNotReady 58 | status: "False" 59 | type: Ready 60 | - lastProbeTime: null 61 | lastTransitionTime: 2018-12-02T09:19:36Z 62 | status: "True" 63 | type: PodScheduled 64 | containerStatuses: 65 | - containerID: 
docker://fc8dca42fb4f35dac154db3ed45ad7952523345d470b2992779a03c332589ac4 66 | image: alpine:latest 67 | imageID: docker-pullable://alpine@sha256:621c2f39f8133acb8e64023a94dbdf0d5ca81896102b9e57c0dc184cadaf5528 68 | lastState: 69 | terminated: 70 | containerID: docker://54fe1af9c2c0b61b3697abfeb33adc9ce76ec192b3703b46278d9df7573dff72 71 | exitCode: 1 72 | finishedAt: 2018-12-02T09:19:41Z 73 | reason: Error 74 | startedAt: 2018-12-02T09:19:41Z 75 | name: main 76 | ready: false 77 | restartCount: 2 78 | state: 79 | terminated: 80 | containerID: docker://fc8dca42fb4f35dac154db3ed45ad7952523345d470b2992779a03c332589ac4 81 | exitCode: 1 82 | finishedAt: 2018-12-02T09:19:56Z 83 | reason: Error 84 | startedAt: 2018-12-02T09:19:56Z 85 | hostIP: 192.168.64.41 86 | phase: Running 87 | podIP: 172.17.0.9 88 | qosClass: BestEffort 89 | startTime: 2018-12-02T09:19:36Z 90 | -------------------------------------------------------------------------------- /pkg/health/health_apiservice.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | "k8s.io/apimachinery/pkg/runtime" 9 | apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" 10 | apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" 11 | ) 12 | 13 | func getAPIServiceHealth(obj *unstructured.Unstructured) (*HealthStatus, error) { 14 | gvk := obj.GroupVersionKind() 15 | switch gvk { 16 | case apiregistrationv1.SchemeGroupVersion.WithKind(kube.APIServiceKind): 17 | var apiService apiregistrationv1.APIService 18 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService) 19 | if err != nil { 20 | return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err) 21 | } 22 | return getApiregistrationv1APIServiceHealth(&apiService) 23 | case 
apiregistrationv1beta1.SchemeGroupVersion.WithKind(kube.APIServiceKind): 24 | var apiService apiregistrationv1beta1.APIService 25 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, &apiService) 26 | if err != nil { 27 | return nil, fmt.Errorf("failed to convert unstructured APIService to typed: %v", err) 28 | } 29 | return getApiregistrationv1beta1APIServiceHealth(&apiService) 30 | default: 31 | return nil, fmt.Errorf("unsupported APIService GVK: %s", gvk) 32 | } 33 | } 34 | 35 | func getApiregistrationv1APIServiceHealth(apiservice *apiregistrationv1.APIService) (*HealthStatus, error) { 36 | for _, c := range apiservice.Status.Conditions { 37 | switch c.Type { 38 | case apiregistrationv1.Available: 39 | if c.Status == apiregistrationv1.ConditionTrue { 40 | return &HealthStatus{ 41 | Status: HealthStatusHealthy, 42 | Message: fmt.Sprintf("%s: %s", c.Reason, c.Message), 43 | }, nil 44 | } else { 45 | return &HealthStatus{ 46 | Status: HealthStatusProgressing, 47 | Message: fmt.Sprintf("%s: %s", c.Reason, c.Message), 48 | }, nil 49 | } 50 | } 51 | } 52 | return &HealthStatus{ 53 | Status: HealthStatusProgressing, 54 | Message: "Waiting to be processed", 55 | }, nil 56 | } 57 | 58 | func getApiregistrationv1beta1APIServiceHealth(apiservice *apiregistrationv1beta1.APIService) (*HealthStatus, error) { 59 | for _, c := range apiservice.Status.Conditions { 60 | switch c.Type { 61 | case apiregistrationv1beta1.Available: 62 | if c.Status == apiregistrationv1beta1.ConditionTrue { 63 | return &HealthStatus{ 64 | Status: HealthStatusHealthy, 65 | Message: fmt.Sprintf("%s: %s", c.Reason, c.Message), 66 | }, nil 67 | } else { 68 | return &HealthStatus{ 69 | Status: HealthStatusProgressing, 70 | Message: fmt.Sprintf("%s: %s", c.Reason, c.Message), 71 | }, nil 72 | } 73 | } 74 | } 75 | return &HealthStatus{ 76 | Status: HealthStatusProgressing, 77 | Message: "Waiting to be processed", 78 | }, nil 79 | } 80 | 
-------------------------------------------------------------------------------- /pkg/health/testdata/pod-imagepullbackoff.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: "2019-04-04T16:06:55Z" 5 | generateName: guestbook-ui-errimagepullbackoff-66cfffb669- 6 | labels: 7 | app: guestbook-ui 8 | pod-template-hash: "2279996225" 9 | name: guestbook-ui-errimagepullbackoff-66cfffb669-45w2j 10 | namespace: default 11 | ownerReferences: 12 | - apiVersion: extensions/v1beta1 13 | blockOwnerDeletion: true 14 | controller: true 15 | kind: ReplicaSet 16 | name: guestbook-ui-errimagepullbackoff-66cfffb669 17 | uid: ab0e507e-56f3-11e9-8721-025000000001 18 | resourceVersion: "339374" 19 | selfLink: /api/v1/namespaces/default/pods/guestbook-ui-errimagepullbackoff-66cfffb669-45w2j 20 | uid: ab1a9982-56f3-11e9-8721-025000000001 21 | spec: 22 | containers: 23 | - image: gcr.io/heptio-images/ks-guestbook-demo:0.3 24 | imagePullPolicy: IfNotPresent 25 | name: errimagepullbackoff 26 | resources: {} 27 | terminationMessagePath: /dev/termination-log 28 | terminationMessagePolicy: File 29 | volumeMounts: 30 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 31 | name: default-token-7hn5p 32 | readOnly: true 33 | dnsPolicy: ClusterFirst 34 | nodeName: docker-for-desktop 35 | restartPolicy: Always 36 | schedulerName: default-scheduler 37 | securityContext: {} 38 | serviceAccount: default 39 | serviceAccountName: default 40 | terminationGracePeriodSeconds: 30 41 | tolerations: 42 | - effect: NoExecute 43 | key: node.kubernetes.io/not-ready 44 | operator: Exists 45 | tolerationSeconds: 300 46 | - effect: NoExecute 47 | key: node.kubernetes.io/unreachable 48 | operator: Exists 49 | tolerationSeconds: 300 50 | volumes: 51 | - name: default-token-7hn5p 52 | secret: 53 | defaultMode: 420 54 | secretName: default-token-7hn5p 55 | status: 56 | conditions: 57 | - lastProbeTime: null 
58 | lastTransitionTime: "2019-04-04T16:06:56Z" 59 | status: "True" 60 | type: Initialized 61 | - lastProbeTime: null 62 | lastTransitionTime: "2019-04-04T16:06:56Z" 63 | message: 'containers with unready status: [errimagepullbackoff]' 64 | reason: ContainersNotReady 65 | status: "False" 66 | type: Ready 67 | - lastProbeTime: null 68 | lastTransitionTime: "2019-04-04T16:06:55Z" 69 | status: "True" 70 | type: PodScheduled 71 | containerStatuses: 72 | - image: gcr.io/heptio-images/ks-guestbook-demo:0.3 73 | imageID: "" 74 | lastState: {} 75 | name: errimagepullbackoff 76 | ready: false 77 | restartCount: 0 78 | state: 79 | waiting: 80 | message: Back-off pulling image "gcr.io/heptio-images/ks-guestbook-demo:0.3" 81 | reason: ImagePullBackOff 82 | hostIP: 192.168.65.3 83 | phase: Pending 84 | podIP: 10.1.0.136 85 | qosClass: BestEffort 86 | startTime: "2019-04-04T16:06:56Z" 87 | -------------------------------------------------------------------------------- /specs/template.md: -------------------------------------------------------------------------------- 1 | 6 | # Title 7 | 8 | This is the title of the spec. Keep it simple and descriptive. A good title 9 | can help communicate what the spec is and should be considered as part of any 10 | review. 11 | 12 | The title should be lowercased and spaces/punctuation should be replaced with 13 | `-`. 14 | 15 | ## Summary 16 | 17 | The `Summary` section is incredibly important for producing high quality 18 | user-focused documentation such as release notes or a development roadmap. It 19 | should be possible to collect this information before implementation begins in 20 | order to avoid requiring implementors to split their attention between writing 21 | release notes and implementing the feature itself. Ensure that the tone and 22 | content of the `Summary` section is useful for a wide audience. 23 | 24 | A good summary is probably at least a paragraph in length. 
25 | 26 | ## Goals 27 | 28 | List the specific goals of the spec. How will we know that this has succeeded? 29 | 30 | ## Non-Goals 31 | 32 | What is out of scope for this spec? Listing non-goals helps to focus 33 | discussion and make progress. 34 | 35 | ## Proposal 36 | 37 | This is where we get down to the nitty gritty of what the proposal actually is. 38 | 39 | ### User Stories [optional] 40 | 41 | Detail the things that people will be able to do if this spec is implemented. 42 | Include as much detail as possible so that people can understand the "how" of 43 | the system. The goal here is to make this feel real for users without getting 44 | bogged down. 45 | 46 | #### Story 1 47 | 48 | #### Story 2 49 | 50 | ### Implementation Details/Notes/Constraints [optional] 51 | 52 | What are the caveats to the implementation? What are some important details 53 | that didn't come across above. Go into as much detail as necessary here. 54 | This might be a good place to talk about core concepts and how they relate. 55 | 56 | ### Risks and Mitigations 57 | 58 | What are the risks of this proposal and how do we mitigate them. Think broadly. 59 | For example, consider both security and how this will impact the larger 60 | kubernetes ecosystem. 61 | 62 | How will security be reviewed and by whom? How will UX be reviewed and by 63 | whom? 64 | 65 | Consider including folks that also work outside the SIG or subproject. 66 | 67 | ## Design Details 68 | 69 | ### Upgrade / Downgrade / Migration Strategy 70 | 71 | If applicable, how will the component be upgraded and downgraded? Does this 72 | spec propose migrating users from one component or behaviour to another? 73 | 74 | ### Public API changes 75 | 76 | Does the spec propose a public API-facing change? If so, describe the impact of 77 | changes. 78 | 79 | ## Drawbacks [optional] 80 | 81 | Why should this spec _not_ be implemented. 
82 | 83 | ## Alternatives [optional] 84 | 85 | Similar to the `Drawbacks` section the `Alternatives` section is used to 86 | highlight and record other possible approaches to delivering the value proposed 87 | by the spec. 88 | -------------------------------------------------------------------------------- /pkg/health/testdata/hpa-v2beta1-healthy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | annotations: 5 | kubectl.kubernetes.io/last-applied-configuration: > 6 | {"apiVersion":"autoscaling/v2beta1","kind":"HorizontalPodAutoscaler","metadata":{"annotations":{},"labels":{"app.kubernetes.io/component":"repo-server","app.kubernetes.io/instance":"argocd","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"argocd-repo-server-hpa","app.kubernetes.io/part-of":"argocd","argocd.argoproj.io/instance":"argocd","helm.sh/chart":"argo-cd-2.5.0"},"name":"argocd-repo-server-hpa","namespace":"argocd"},"spec":{"maxReplicas":40,"metrics":[{"resource":{"name":"memory","targetAverageUtilization":150},"type":"Resource"},{"resource":{"name":"cpu","targetAverageUtilization":80},"type":"Resource"}],"minReplicas":1,"scaleTargetRef":{"apiVersion":"apps/v1","kind":"Deployment","name":"argocd-repo-server"}}} 7 | meta.helm.sh/release-name: argocd 8 | meta.helm.sh/release-namespace: argocd 9 | creationTimestamp: '2020-09-01T23:37:42Z' 10 | labels: 11 | app.kubernetes.io/component: repo-server 12 | app.kubernetes.io/instance: argocd 13 | app.kubernetes.io/managed-by: Helm 14 | app.kubernetes.io/name: argocd-repo-server-hpa 15 | app.kubernetes.io/part-of: argocd 16 | argocd.argoproj.io/instance: argocd 17 | helm.sh/chart: argo-cd-2.5.0 18 | name: argocd-repo-server-hpa 19 | namespace: argocd 20 | resourceVersion: '65843573' 21 | selfLink: >- 22 | /apis/autoscaling/v2beta1/namespaces/argocd/horizontalpodautoscalers/argocd-repo-server-hpa 23 | uid: 
ca7e0de8-7eb1-404a-b2f9-b9702b88ca8b 24 | spec: 25 | maxReplicas: 40 26 | metrics: 27 | - resource: 28 | name: memory 29 | targetAverageUtilization: 150 30 | type: Resource 31 | - resource: 32 | name: cpu 33 | targetAverageUtilization: 80 34 | type: Resource 35 | minReplicas: 1 36 | scaleTargetRef: 37 | apiVersion: apps/v1 38 | kind: Deployment 39 | name: argocd-repo-server 40 | status: 41 | conditions: 42 | - lastTransitionTime: '2020-09-16T17:59:42Z' 43 | message: recommended size matches current size 44 | reason: ReadyForNewScale 45 | status: 'True' 46 | type: AbleToScale 47 | - lastTransitionTime: '2020-11-09T21:31:12Z' 48 | message: >- 49 | the HPA was able to successfully calculate a replica count from memory 50 | resource utilization (percentage of request) 51 | reason: ValidMetricFound 52 | status: 'True' 53 | type: ScalingActive 54 | - lastTransitionTime: '2020-11-14T23:12:46Z' 55 | message: the desired count is within the acceptable range 56 | reason: DesiredWithinRange 57 | status: 'False' 58 | type: ScalingLimited 59 | currentMetrics: 60 | - resource: 61 | currentAverageUtilization: 12 62 | currentAverageValue: '65454080' 63 | name: memory 64 | type: Resource 65 | - resource: 66 | currentAverageUtilization: 2 67 | currentAverageValue: 12m 68 | name: cpu 69 | type: Resource 70 | currentReplicas: 1 71 | desiredReplicas: 1 72 | lastScaleTime: '2020-12-07T22:59:53Z' -------------------------------------------------------------------------------- /pkg/utils/kube/kubetest/mock.go: -------------------------------------------------------------------------------- 1 | package kubetest 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "k8s.io/apimachinery/pkg/types" 11 | "k8s.io/apimachinery/pkg/watch" 12 | "k8s.io/client-go/dynamic" 13 | "k8s.io/client-go/rest" 14 | cmdutil "k8s.io/kubectl/pkg/cmd/util" 15 | 16 | 
"github.com/argoproj/gitops-engine/pkg/utils/kube" 17 | ) 18 | 19 | type KubectlOutput struct { 20 | Output string 21 | Err error 22 | } 23 | 24 | type MockKubectlCmd struct { 25 | APIResources []kube.APIResourceInfo 26 | Commands map[string]KubectlOutput 27 | Events chan watch.Event 28 | lastValidate bool 29 | Version string 30 | DynamicClient dynamic.Interface 31 | APIGroups []metav1.APIGroup 32 | lastValidateLock sync.RWMutex 33 | } 34 | 35 | func (k *MockKubectlCmd) SetLastValidate(validate bool) { 36 | k.lastValidateLock.Lock() 37 | k.lastValidate = validate 38 | k.lastValidateLock.Unlock() 39 | } 40 | 41 | func (k *MockKubectlCmd) GetLastValidate() bool { 42 | k.lastValidateLock.RLock() 43 | validate := k.lastValidate 44 | k.lastValidateLock.RUnlock() 45 | return validate 46 | } 47 | 48 | func (k *MockKubectlCmd) NewDynamicClient(config *rest.Config) (dynamic.Interface, error) { 49 | return k.DynamicClient, nil 50 | } 51 | 52 | func (k *MockKubectlCmd) GetAPIResources(config *rest.Config, resourceFilter kube.ResourceFilter) ([]kube.APIResourceInfo, error) { 53 | return k.APIResources, nil 54 | } 55 | 56 | func (k *MockKubectlCmd) GetResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string) (*unstructured.Unstructured, error) { 57 | return nil, nil 58 | } 59 | 60 | func (k *MockKubectlCmd) PatchResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, patchType types.PatchType, patchBytes []byte, subresources ...string) (*unstructured.Unstructured, error) { 61 | return nil, nil 62 | } 63 | 64 | func (k *MockKubectlCmd) DeleteResource(ctx context.Context, config *rest.Config, gvk schema.GroupVersionKind, name string, namespace string, deleteOptions metav1.DeleteOptions) error { 65 | command, ok := k.Commands[name] 66 | if !ok { 67 | return nil 68 | } 69 | return command.Err 70 | } 71 | 72 | func (k *MockKubectlCmd) ApplyResource(ctx context.Context, config 
*rest.Config, obj *unstructured.Unstructured, namespace string, dryRunStrategy cmdutil.DryRunStrategy, force, validate bool) (string, error) { 73 | k.SetLastValidate(validate) 74 | command, ok := k.Commands[obj.GetName()] 75 | if !ok { 76 | return "", nil 77 | } 78 | return command.Output, command.Err 79 | } 80 | 81 | // ConvertToVersion converts an unstructured object into the specified group/version 82 | func (k *MockKubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group, version string) (*unstructured.Unstructured, error) { 83 | return obj, nil 84 | } 85 | 86 | func (k *MockKubectlCmd) GetServerVersion(config *rest.Config) (string, error) { 87 | return k.Version, nil 88 | } 89 | 90 | func (k *MockKubectlCmd) GetAPIGroups(config *rest.Config) ([]metav1.APIGroup, error) { 91 | return k.APIGroups, nil 92 | } 93 | 94 | func (k *MockKubectlCmd) SetOnKubectlRun(onKubectlRun kube.OnKubectlRunFunc) { 95 | } 96 | -------------------------------------------------------------------------------- /pkg/cache/resource.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 9 | 10 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 11 | ) 12 | 13 | // Resource holds the information about Kubernetes resource, ownership references and optional information 14 | type Resource struct { 15 | // ResourceVersion holds most recent observed resource version 16 | ResourceVersion string 17 | // Resource reference 18 | Ref v1.ObjectReference 19 | // References to resource owners 20 | OwnerRefs []metav1.OwnerReference 21 | // Optional creation timestamp of the resource 22 | CreationTimestamp *metav1.Time 23 | // Optional additional information about the resource 24 | Info interface{} 25 | // Optional whole resource manifest 26 | Resource *unstructured.Unstructured 27 | 
28 | // answers if resource is inferred parent of provided resource 29 | isInferredParentOf func(key kube.ResourceKey) bool 30 | } 31 | 32 | func (r *Resource) ResourceKey() kube.ResourceKey { 33 | return kube.NewResourceKey(r.Ref.GroupVersionKind().Group, r.Ref.Kind, r.Ref.Namespace, r.Ref.Name) 34 | } 35 | 36 | func (r *Resource) isParentOf(child *Resource) bool { 37 | for i, ownerRef := range child.OwnerRefs { 38 | 39 | // backfill UID of inferred owner child references 40 | if ownerRef.UID == "" && r.Ref.Kind == ownerRef.Kind && r.Ref.APIVersion == ownerRef.APIVersion && r.Ref.Name == ownerRef.Name { 41 | ownerRef.UID = r.Ref.UID 42 | child.OwnerRefs[i] = ownerRef 43 | return true 44 | } 45 | 46 | if r.Ref.UID == ownerRef.UID { 47 | return true 48 | } 49 | } 50 | 51 | return false 52 | } 53 | 54 | // setOwnerRef adds or removes specified owner reference 55 | func (r *Resource) setOwnerRef(ref metav1.OwnerReference, add bool) { 56 | index := -1 57 | for i, item := range r.OwnerRefs { 58 | if item.UID == ref.UID { 59 | index = i 60 | break 61 | } 62 | } 63 | added := index > -1 64 | if add != added { 65 | if add { 66 | r.OwnerRefs = append(r.OwnerRefs, ref) 67 | } else { 68 | r.OwnerRefs = append(r.OwnerRefs[:index], r.OwnerRefs[index+1:]...) 
69 | } 70 | } 71 | } 72 | 73 | func (r *Resource) toOwnerRef() metav1.OwnerReference { 74 | return metav1.OwnerReference{UID: r.Ref.UID, Name: r.Ref.Name, Kind: r.Ref.Kind, APIVersion: r.Ref.APIVersion} 75 | } 76 | 77 | func newResourceKeySet(set map[kube.ResourceKey]bool, keys ...kube.ResourceKey) map[kube.ResourceKey]bool { 78 | newSet := make(map[kube.ResourceKey]bool) 79 | for k, v := range set { 80 | newSet[k] = v 81 | } 82 | for i := range keys { 83 | newSet[keys[i]] = true 84 | } 85 | return newSet 86 | } 87 | 88 | func (r *Resource) iterateChildren(ns map[kube.ResourceKey]*Resource, parents map[kube.ResourceKey]bool, action func(err error, child *Resource, namespaceResources map[kube.ResourceKey]*Resource)) { 89 | for childKey, child := range ns { 90 | if r.isParentOf(ns[childKey]) { 91 | if parents[childKey] { 92 | key := r.ResourceKey() 93 | action(fmt.Errorf("circular dependency detected. %s is child and parent of %s", childKey.String(), key.String()), child, ns) 94 | } else { 95 | action(nil, child, ns) 96 | child.iterateChildren(ns, newResourceKeySet(parents, r.ResourceKey()), action) 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /pkg/sync/hook/hook_test.go: -------------------------------------------------------------------------------- 1 | package hook 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | 9 | "github.com/argoproj/gitops-engine/pkg/sync/common" 10 | . 
"github.com/argoproj/gitops-engine/pkg/utils/testing" 11 | ) 12 | 13 | func TestNoHooks(t *testing.T) { 14 | obj := &unstructured.Unstructured{} 15 | assert.False(t, IsHook(obj)) 16 | assert.False(t, Skip(obj)) 17 | assert.Nil(t, Types(obj)) 18 | } 19 | 20 | func TestOneHook(t *testing.T) { 21 | hookTypesString := []string{"PreSync", "Sync", "PostSync", "SyncFail"} 22 | hookTypes := []common.HookType{common.HookTypePreSync, common.HookTypeSync, common.HookTypePostSync, common.HookTypeSyncFail} 23 | for i, hook := range hookTypesString { 24 | obj := example(hook) 25 | assert.True(t, IsHook(obj)) 26 | assert.False(t, Skip(obj)) 27 | assert.Equal(t, []common.HookType{hookTypes[i]}, Types(obj)) 28 | } 29 | } 30 | 31 | // peculiar case of something marked with "Skip" cannot, by definition, be a hook 32 | // IMHO this is bad design as it conflates a flag on something that can never be a hook, with something that is 33 | // always a hook, creating a nasty exception we always need to check for, and a bunch of horrible edge cases 34 | func TestSkipHook(t *testing.T) { 35 | obj := example("Skip") 36 | assert.False(t, IsHook(obj)) 37 | assert.True(t, Skip(obj)) 38 | assert.Equal(t, []common.HookType{common.HookTypeSkip}, Types(obj)) 39 | } 40 | 41 | // we treat garbage as the user intended you to be a hook, but spelled it wrong, so you are a hook, but we don't 42 | // know what phase you're a part of 43 | func TestGarbageHook(t *testing.T) { 44 | obj := example("Garbage") 45 | assert.True(t, IsHook(obj)) 46 | assert.False(t, Skip(obj)) 47 | assert.Nil(t, Types(obj)) 48 | } 49 | 50 | func TestTwoHooks(t *testing.T) { 51 | obj := example("PreSync,PostSync") 52 | assert.True(t, IsHook(obj)) 53 | assert.False(t, Skip(obj)) 54 | assert.ElementsMatch(t, []common.HookType{common.HookTypePreSync, common.HookTypePostSync}, Types(obj)) 55 | } 56 | 57 | func TestDupHookTypes(t *testing.T) { 58 | assert.Equal(t, []common.HookType{common.HookTypeSync}, Types(example("Sync,Sync"))) 59 | } 
60 | 61 | // horrible edge case 62 | func TestSkipAndHook(t *testing.T) { 63 | obj := example("Skip,PreSync,PostSync") 64 | assert.True(t, IsHook(obj)) 65 | assert.False(t, Skip(obj)) 66 | assert.ElementsMatch(t, []common.HookType{common.HookTypeSkip, common.HookTypePreSync, common.HookTypePostSync}, Types(obj)) 67 | } 68 | 69 | func TestGarbageAndHook(t *testing.T) { 70 | obj := example("Sync,Garbage") 71 | assert.True(t, IsHook(obj)) 72 | assert.False(t, Skip(obj)) 73 | assert.Equal(t, []common.HookType{common.HookTypeSync}, Types(obj)) 74 | } 75 | 76 | func TestHelmHook(t *testing.T) { 77 | obj := Annotate(NewPod(), "helm.sh/hook", "pre-install") 78 | assert.True(t, IsHook(obj)) 79 | assert.False(t, Skip(obj)) 80 | assert.Equal(t, []common.HookType{common.HookTypePreSync}, Types(obj)) 81 | } 82 | 83 | func TestGarbageHelmHook(t *testing.T) { 84 | obj := Annotate(NewPod(), "helm.sh/hook", "garbage") 85 | assert.True(t, IsHook(obj)) 86 | assert.False(t, Skip(obj)) 87 | assert.Nil(t, Types(obj)) 88 | } 89 | 90 | // we should ignore Helm hooks if we have an Argo CD hook 91 | func TestBothHooks(t *testing.T) { 92 | obj := Annotate(example("Sync"), "helm.sh/hook", "pre-install") 93 | assert.Equal(t, []common.HookType{common.HookTypeSync}, Types(obj)) 94 | } 95 | 96 | func example(hook string) *unstructured.Unstructured { 97 | return Annotate(NewPod(), "argocd.argoproj.io/hook", hook) 98 | } 99 | -------------------------------------------------------------------------------- /pkg/engine/engine.go: -------------------------------------------------------------------------------- 1 | /* 2 | The package provides high-level interface that leverages "pkg/cache", "pkg/sync", "pkg/health" and "pkg/diff" packages 3 | and "implements" GitOps. 4 | 5 | Example 6 | 7 | The https://github.com/argoproj/gitops-engine/tree/master/agent demonstrates how to use the engine. 
8 | */ 9 | 10 | package engine 11 | 12 | import ( 13 | "context" 14 | "fmt" 15 | "time" 16 | 17 | "github.com/go-logr/logr" 18 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 19 | "k8s.io/client-go/rest" 20 | 21 | "github.com/argoproj/gitops-engine/pkg/cache" 22 | "github.com/argoproj/gitops-engine/pkg/diff" 23 | "github.com/argoproj/gitops-engine/pkg/sync" 24 | "github.com/argoproj/gitops-engine/pkg/sync/common" 25 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 26 | ) 27 | 28 | const ( 29 | operationRefreshTimeout = time.Second * 1 30 | ) 31 | 32 | type StopFunc func() 33 | 34 | type GitOpsEngine interface { 35 | // Run initializes engine 36 | Run() (StopFunc, error) 37 | // Synchronizes resources in the cluster 38 | Sync(ctx context.Context, resources []*unstructured.Unstructured, isManaged func(r *cache.Resource) bool, revision string, namespace string, opts ...sync.SyncOpt) ([]common.ResourceSyncResult, error) 39 | } 40 | 41 | type gitOpsEngine struct { 42 | config *rest.Config 43 | cache cache.ClusterCache 44 | kubectl kube.Kubectl 45 | log logr.Logger 46 | } 47 | 48 | // NewEngine creates new instances of the GitOps engine 49 | func NewEngine(config *rest.Config, clusterCache cache.ClusterCache, opts ...Option) GitOpsEngine { 50 | o := applyOptions(opts) 51 | return &gitOpsEngine{ 52 | config: config, 53 | cache: clusterCache, 54 | kubectl: o.kubectl, 55 | log: o.log, 56 | } 57 | } 58 | 59 | func (e *gitOpsEngine) Run() (StopFunc, error) { 60 | err := e.cache.EnsureSynced() 61 | if err != nil { 62 | return nil, err 63 | } 64 | 65 | return func() { 66 | e.cache.Invalidate() 67 | }, nil 68 | } 69 | 70 | func (e *gitOpsEngine) Sync(ctx context.Context, 71 | resources []*unstructured.Unstructured, 72 | isManaged func(r *cache.Resource) bool, 73 | revision string, 74 | namespace string, 75 | opts ...sync.SyncOpt, 76 | ) ([]common.ResourceSyncResult, error) { 77 | managedResources, err := e.cache.GetManagedLiveObjs(resources, isManaged) 78 | if err != 
nil { 79 | return nil, err 80 | } 81 | result := sync.Reconcile(resources, managedResources, namespace, e.cache) 82 | diffRes, err := diff.DiffArray(result.Target, result.Live, diff.WithLogr(e.log)) 83 | if err != nil { 84 | return nil, err 85 | } 86 | opts = append(opts, sync.WithSkipHooks(!diffRes.Modified)) 87 | syncCtx, err := sync.NewSyncContext(revision, result, e.config, e.config, e.kubectl, namespace, opts...) 88 | if err != nil { 89 | return nil, err 90 | } 91 | 92 | resUpdated := make(chan bool) 93 | unsubscribe := e.cache.OnResourceUpdated(func(newRes *cache.Resource, oldRes *cache.Resource, namespaceResources map[kube.ResourceKey]*cache.Resource) { 94 | var key kube.ResourceKey 95 | if newRes != nil { 96 | key = newRes.ResourceKey() 97 | } else { 98 | key = oldRes.ResourceKey() 99 | } 100 | if _, ok := managedResources[key]; ok { 101 | resUpdated <- true 102 | } 103 | }) 104 | defer unsubscribe() 105 | for { 106 | syncCtx.Sync() 107 | phase, message, resources := syncCtx.GetState() 108 | if phase.Completed() { 109 | if phase == common.OperationError { 110 | err = fmt.Errorf("sync operation failed: %s", message) 111 | } 112 | return resources, err 113 | } 114 | select { 115 | case <-ctx.Done(): 116 | syncCtx.Terminate() 117 | return resources, ctx.Err() 118 | case <-time.After(operationRefreshTimeout): 119 | case <-resUpdated: 120 | } 121 | } 122 | } 123 | -------------------------------------------------------------------------------- /pkg/sync/reconcile.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 5 | "k8s.io/apimachinery/pkg/types" 6 | 7 | hookutil "github.com/argoproj/gitops-engine/pkg/sync/hook" 8 | "github.com/argoproj/gitops-engine/pkg/sync/ignore" 9 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 10 | kubeutil "github.com/argoproj/gitops-engine/pkg/utils/kube" 11 | 
"github.com/argoproj/gitops-engine/pkg/utils/text" 12 | ) 13 | 14 | func splitHooks(target []*unstructured.Unstructured) ([]*unstructured.Unstructured, []*unstructured.Unstructured) { 15 | targetObjs := make([]*unstructured.Unstructured, 0) 16 | hooks := make([]*unstructured.Unstructured, 0) 17 | for _, obj := range target { 18 | if obj == nil || ignore.Ignore(obj) { 19 | continue 20 | } 21 | if hookutil.IsHook(obj) { 22 | hooks = append(hooks, obj) 23 | } else { 24 | targetObjs = append(targetObjs, obj) 25 | } 26 | } 27 | return targetObjs, hooks 28 | } 29 | 30 | // dedupLiveResources handles removes live resource duplicates with the same UID. Duplicates are created in a separate resource groups. 31 | // E.g. apps/Deployment produces duplicate in extensions/Deployment, authorization.openshift.io/ClusterRole produces duplicate in rbac.authorization.k8s.io/ClusterRole etc. 32 | // The method removes such duplicates unless it was defined in git ( exists in target resources list ). At least one duplicate stays. 
33 | // If non of duplicates are in git at random one stays 34 | func dedupLiveResources(targetObjs []*unstructured.Unstructured, liveObjsByKey map[kubeutil.ResourceKey]*unstructured.Unstructured) { 35 | targetObjByKey := make(map[kubeutil.ResourceKey]*unstructured.Unstructured) 36 | for i := range targetObjs { 37 | targetObjByKey[kubeutil.GetResourceKey(targetObjs[i])] = targetObjs[i] 38 | } 39 | liveObjsById := make(map[types.UID][]*unstructured.Unstructured) 40 | for k := range liveObjsByKey { 41 | obj := liveObjsByKey[k] 42 | if obj != nil { 43 | liveObjsById[obj.GetUID()] = append(liveObjsById[obj.GetUID()], obj) 44 | } 45 | } 46 | for id := range liveObjsById { 47 | objs := liveObjsById[id] 48 | 49 | if len(objs) > 1 { 50 | duplicatesLeft := len(objs) 51 | for i := range objs { 52 | obj := objs[i] 53 | resourceKey := kubeutil.GetResourceKey(obj) 54 | if _, ok := targetObjByKey[resourceKey]; !ok { 55 | delete(liveObjsByKey, resourceKey) 56 | duplicatesLeft-- 57 | if duplicatesLeft == 1 { 58 | break 59 | } 60 | } 61 | } 62 | } 63 | } 64 | } 65 | 66 | type ReconciliationResult struct { 67 | Live []*unstructured.Unstructured 68 | Target []*unstructured.Unstructured 69 | Hooks []*unstructured.Unstructured 70 | } 71 | 72 | func Reconcile(targetObjs []*unstructured.Unstructured, liveObjByKey map[kube.ResourceKey]*unstructured.Unstructured, namespace string, resInfo kubeutil.ResourceInfoProvider) ReconciliationResult { 73 | targetObjs, hooks := splitHooks(targetObjs) 74 | dedupLiveResources(targetObjs, liveObjByKey) 75 | 76 | managedLiveObj := make([]*unstructured.Unstructured, len(targetObjs)) 77 | for i, obj := range targetObjs { 78 | gvk := obj.GroupVersionKind() 79 | ns := text.FirstNonEmpty(obj.GetNamespace(), namespace) 80 | if namespaced := kubeutil.IsNamespacedOrUnknown(resInfo, obj.GroupVersionKind().GroupKind()); !namespaced { 81 | ns = "" 82 | } 83 | key := kubeutil.NewResourceKey(gvk.Group, gvk.Kind, ns, obj.GetName()) 84 | if liveObj, ok := 
liveObjByKey[key]; ok { 85 | managedLiveObj[i] = liveObj 86 | delete(liveObjByKey, key) 87 | } else { 88 | managedLiveObj[i] = nil 89 | } 90 | } 91 | for _, obj := range liveObjByKey { 92 | targetObjs = append(targetObjs, nil) 93 | managedLiveObj = append(managedLiveObj, obj) 94 | } 95 | return ReconciliationResult{ 96 | Target: targetObjs, 97 | Hooks: hooks, 98 | Live: managedLiveObj, 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /pkg/cache/settings.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/go-logr/logr" 7 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | "k8s.io/client-go/rest" 9 | 10 | "github.com/argoproj/gitops-engine/pkg/health" 11 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 12 | "github.com/argoproj/gitops-engine/pkg/utils/tracing" 13 | ) 14 | 15 | type noopSettings struct { 16 | } 17 | 18 | func (f *noopSettings) GetResourceHealth(_ *unstructured.Unstructured) (*health.HealthStatus, error) { 19 | return nil, nil 20 | } 21 | 22 | func (f *noopSettings) IsExcludedResource(_, _, _ string) bool { 23 | return false 24 | } 25 | 26 | // Settings caching customizations 27 | type Settings struct { 28 | // ResourceHealthOverride contains health assessment overrides 29 | ResourceHealthOverride health.HealthOverride 30 | // ResourcesFilter holds filter that excludes resources 31 | ResourcesFilter kube.ResourceFilter 32 | } 33 | 34 | type UpdateSettingsFunc func(cache *clusterCache) 35 | 36 | // SetKubectl allows to override kubectl wrapper implementation 37 | func SetKubectl(kubectl kube.Kubectl) UpdateSettingsFunc { 38 | return func(cache *clusterCache) { 39 | cache.kubectl = kubectl 40 | } 41 | } 42 | 43 | // SetPopulateResourceInfoHandler updates handler that populates resource info 44 | func SetPopulateResourceInfoHandler(handler OnPopulateResourceInfoHandler) UpdateSettingsFunc { 45 | 
return func(cache *clusterCache) { 46 | cache.populateResourceInfoHandler = handler 47 | } 48 | } 49 | 50 | // SetSettings updates caching settings 51 | func SetSettings(settings Settings) UpdateSettingsFunc { 52 | return func(cache *clusterCache) { 53 | cache.settings = Settings{settings.ResourceHealthOverride, settings.ResourcesFilter} 54 | } 55 | } 56 | 57 | // SetNamespaces updates list of monitored namespaces 58 | func SetNamespaces(namespaces []string) UpdateSettingsFunc { 59 | return func(cache *clusterCache) { 60 | cache.namespaces = namespaces 61 | } 62 | } 63 | 64 | // SetConfig updates cluster rest config 65 | func SetConfig(config *rest.Config) UpdateSettingsFunc { 66 | return func(cache *clusterCache) { 67 | cache.config = config 68 | } 69 | } 70 | 71 | // SetListPageSize sets the page size for list pager. 72 | func SetListPageSize(listPageSize int64) UpdateSettingsFunc { 73 | return func(cache *clusterCache) { 74 | cache.listPageSize = listPageSize 75 | } 76 | } 77 | 78 | // SetListPageBufferSize sets the number of pages to prefetch for list pager. 79 | func SetListPageBufferSize(listPageBufferSize int32) UpdateSettingsFunc { 80 | return func(cache *clusterCache) { 81 | cache.listPageBufferSize = listPageBufferSize 82 | } 83 | } 84 | 85 | // SetListSemaphore sets the semaphore for list operations. 86 | // Taking an object rather than a number allows to share a semaphore among multiple caches if necessary. 87 | func SetListSemaphore(listSemaphore WeightedSemaphore) UpdateSettingsFunc { 88 | return func(cache *clusterCache) { 89 | cache.listSemaphore = listSemaphore 90 | } 91 | } 92 | 93 | // SetResyncTimeout updates cluster re-sync timeout 94 | func SetResyncTimeout(timeout time.Duration) UpdateSettingsFunc { 95 | return func(cache *clusterCache) { 96 | cache.syncStatus.lock.Lock() 97 | defer cache.syncStatus.lock.Unlock() 98 | 99 | cache.syncStatus.resyncTimeout = timeout 100 | } 101 | } 102 | 103 | // SetLogr sets the logger to use. 
104 | func SetLogr(log logr.Logger) UpdateSettingsFunc { 105 | return func(cache *clusterCache) { 106 | cache.log = log 107 | if kcmd, ok := cache.kubectl.(*kube.KubectlCmd); ok { 108 | kcmd.Log = log 109 | } 110 | } 111 | } 112 | 113 | // SetTracer sets the tracer to use. 114 | func SetTracer(tracer tracing.Tracer) UpdateSettingsFunc { 115 | return func(cache *clusterCache) { 116 | if kcmd, ok := cache.kubectl.(*kube.KubectlCmd); ok { 117 | kcmd.Tracer = tracer 118 | } 119 | } 120 | } 121 | -------------------------------------------------------------------------------- /pkg/cache/references.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | 8 | v1 "k8s.io/api/apps/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 11 | "k8s.io/apimachinery/pkg/types" 12 | 13 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 14 | ) 15 | 16 | // mightHaveInferredOwner returns true of given resource might have inferred owners 17 | func mightHaveInferredOwner(r *Resource) bool { 18 | return r.Ref.GroupVersionKind().Group == "" && r.Ref.Kind == kube.PersistentVolumeClaimKind 19 | } 20 | 21 | func (c *clusterCache) resolveResourceReferences(un *unstructured.Unstructured) ([]metav1.OwnerReference, func(kube.ResourceKey) bool) { 22 | var isInferredParentOf func(_ kube.ResourceKey) bool 23 | ownerRefs := un.GetOwnerReferences() 24 | gvk := un.GroupVersionKind() 25 | 26 | switch { 27 | 28 | // Special case for endpoint. 
Remove after https://github.com/kubernetes/kubernetes/issues/28483 is fixed 29 | case gvk.Group == "" && gvk.Kind == kube.EndpointsKind && len(un.GetOwnerReferences()) == 0: 30 | ownerRefs = append(ownerRefs, metav1.OwnerReference{ 31 | Name: un.GetName(), 32 | Kind: kube.ServiceKind, 33 | APIVersion: "v1", 34 | }) 35 | 36 | // Special case for Operator Lifecycle Manager ClusterServiceVersion: 37 | case un.GroupVersionKind().Group == "operators.coreos.com" && un.GetKind() == "ClusterServiceVersion": 38 | if un.GetAnnotations()["olm.operatorGroup"] != "" { 39 | ownerRefs = append(ownerRefs, metav1.OwnerReference{ 40 | Name: un.GetAnnotations()["olm.operatorGroup"], 41 | Kind: "OperatorGroup", 42 | APIVersion: "operators.coreos.com/v1", 43 | }) 44 | } 45 | 46 | // Edge case: consider auto-created service account tokens as a child of service account objects 47 | case un.GetKind() == kube.SecretKind && un.GroupVersionKind().Group == "": 48 | if yes, ref := isServiceAccountTokenSecret(un); yes { 49 | ownerRefs = append(ownerRefs, ref) 50 | } 51 | 52 | case (un.GroupVersionKind().Group == "apps" || un.GroupVersionKind().Group == "extensions") && un.GetKind() == kube.StatefulSetKind: 53 | if refs, err := isStatefulSetChild(un); err != nil { 54 | c.log.Error(err, fmt.Sprintf("Failed to extract StatefulSet %s/%s PVC references", un.GetNamespace(), un.GetName())) 55 | } else { 56 | isInferredParentOf = refs 57 | } 58 | } 59 | 60 | return ownerRefs, isInferredParentOf 61 | } 62 | 63 | func isStatefulSetChild(un *unstructured.Unstructured) (func(kube.ResourceKey) bool, error) { 64 | sts := v1.StatefulSet{} 65 | data, err := json.Marshal(un) 66 | if err != nil { 67 | return nil, err 68 | } 69 | err = json.Unmarshal(data, &sts) 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | templates := sts.Spec.VolumeClaimTemplates 75 | return func(key kube.ResourceKey) bool { 76 | if key.Kind == kube.PersistentVolumeClaimKind && key.GroupKind().Group == "" { 77 | for _, templ := 
range templates { 78 | if strings.HasPrefix(key.Name, fmt.Sprintf("%s-%s-", templ.Name, un.GetName())) { 79 | return true 80 | } 81 | } 82 | } 83 | return false 84 | }, nil 85 | } 86 | 87 | func isServiceAccountTokenSecret(un *unstructured.Unstructured) (bool, metav1.OwnerReference) { 88 | ref := metav1.OwnerReference{ 89 | APIVersion: "v1", 90 | Kind: kube.ServiceAccountKind, 91 | } 92 | 93 | if typeVal, ok, err := unstructured.NestedString(un.Object, "type"); !ok || err != nil || typeVal != "kubernetes.io/service-account-token" { 94 | return false, ref 95 | } 96 | 97 | annotations := un.GetAnnotations() 98 | if annotations == nil { 99 | return false, ref 100 | } 101 | 102 | id, okId := annotations["kubernetes.io/service-account.uid"] 103 | name, okName := annotations["kubernetes.io/service-account.name"] 104 | if okId && okName { 105 | ref.Name = name 106 | ref.UID = types.UID(id) 107 | } 108 | return ref.Name != "" && ref.UID != "", ref 109 | } 110 | -------------------------------------------------------------------------------- /pkg/cache/predicates_test.go: -------------------------------------------------------------------------------- 1 | package cache 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/stretchr/testify/require" 11 | appsv1 "k8s.io/api/apps/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 14 | "k8s.io/client-go/rest" 15 | ) 16 | 17 | func TestResourceOfGroupKind(t *testing.T) { 18 | deploy := &appsv1.Deployment{ 19 | TypeMeta: metav1.TypeMeta{ 20 | APIVersion: "apps/v1", 21 | Kind: "Deployment", 22 | }, 23 | ObjectMeta: metav1.ObjectMeta{ 24 | Name: "deploy", 25 | }, 26 | } 27 | service := &appsv1.Deployment{ 28 | TypeMeta: metav1.TypeMeta{ 29 | APIVersion: "", 30 | Kind: "Service", 31 | }, 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: "service", 34 | }, 35 | } 36 | 37 
| cluster := newCluster(t, deploy, service) 38 | err := cluster.EnsureSynced() 39 | require.NoError(t, err) 40 | 41 | resources := cluster.FindResources("", ResourceOfGroupKind("apps", "Deployment")) 42 | assert.Len(t, resources, 1) 43 | assert.NotNil(t, resources[kube.NewResourceKey("apps", "Deployment", "", "deploy")]) 44 | } 45 | 46 | func TestGetNamespaceResources(t *testing.T) { 47 | defaultNamespaceTopLevel1 := &appsv1.Deployment{ 48 | TypeMeta: metav1.TypeMeta{ 49 | APIVersion: "apps/v1", 50 | Kind: "Deployment", 51 | }, 52 | ObjectMeta: metav1.ObjectMeta{ 53 | Name: "helm-guestbook1", 54 | Namespace: "default", 55 | }, 56 | } 57 | defaultNamespaceTopLevel2 := &appsv1.Deployment{ 58 | TypeMeta: metav1.TypeMeta{ 59 | APIVersion: "apps/v1", 60 | Kind: "Deployment", 61 | }, 62 | ObjectMeta: metav1.ObjectMeta{ 63 | Name: "helm-guestbook2", 64 | Namespace: "default", 65 | }, 66 | } 67 | kubesystemNamespaceTopLevel2 := &appsv1.Deployment{ 68 | TypeMeta: metav1.TypeMeta{ 69 | APIVersion: "apps/v1", 70 | Kind: "Deployment", 71 | }, 72 | ObjectMeta: metav1.ObjectMeta{ 73 | Name: "helm-guestbook3", 74 | Namespace: "kube-system", 75 | }, 76 | } 77 | 78 | cluster := newCluster(t, defaultNamespaceTopLevel1, defaultNamespaceTopLevel2, kubesystemNamespaceTopLevel2) 79 | err := cluster.EnsureSynced() 80 | require.NoError(t, err) 81 | 82 | resources := cluster.FindResources("default", TopLevelResource) 83 | assert.Len(t, resources, 2) 84 | assert.Equal(t, resources[getResourceKey(t, defaultNamespaceTopLevel1)].Ref.Name, "helm-guestbook1") 85 | assert.Equal(t, resources[getResourceKey(t, defaultNamespaceTopLevel2)].Ref.Name, "helm-guestbook2") 86 | 87 | resources = cluster.FindResources("kube-system", TopLevelResource) 88 | assert.Len(t, resources, 1) 89 | assert.Equal(t, resources[getResourceKey(t, kubesystemNamespaceTopLevel2)].Ref.Name, "helm-guestbook3") 90 | } 91 | 92 | func ExampleNewClusterCache_inspectNamespaceResources() { 93 | // kubernetes cluster config here 94 | 
config := &rest.Config{} 95 | 96 | clusterCache := NewClusterCache(config, 97 | // cache default namespace only 98 | SetNamespaces([]string{"default", "kube-system"}), 99 | // configure custom logic to cache resources manifest and additional metadata 100 | SetPopulateResourceInfoHandler(func(un *unstructured.Unstructured, isRoot bool) (info interface{}, cacheManifest bool) { 101 | // if resource belongs to 'extensions' group then mark if with 'deprecated' label 102 | if un.GroupVersionKind().Group == "extensions" { 103 | info = []string{"deprecated"} 104 | } 105 | _, ok := un.GetLabels()["acme.io/my-label"] 106 | // cache whole manifest if resource has label 107 | cacheManifest = ok 108 | return 109 | }), 110 | ) 111 | // Ensure cluster is synced before using it 112 | if err := clusterCache.EnsureSynced(); err != nil { 113 | panic(err) 114 | } 115 | // Iterate default namespace resources tree 116 | for _, root := range clusterCache.FindResources("default", TopLevelResource) { 117 | clusterCache.IterateHierarchy(root.ResourceKey(), func(resource *Resource, _ map[kube.ResourceKey]*Resource) { 118 | fmt.Printf("resource: %s, info: %v\n", resource.Ref.String(), resource.Info) 119 | }) 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /pkg/sync/sync_task.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | 9 | "github.com/argoproj/gitops-engine/pkg/sync/common" 10 | "github.com/argoproj/gitops-engine/pkg/sync/hook" 11 | "github.com/argoproj/gitops-engine/pkg/sync/syncwaves" 12 | "github.com/argoproj/gitops-engine/pkg/utils/kube" 13 | ) 14 | 15 | // syncTask holds the live and target object. At least one should be non-nil. A targetObj of nil 16 | // indicates the live object needs to be pruned. 
A liveObj of nil indicates the object has yet to 17 | // be deployed 18 | type syncTask struct { 19 | phase common.SyncPhase 20 | liveObj *unstructured.Unstructured 21 | targetObj *unstructured.Unstructured 22 | skipDryRun bool 23 | syncStatus common.ResultCode 24 | operationState common.OperationPhase 25 | message string 26 | waveOverride *int 27 | } 28 | 29 | func ternary(val bool, a, b string) string { 30 | if val { 31 | return a 32 | } else { 33 | return b 34 | } 35 | } 36 | 37 | func (t *syncTask) String() string { 38 | return fmt.Sprintf("%s/%d %s %s/%s:%s/%s %s->%s (%s,%s,%s)", 39 | t.phase, t.wave(), 40 | ternary(t.isHook(), "hook", "resource"), t.group(), t.kind(), t.namespace(), t.name(), 41 | ternary(t.liveObj != nil, "obj", "nil"), ternary(t.targetObj != nil, "obj", "nil"), 42 | t.syncStatus, t.operationState, t.message, 43 | ) 44 | } 45 | 46 | func (t *syncTask) isPrune() bool { 47 | return t.targetObj == nil 48 | } 49 | 50 | func (t *syncTask) resultKey() string { 51 | return resourceResultKey(kube.GetResourceKey(t.obj()), t.phase) 52 | } 53 | 54 | // return the target object (if this exists) otherwise the live object 55 | // some caution - often you explicitly want the live object not the target object 56 | func (t *syncTask) obj() *unstructured.Unstructured { 57 | return obj(t.targetObj, t.liveObj) 58 | } 59 | 60 | func (t *syncTask) wave() int { 61 | if t.waveOverride != nil { 62 | return *t.waveOverride 63 | } 64 | return syncwaves.Wave(t.obj()) 65 | } 66 | 67 | func (t *syncTask) isHook() bool { 68 | return hook.IsHook(t.obj()) 69 | } 70 | 71 | func (t *syncTask) group() string { 72 | return t.groupVersionKind().Group 73 | } 74 | func (t *syncTask) kind() string { 75 | return t.groupVersionKind().Kind 76 | } 77 | 78 | func (t *syncTask) version() string { 79 | return t.groupVersionKind().Version 80 | } 81 | 82 | func (t *syncTask) groupVersionKind() schema.GroupVersionKind { 83 | return t.obj().GroupVersionKind() 84 | } 85 | 86 | func (t 
*syncTask) name() string { 87 | return t.obj().GetName() 88 | } 89 | 90 | func (t *syncTask) namespace() string { 91 | return t.obj().GetNamespace() 92 | } 93 | 94 | func (t *syncTask) pending() bool { 95 | return t.operationState == "" 96 | } 97 | 98 | func (t *syncTask) running() bool { 99 | return t.operationState.Running() 100 | } 101 | 102 | func (t *syncTask) completed() bool { 103 | return t.operationState.Completed() 104 | } 105 | 106 | func (t *syncTask) successful() bool { 107 | return t.operationState.Successful() 108 | } 109 | 110 | func (t *syncTask) hookType() common.HookType { 111 | if t.isHook() { 112 | return common.HookType(t.phase) 113 | } else { 114 | return "" 115 | } 116 | } 117 | 118 | func (t *syncTask) hasHookDeletePolicy(policy common.HookDeletePolicy) bool { 119 | // cannot have a policy if it is not a hook, it is meaningless 120 | if !t.isHook() { 121 | return false 122 | } 123 | for _, p := range hook.DeletePolicies(t.obj()) { 124 | if p == policy { 125 | return true 126 | } 127 | } 128 | return false 129 | } 130 | 131 | func (t *syncTask) deleteBeforeCreation() bool { 132 | return t.liveObj != nil && t.pending() && t.hasHookDeletePolicy(common.HookDeletePolicyBeforeHookCreation) 133 | } 134 | 135 | func (t *syncTask) deleteOnPhaseCompletion() bool { 136 | return t.deleteOnPhaseFailed() || t.deleteOnPhaseSuccessful() 137 | } 138 | 139 | func (t *syncTask) deleteOnPhaseSuccessful() bool { 140 | return t.liveObj != nil && t.hasHookDeletePolicy(common.HookDeletePolicyHookSucceeded) 141 | } 142 | 143 | func (t *syncTask) deleteOnPhaseFailed() bool { 144 | return t.liveObj != nil && t.hasHookDeletePolicy(common.HookDeletePolicyHookFailed) 145 | } 146 | -------------------------------------------------------------------------------- /pkg/sync/sync_task_test.go: -------------------------------------------------------------------------------- 1 | package sync 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 8 | 9 | "github.com/argoproj/gitops-engine/pkg/sync/common" 10 | . "github.com/argoproj/gitops-engine/pkg/utils/testing" 11 | ) 12 | 13 | func newHook(hookType common.HookType) *unstructured.Unstructured { 14 | return Annotate(NewPod(), "argocd.argoproj.io/hook", string(hookType)) 15 | } 16 | 17 | func Test_syncTask_hookType(t *testing.T) { 18 | type fields struct { 19 | phase common.SyncPhase 20 | liveObj *unstructured.Unstructured 21 | } 22 | tests := []struct { 23 | name string 24 | fields fields 25 | want common.HookType 26 | }{ 27 | {"Empty", fields{common.SyncPhaseSync, NewPod()}, ""}, 28 | {"PreSyncHook", fields{common.SyncPhasePreSync, newHook(common.HookTypePreSync)}, common.HookTypePreSync}, 29 | {"SyncHook", fields{common.SyncPhaseSync, newHook(common.HookTypeSync)}, common.HookTypeSync}, 30 | {"PostSyncHook", fields{common.SyncPhasePostSync, newHook(common.HookTypePostSync)}, common.HookTypePostSync}, 31 | } 32 | for _, tt := range tests { 33 | t.Run(tt.name, func(t *testing.T) { 34 | task := &syncTask{ 35 | phase: tt.fields.phase, 36 | liveObj: tt.fields.liveObj, 37 | } 38 | hookType := task.hookType() 39 | assert.EqualValues(t, tt.want, hookType) 40 | }) 41 | } 42 | } 43 | 44 | func Test_syncTask_hasHookDeletePolicy(t *testing.T) { 45 | assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(common.HookDeletePolicyBeforeHookCreation)) 46 | assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(common.HookDeletePolicyHookSucceeded)) 47 | assert.False(t, (&syncTask{targetObj: NewPod()}).hasHookDeletePolicy(common.HookDeletePolicyHookFailed)) 48 | // must be hook 49 | assert.False(t, (&syncTask{targetObj: Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).hasHookDeletePolicy(common.HookDeletePolicyBeforeHookCreation)) 50 | assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), 
"argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).hasHookDeletePolicy(common.HookDeletePolicyBeforeHookCreation)) 51 | assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")}).hasHookDeletePolicy(common.HookDeletePolicyHookSucceeded)) 52 | assert.True(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookFailed")}).hasHookDeletePolicy(common.HookDeletePolicyHookFailed)) 53 | } 54 | 55 | func Test_syncTask_deleteOnPhaseCompletion(t *testing.T) { 56 | assert.False(t, (&syncTask{liveObj: NewPod()}).deleteOnPhaseCompletion()) 57 | // must be hook 58 | assert.True(t, (&syncTask{operationState: common.OperationSucceeded, liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookSucceeded")}).deleteOnPhaseCompletion()) 59 | assert.True(t, (&syncTask{operationState: common.OperationFailed, liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "HookFailed")}).deleteOnPhaseCompletion()) 60 | } 61 | 62 | func Test_syncTask_deleteBeforeCreation(t *testing.T) { 63 | assert.False(t, (&syncTask{liveObj: NewPod()}).deleteBeforeCreation()) 64 | // must be hook 65 | assert.False(t, (&syncTask{liveObj: Annotate(NewPod(), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).deleteBeforeCreation()) 66 | // no need to delete if no live obj 67 | assert.False(t, (&syncTask{targetObj: Annotate(Annotate(NewPod(), "argoocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).deleteBeforeCreation()) 68 | assert.True(t, (&syncTask{liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).deleteBeforeCreation()) 69 | assert.True(t, 
(&syncTask{liveObj: Annotate(Annotate(NewPod(), "argocd.argoproj.io/hook", "Sync"), "argocd.argoproj.io/hook-delete-policy", "BeforeHookCreation")}).deleteBeforeCreation()) 70 | 71 | } 72 | 73 | func Test_syncTask_wave(t *testing.T) { 74 | assert.Equal(t, 0, (&syncTask{targetObj: NewPod()}).wave()) 75 | assert.Equal(t, 1, (&syncTask{targetObj: Annotate(NewPod(), "argocd.argoproj.io/sync-wave", "1")}).wave()) 76 | } 77 | -------------------------------------------------------------------------------- /pkg/diff/testdata/mutatingwebhookconfig-live.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "admissionregistration.k8s.io/v1beta1", 3 | "kind": "MutatingWebhookConfiguration", 4 | "metadata": { 5 | "annotations": { 6 | "cert-manager.io/inject-apiserver-ca": "true", 7 | "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"admissionregistration.k8s.io/v1beta1\",\"kind\":\"MutatingWebhookConfiguration\",\"metadata\":{\"annotations\":{\"cert-manager.io/inject-apiserver-ca\":\"true\"},\"labels\":{\"app\":\"webhook\",\"app.kubernetes.io/instance\":\"cert-manager\",\"app.kubernetes.io/managed-by\":\"Tiller\",\"app.kubernetes.io/name\":\"webhook\",\"helm.sh/chart\":\"cert-manager-v0.11.0\"},\"name\":\"cert-manager-webhook\"},\"webhooks\":[{\"clientConfig\":{\"service\":{\"name\":\"kubernetes\",\"namespace\":\"default\",\"path\":\"/apis/webhook.cert-manager.io/v1beta1/mutations\"}},\"failurePolicy\":\"Fail\",\"name\":\"webhook.cert-manager.io\",\"rules\":[{\"apiGroups\":[\"cert-manager.io\"],\"apiVersions\":[\"v1alpha2\"],\"operations\":[\"CREATE\",\"UPDATE\"],\"resources\":[\"certificates\",\"issuers\",\"clusterissuers\",\"orders\",\"challenges\",\"certificaterequests\"]}]}]}\n" 8 | }, 9 | "creationTimestamp": "2019-11-13T22:18:01Z", 10 | "generation": 2, 11 | "labels": { 12 | "app": "webhook", 13 | "app.kubernetes.io/instance": "cert-manager", 14 | "app.kubernetes.io/managed-by": "Tiller", 15 | 
"app.kubernetes.io/name": "webhook", 16 | "helm.sh/chart": "cert-manager-v0.11.0" 17 | }, 18 | "name": "cert-manager-webhook", 19 | "resourceVersion": "192158268", 20 | "selfLink": "/apis/admissionregistration.k8s.io/v1beta1/mutatingwebhookconfigurations/cert-manager-webhook", 21 | "uid": "746a6ca1-0663-11ea-b4b2-42010a800164" 22 | }, 23 | "webhooks": [ 24 | { 25 | "admissionReviewVersions": [ 26 | "v1beta1" 27 | ], 28 | "clientConfig": { 29 | "caBundle": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURERENDQWZTZ0F3SUJBZ0lSQU5iM3pUUEs0VlVmdnpyMHBteTZLSFV3RFFZSktvWklodmNOQVFFTEJRQXcKTHpFdE1Dc0dBMVVFQXhNa1lqQXlNemc0TnpNdE56Z3pNaTAwTVRFMkxUZzNaakV0T1RNeE9HSmtaV0ZtTTJJeQpNQjRYRFRFNE1EVXhNREl3TlRNMU1Wb1hEVEl6TURVd09USXhOVE0xTVZvd0x6RXRNQ3NHQTFVRUF4TWtZakF5Ck16ZzROek10Tnpnek1pMDBNVEUyTFRnM1pqRXRPVE14T0dKa1pXRm1NMkl5TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdWFrM2VleHFHS3g2dEJ2cExVdGgzSUlPQVBrVHd2bkVobER5dUFwOQpDY0tVU2JsOUR3VnVyYUtrYkZSdFZhM0xpaDhGQmZ0aDhETWI3RkxoNEZ5d1JpbDNJUEhWaEZacTFvQi9aSVQxCitDUlN6NXZXZWlVeGZCM0l5L05wSTBleFVHVVAzV2N2bHFiQU9odVhXRVo5OTB0NVpiWE9GYi9hcm9sU3hyQ0cKUkFQcUhzSlNoa3kyV3FNWnkyQ29yZjV3ZTZtZmVJcFpRUmlJOWd3eWN4S2ZWbmtrT28vRlhvQjYvbnJPRlNmVwpacmJvYmVxRWsvRFB3akRDdXY4aEpjajlFdjRwVytSZjRTakRVSjBBYWZMVU1yQ05qREtXUHJoYlRTMHpBdGlKClhLdmttVFFtWWZlYjVESEJUbmw2THJoN1FSanBsTlk1MW1vZ1MvSDBOb2trZndJREFRQUJveU13SVRBT0JnTlYKSFE4QkFmOEVCQU1DQWdRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBTkJna3Foa2lHOXcwQkFRc0ZBQU9DQVFFQQpZdC93bVlubUw2RzJySm1PWVY3N1ZQdEMrN25TKzhUbmVkbkFlNFhLSUF6alk3RTNLazdvSk5uT1VxT2V5a2hpClVuRXhpNWRmTis0b0Z1TDFKd3lDVzR2R2ZNVnJzdFFFcXhMSkM5TFhzSGJXSUp4b1VTNlpiTjRPVWl2WlVTNE8KWGlKTlFSQW5GZnY3Qkw5SXR5NWxucS9CZTBPMFlBQkhoeGZOQ0FHOFRjd0cxVUxWMlRGMlZXVi9LQVAvODFiUQpWNmxrZlVnNTNZQmpTRVN1WkRRbmZibkJrMDFjd3FmUzZyZFV3dW44U3BjbHBQMFlFbWIyTUpEZmF6bll3RkdBCldQNnRiSzhTTXhiMlJnOUR1VWdrMEZBb1NFcDlvc25NQVF2eEMzZmx2Mzd4b0VYZTBDYXdjVEFwOEo2S1J2QlAKcmp2eDVUWkZrU2xjV2RCTktVZEN0QT09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K", 30 | "service": { 31 | "name": 
"kubernetes", 32 | "namespace": "default", 33 | "path": "/apis/webhook.cert-manager.io/v1beta1/mutations", 34 | "port": 443 35 | } 36 | }, 37 | "failurePolicy": "Fail", 38 | "matchPolicy": "Exact", 39 | "name": "webhook.cert-manager.io", 40 | "namespaceSelector": {}, 41 | "objectSelector": {}, 42 | "reinvocationPolicy": "Never", 43 | "rules": [ 44 | { 45 | "apiGroups": [ 46 | "cert-manager.io" 47 | ], 48 | "apiVersions": [ 49 | "v1alpha2" 50 | ], 51 | "operations": [ 52 | "CREATE", 53 | "UPDATE" 54 | ], 55 | "resources": [ 56 | "certificates", 57 | "issuers", 58 | "clusterissuers", 59 | "orders", 60 | "challenges", 61 | "certificaterequests" 62 | ], 63 | "scope": "*" 64 | } 65 | ], 66 | "sideEffects": "Unknown", 67 | "timeoutSeconds": 30 68 | } 69 | ] 70 | } -------------------------------------------------------------------------------- /pkg/sync/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package implements Kubernetes resources synchronization and provides the following main features: 3 | - basic syncing 4 | - resource pruning 5 | - resource hooks 6 | - sync waves 7 | - sync options 8 | 9 | Basic Syncing 10 | 11 | Executes equivalent of `kubectl apply` for each specified resource. The apply operations are executed in the predefined 12 | order depending of resource type: namespaces, custom resource definitions first and workload resources last. 13 | 14 | Resource Pruning 15 | 16 | An ability to delete resources that no longer should exist in the cluster. By default obsolete resources are not deleted 17 | and only reported in the sync operation result. 18 | 19 | Resource Hooks 20 | 21 | Hooks provide an ability to create resources such as Pod, Job or any other resource, that are 'executed' before, after 22 | or even during the synchronization process. Hooks enable use-cases such as database migration and post sync notifications. 
23 | 24 | Hooks are regular Kubernetes resources that have `argocd.argoproj.io/hook` annotation: 25 | 26 | apiVersion: batch/v1 27 | kind: Job 28 | metadata: 29 | generateName: schema-migrate- 30 | annotations: 31 | argocd.argoproj.io/hook: PreSync 32 | 33 | The annotation value indicates the sync operation phase: 34 | 35 | - PreSync - executes prior to the apply of the manifests. 36 | - PostSync - executes after all Sync hooks completed and were successful, a successful apply, and all resources in a Healthy state. 37 | - SyncFail - executes when the sync operation fails. 38 | - Sync - executes after all PreSync hooks completed and were successful, at the same time as the apply of the manifests. 39 | 40 | Named hooks (i.e. ones with /metadata/name) will only be created once. If you want a hook to be re-created each time 41 | either use BeforeHookCreation policy (see below) or /metadata/generateName. 42 | 43 | The same resource hook might be executed in several sync phases: 44 | 45 | apiVersion: batch/v1 46 | kind: Job 47 | metadata: 48 | generateName: schema-migrate- 49 | annotations: 50 | argocd.argoproj.io/hook: PreSync,PostSync 51 | 52 | Hooks can be deleted in an automatic fashion using the annotation: argocd.argoproj.io/hook-delete-policy. 53 | 54 | apiVersion: batch/v1 55 | kind: Job 56 | metadata: 57 | generateName: integration-test- 58 | annotations: 59 | argocd.argoproj.io/hook: PostSync 60 | argocd.argoproj.io/hook-delete-policy: HookSucceeded 61 | 62 | The following policies define when the hook will be deleted. 63 | 64 | - HookSucceeded - the hook resource is deleted after the hook succeeded (e.g. Job/Workflow completed successfully). 65 | - HookFailed - the hook resource is deleted after the hook failed. 
66 | - BeforeHookCreation - any existing hook resource is deleted before the new one is created 67 | 68 | Sync Waves 69 | 70 | The waves allow to group sync execution of syncing process into batches when each batch is executed sequentially one after 71 | another. Hooks and resources are assigned to wave zero by default. The wave can be negative, so you can create a wave 72 | that runs before all other resources. The `argocd.argoproj.io/sync-wave` annotation assign resource to a wave: 73 | 74 | metadata: 75 | annotations: 76 | argocd.argoproj.io/sync-wave: "5" 77 | 78 | Sync Options 79 | 80 | The sync options allows customizing the synchronization of selected resources. The options are specified using the 81 | annotation 'argocd.argoproj.io/sync-options'. Following sync options are supported: 82 | 83 | - SkipDryRunOnMissingResource=true - disables dry run in resource is missing in the cluster 84 | - Prune=false - disables resource pruning 85 | - Validate=false - disables resource validation (equivalent to 'kubectl apply --validate=false') 86 | 87 | How Does It Work Together? 88 | 89 | Syncing process orders the resources in the following precedence: 90 | 91 | - The phase 92 | - The wave they are in (lower values first) 93 | - By kind (e.g. namespaces first) 94 | - By name 95 | 96 | It then determines which the number of the next wave to apply. This is the first number where any resource is 97 | out-of-sync or unhealthy. It applies resources in that wave. It repeats this process until all phases and waves are in 98 | in-sync and healthy. 99 | 100 | Example 101 | 102 | Find real-life example in https://github.com/argoproj/gitops-engine/blob/master/pkg/engine/engine.go 103 | 104 | */ 105 | package sync 106 | --------------------------------------------------------------------------------