├── .gitignore ├── templates ├── service-settings │ └── gate.yml ├── profiles │ ├── front50-local.yml │ ├── settings-local.js │ └── gate-local.yml ├── profiles-auth │ ├── settings-local.js │ └── gate-local.yml ├── addons │ ├── demo │ │ ├── demok8s │ │ │ ├── pipelines │ │ │ │ ├── last-modified.json.tmpl │ │ │ │ └── PIPELINE_UUID │ │ │ │ │ └── pipeline-metadata.json.tmpl │ │ │ └── applications │ │ │ │ ├── last-modified.json.tmpl │ │ │ │ └── demok8s │ │ │ │ ├── application-permissions.json.tmpl │ │ │ │ └── application-metadata.json.tmpl │ │ └── democanary │ │ │ ├── pipelines │ │ │ ├── last-modified.json.tmpl │ │ │ └── PIPELINE_UUID │ │ │ │ └── pipeline-metadata.json.tmpl │ │ │ ├── applications │ │ │ ├── last-modified.json.tmpl │ │ │ └── democanary │ │ │ │ ├── application-permissions.json.tmpl │ │ │ │ └── application-metadata.json.tmpl │ │ │ └── canary_config │ │ │ └── Latency.json.tmpl │ └── prometheus │ │ ├── prometheus-service.yaml │ │ ├── prometheus-ingress-noauth.yaml │ │ └── prometheus-ingress.yaml ├── manifests │ ├── namespace.yml │ ├── spinnaker-default-clusteradmin-clusterrolebinding.yml │ ├── spinnaker-ingress.yml │ ├── halyard.yml │ ├── mariadb.yml │ └── minio.yml ├── config-armory ├── config └── archive │ ├── minio-without-pvc.yml │ ├── mariadb-without-pvc.yml │ └── halyard-with-pvc.yml ├── bin └── build.sh ├── pipelines ├── armory-app.yaml ├── namespaces.yaml ├── aws-app.yaml ├── kustomization.yaml ├── armory-github.yaml ├── armory-jenkins.yaml ├── armory-slack.yaml ├── armory-basic-k8s-pipe.yaml └── bootstrap.yaml ├── scripts ├── README.md ├── uninstall-k3s.sh ├── utils │ ├── scale_local.sh │ ├── remove_auth.sh │ ├── undo_expose_local.sh │ ├── expose_local.sh │ ├── switch_to_oss.sh │ └── external_service_setup.sh ├── addons │ ├── spinnaker_enable_canary.sh │ ├── setup_demo.sh │ ├── install_prometheus_osx.sh │ ├── install_prometheus.sh │ └── setup_demo_canary.sh ├── refresh_endpoint.sh ├── regenerate_password.sh ├── osx_install.sh ├── no_auth_install.sh 
├── install.sh └── functions.sh ├── .github └── workflows │ └── main.yml ├── guides ├── set-up-slack-notifications.md ├── setup-ldap.md ├── add-kubernetes-cluster.md ├── first-pipeline-jenkins.md └── setup-dev-environment.md ├── LICENSE └── readme.md /.gitignore: -------------------------------------------------------------------------------- 1 | workspace 2 | build 3 | .DS_Store -------------------------------------------------------------------------------- /templates/service-settings/gate.yml: -------------------------------------------------------------------------------- 1 | healthEndpoint: /api/v1/health -------------------------------------------------------------------------------- /templates/profiles/front50-local.yml: -------------------------------------------------------------------------------- 1 | spinnaker.s3.versioning: false 2 | -------------------------------------------------------------------------------- /templates/profiles-auth/settings-local.js: -------------------------------------------------------------------------------- 1 | window.spinnakerSettings.authEnabled = true; 2 | -------------------------------------------------------------------------------- /templates/addons/demo/demok8s/pipelines/last-modified.json.tmpl: -------------------------------------------------------------------------------- 1 | {"lastModified":__TIMESTAMP__} 2 | -------------------------------------------------------------------------------- /templates/profiles/settings-local.js: -------------------------------------------------------------------------------- 1 | window.spinnakerSettings.feature.kustomizeEnabled = true; 2 | -------------------------------------------------------------------------------- /templates/addons/demo/democanary/pipelines/last-modified.json.tmpl: -------------------------------------------------------------------------------- 1 | {"lastModified":__TIMESTAMP__} 2 | 
-------------------------------------------------------------------------------- /templates/addons/demo/demok8s/applications/last-modified.json.tmpl: -------------------------------------------------------------------------------- 1 | {"lastModified":__TIMESTAMP__} 2 | -------------------------------------------------------------------------------- /templates/addons/demo/democanary/applications/last-modified.json.tmpl: -------------------------------------------------------------------------------- 1 | {"lastModified":__TIMESTAMP__} 2 | -------------------------------------------------------------------------------- /templates/manifests/namespace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: NAMESPACE 6 | -------------------------------------------------------------------------------- /templates/config-armory: -------------------------------------------------------------------------------- 1 | armory: 2 | diagnostics: 3 | enabled: true 4 | uuid: cafed00d 5 | logging: 6 | enabled: true 7 | -------------------------------------------------------------------------------- /templates/profiles-auth/gate-local.yml: -------------------------------------------------------------------------------- 1 | security: 2 | basicform: 3 | enabled: true 4 | user: 5 | name: admin 6 | password: SPINNAKER_PASSWORD 7 | -------------------------------------------------------------------------------- /bin/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | mkdir -p build/minnaker 5 | cp -rpv templates scripts operator build/minnaker 6 | cd build && tar -czvf minnaker.tgz minnaker -------------------------------------------------------------------------------- /templates/addons/demo/demok8s/applications/demok8s/application-permissions.json.tmpl: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "demok8s", 3 | "lastModified": __TIMESTAMP__, 4 | "lastModifiedBy": "demo", 5 | "permissions": {} 6 | } -------------------------------------------------------------------------------- /templates/addons/demo/democanary/applications/democanary/application-permissions.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "name": "democanary", 3 | "lastModified": __TIMESTAMP__, 4 | "lastModifiedBy": "demo", 5 | "permissions": {} 6 | } -------------------------------------------------------------------------------- /pipelines/armory-app.yaml: -------------------------------------------------------------------------------- 1 | #file: application.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: armory-samples 6 | spec: 7 | email: test@armory.io 8 | description: Description 9 | -------------------------------------------------------------------------------- /templates/profiles/gate-local.yml: -------------------------------------------------------------------------------- 1 | server: 2 | servlet: 3 | context-path: /api/v1 4 | tomcat: 5 | protocolHeader: X-Forwarded-Proto 6 | remoteIpHeader: X-Forwarded-For 7 | internalProxies: .* 8 | httpsServerPort: X-Forwarded-Port 9 | -------------------------------------------------------------------------------- /pipelines/namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: stage 10 | --- 11 | apiVersion: v1 12 | kind: Namespace 13 | metadata: 14 | name: prod -------------------------------------------------------------------------------- /templates/addons/prometheus/prometheus-service.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus 5 | spec: 6 | ports: 7 | - name: web 8 | port: 9090 9 | protocol: TCP 10 | targetPort: web 11 | selector: 12 | app: prometheus 13 | type: ClusterIP -------------------------------------------------------------------------------- /pipelines/aws-app.yaml: -------------------------------------------------------------------------------- 1 | #file: application.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: armory-aws 6 | spec: 7 | email: test@armory.io 8 | description: Pre-built AWS armory app with pipelines for all targets (EC2, ECS, Fargate, EKS, Lambda) 9 | -------------------------------------------------------------------------------- /templates/addons/prometheus/prometheus-ingress-noauth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: prom-ingress 5 | spec: 6 | rules: 7 | - http: 8 | paths: 9 | - backend: 10 | serviceName: prometheus-operated 11 | servicePort: 9090 12 | path: /prometheus -------------------------------------------------------------------------------- /scripts/README.md: -------------------------------------------------------------------------------- 1 | # Scripts 2 | 3 | This directory holds the scripts that make up the core of Minnaker. 
4 | 5 | We use `yml` instead of `yaml` for consistency (all service-settings and profiles require `yml`) 6 | 7 | ## TODOs 8 | 9 | * Fix osx_install.sh to use kustomize 10 | * Fix no_auth_install.sh to use kustomize 11 | * Fix addons/ and utils/ scripts 12 | -------------------------------------------------------------------------------- /templates/manifests/spinnaker-default-clusteradmin-clusterrolebinding.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: NAMESPACE-default-admin 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: cluster-admin 10 | subjects: 11 | - kind: ServiceAccount 12 | name: default 13 | namespace: NAMESPACE 14 | -------------------------------------------------------------------------------- /pipelines/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # file: kustomization.yaml 2 | resources: 3 | - armory-app.yaml 4 | - armory-basic-k8s-pipe.yaml 5 | - armory-jenkins.yaml 6 | - armory-github.yaml 7 | - armory-slack.yaml 8 | - bootstrap.yaml 9 | - namespaces.yaml 10 | 11 | #patchesStrategicMerge: 12 | # - patch.yaml 13 | #namespace: spinnaker # Note: you should change this value if you are _not_ deploying into the `spinnaker` namespace. 
14 | -------------------------------------------------------------------------------- /templates/addons/prometheus/prometheus-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: prom-ingress 5 | annotations: 6 | traefik.ingress.kubernetes.io/auth-type: basic 7 | traefik.ingress.kubernetes.io/auth-secret: prometheus-auth 8 | spec: 9 | rules: 10 | - http: 11 | paths: 12 | - backend: 13 | serviceName: prometheus-operated 14 | servicePort: 9090 15 | path: /prometheus -------------------------------------------------------------------------------- /templates/addons/demo/demok8s/applications/demok8s/application-metadata.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "name": "DEMOK8S", 3 | "description": null, 4 | "email": "demo@armory.io", 5 | "updateTs": "__TIMESTAMP__", 6 | "createTs": "__TIMESTAMP__", 7 | "lastModifiedBy": "demo", 8 | "cloudProviders": "kubernetes", 9 | "trafficGuards": [], 10 | "instancePort": 80, 11 | "user": "demo", 12 | "dataSources": { 13 | "disabled": [], 14 | "enabled": [] 15 | } 16 | } -------------------------------------------------------------------------------- /templates/manifests/spinnaker-ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | labels: 6 | app: spin 7 | name: spin-ingress 8 | namespace: NAMESPACE 9 | spec: 10 | rules: 11 | - 12 | http: 13 | paths: 14 | - backend: 15 | serviceName: spin-deck 16 | servicePort: 9000 17 | path: / 18 | - backend: 19 | serviceName: spin-gate 20 | servicePort: 8084 21 | path: /api/v1 22 | -------------------------------------------------------------------------------- /templates/addons/demo/democanary/applications/democanary/application-metadata.json.tmpl: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "DEMOCANARY", 3 | "description": null, 4 | "email": "demo@armory.io", 5 | "updateTs": "__TIMESTAMP__", 6 | "createTs": "__TIMESTAMP__", 7 | "lastModifiedBy": "demo", 8 | "cloudProviders": "kubernetes", 9 | "trafficGuards": [], 10 | "instancePort": 80, 11 | "user": "demo", 12 | "dataSources": { 13 | "disabled": [], 14 | "enabled": [ 15 | "canaryConfigs" 16 | ] 17 | } 18 | } -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | # This action creates a new patch release on push to master. 2 | # The behavior can be modified based on commit message or PR label. 3 | # See https://github.com/rymndhng/release-on-push-action#readme for more info. 4 | 5 | name: Publish Patch Release 6 | 7 | on: 8 | push: 9 | branches: 10 | - master 11 | 12 | jobs: 13 | release-on-push: 14 | runs-on: ubuntu-latest 15 | env: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 17 | steps: 18 | - uses: rymndhng/release-on-push-action@v0.16.0 19 | with: 20 | bump_version_scheme: patch 21 | -------------------------------------------------------------------------------- /pipelines/armory-github.yaml: -------------------------------------------------------------------------------- 1 | # file: deploy-nginx.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Pipeline 4 | metadata: 5 | name: configure-github-integration 6 | spec: 7 | description: Click "Start Manual Execution" to the Right for Instructions 8 | application: &app-name armory-samples 9 | stages: 10 | - type: manualJudgment 11 | properties: 12 | name: Configure Github Integration 13 | refId: "1" 14 | failPipeline: true 15 | instructions: "Click Here -> Configure Github

Watch Video and connect GitHub&#10;

" 16 | -------------------------------------------------------------------------------- /pipelines/armory-jenkins.yaml: -------------------------------------------------------------------------------- 1 | # file: deploy-nginx.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Pipeline 4 | metadata: 5 | name: configure-jenkins-integration 6 | spec: 7 | description: Click "Start Manual Execution" to the Right for Instructions 8 | application: &app-name armory-samples 9 | stages: 10 | - type: manualJudgment 11 | properties: 12 | name: Configure Jenkins Integration 13 | refId: "1" 14 | failPipeline: true 15 | instructions: "Click Here -> Configure Jenkins

Watch Video and Configure Jenkins

" 16 | -------------------------------------------------------------------------------- /pipelines/armory-slack.yaml: -------------------------------------------------------------------------------- 1 | # file: deploy-nginx.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Pipeline 4 | metadata: 5 | name: configure-slack-integration 6 | spec: 7 | description: Click "Start Manual Execution" to the Right for Instructions 8 | application: &app-name armory-samples 9 | stages: 10 | - type: manualJudgment 11 | properties: 12 | name: Configure Slack Integration 13 | refId: "1" 14 | failPipeline: true 15 | instructions: "Click Here -> Configure Slack

Watch Video and Configure Slack

" 16 | 17 | -------------------------------------------------------------------------------- /scripts/uninstall-k3s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2021 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | echo "Uninstalling K3s" 20 | # REMOVE K3S 21 | /usr/local/bin/k3s-uninstall.sh 22 | 23 | -------------------------------------------------------------------------------- /scripts/utils/scale_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | NAMESPACE=spinnaker 3 | NAMESPACE_INGRESS=ingress-nginx 4 | 5 | scale() { 6 | kubectl get deployments -n ${NAMESPACE} | awk '{ if (NR > 1) print $1}' | xargs -L1 kubectl scale deploy --replicas=$1 -n ${NAMESPACE} 7 | kubectl get statefulsets -n ${NAMESPACE} | awk '{ if (NR > 1) print $1}' | xargs -L1 kubectl scale sts --replicas=$1 -n ${NAMESPACE} 8 | kubectl get deployments -n ${NAMESPACE_INGRESS} | awk '{ if (NR > 1) print $1}' | xargs -L1 kubectl scale deploy --replicas=$1 -n ${NAMESPACE_INGRESS} 9 | } 10 | 11 | pause=false 12 | 13 | for arg in "$@" 14 | do 15 | case $arg in 16 | -p|--pause) 17 | pause=true 18 | shift 19 | ;; 20 | 
-r|--resume|-s|--start) 21 | pause=false 22 | shift 23 | ;; 24 | *) 25 | OTHER_ARGUMENTS+=("$1") 26 | shift # Remove generic argument from processing 27 | ;; 28 | esac 29 | done 30 | 31 | if [ $pause == true ] 32 | then 33 | scale 0 34 | else 35 | scale 1 36 | fi -------------------------------------------------------------------------------- /templates/config: -------------------------------------------------------------------------------- 1 | currentDeployment: default 2 | deploymentConfigurations: 3 | - name: default 4 | version: 2.19.8 5 | providers: 6 | kubernetes: 7 | enabled: true 8 | accounts: 9 | - name: spinnaker 10 | providerVersion: V2 11 | serviceAccount: true 12 | onlySpinnakerManaged: true 13 | primaryAccount: spinnaker 14 | deploymentEnvironment: 15 | size: SMALL 16 | type: Distributed 17 | accountName: spinnaker 18 | location: NAMESPACE 19 | persistentStorage: 20 | persistentStoreType: s3 21 | s3: 22 | bucket: spinnaker 23 | rootFolder: front50 24 | pathStyleAccess: true 25 | endpoint: http://minio.spinnaker:9000 26 | accessKeyId: minio 27 | secretAccessKey: MINIO_PASSWORD 28 | features: 29 | artifacts: true 30 | artifactsRewrite: true 31 | security: 32 | apiSecurity: 33 | ssl: 34 | enabled: false 35 | overrideBaseUrl: https://PUBLIC_ENDPOINT/api/v1 36 | uiSecurity: 37 | ssl: 38 | enabled: false 39 | overrideBaseUrl: https://PUBLIC_ENDPOINT 40 | artifacts: 41 | http: 42 | enabled: true 43 | accounts: [] 44 | telemetry: 45 | enabled: true 46 | -------------------------------------------------------------------------------- /templates/archive/minio-without-pvc.yml: -------------------------------------------------------------------------------- 1 | # This is currently not used 2 | --- 3 | apiVersion: apps/v1 4 | kind: StatefulSet 5 | metadata: 6 | name: minio 7 | namespace: spinnaker 8 | spec: 9 | replicas: 1 10 | serviceName: minio 11 | selector: 12 | matchLabels: 13 | app: minio 14 | template: 15 | metadata: 16 | labels: 17 | app: minio 18 | spec: 
19 | containers: 20 | - name: minio 21 | image: minio/minio 22 | args: 23 | - server 24 | - /storage 25 | env: 26 | # MinIO access key and secret key 27 | - name: MINIO_ACCESS_KEY 28 | value: "minio" 29 | - name: MINIO_SECRET_KEY 30 | value: "MINIO_PASSWORD" 31 | ports: 32 | - containerPort: 9000 33 | volumeMounts: 34 | - name: storage 35 | mountPath: "/storage" 36 | volumes: 37 | - name: storage 38 | hostPath: 39 | path: BASE_DIR/minio 40 | type: DirectoryOrCreate 41 | --- 42 | apiVersion: v1 43 | kind: Service 44 | metadata: 45 | name: minio 46 | namespace: spinnaker 47 | spec: 48 | ports: 49 | - port: 9000 50 | targetPort: 9000 51 | protocol: TCP 52 | selector: 53 | app: minio 54 | -------------------------------------------------------------------------------- /templates/archive/mariadb-without-pvc.yml: -------------------------------------------------------------------------------- 1 | # This is currently not used 2 | # Do not use this, permissions are not currently set up correctly 3 | --- 4 | apiVersion: apps/v1 5 | kind: StatefulSet 6 | metadata: 7 | name: mariadb 8 | namespace: spinnaker 9 | spec: 10 | replicas: 1 11 | serviceName: mariadb 12 | selector: 13 | matchLabels: 14 | app: mariadb 15 | template: 16 | metadata: 17 | labels: 18 | app: mariadb 19 | spec: 20 | containers: 21 | - name: mariadb 22 | image: mariadb:10.4.12-bionic 23 | volumeMounts: 24 | - name: mysql 25 | mountPath: "/var/lib/mysql" 26 | env: 27 | # - name: HOME 28 | # value: "/home/spinnaker" 29 | - name: MYSQL_ROOT_PASSWORD 30 | value: "MYSQL_PASSWORD" 31 | ports: 32 | - containerPort: 3306 33 | protocol: TCP 34 | volumes: 35 | - name: mysql 36 | hostPath: 37 | path: BASE_DIR/mysql 38 | type: DirectoryOrCreate 39 | --- 40 | apiVersion: v1 41 | kind: Service 42 | metadata: 43 | name: mariadb 44 | namespace: spinnaker 45 | spec: 46 | ports: 47 | - port: 3306 48 | targetPort: 3306 49 | protocol: TCP 50 | selector: 51 | app: mariadb 
-------------------------------------------------------------------------------- /templates/addons/demo/democanary/canary_config/Latency.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "createdTimestamp": __TIMESTAMP__, 3 | "updatedTimestamp": __TIMESTAMP__, 4 | "createdTimestampIso": "__ISO_TIMESTAMP__", 5 | "updatedTimestampIso": "__ISO_TIMESTAMP__", 6 | "name": "Latency", 7 | "id": "${CANARY_CONFIG_UUID}", 8 | "description": "Latency Canary Config", 9 | "configVersion": "1", 10 | "applications": [ 11 | "democanary" 12 | ], 13 | "judge": { 14 | "name": "NetflixACAJudge-v1.0", 15 | "judgeConfigurations": {} 16 | }, 17 | "metrics": [ 18 | { 19 | "name": "latency", 20 | "query": { 21 | "type": "prometheus", 22 | "metricName": "custom_dummy_latency", 23 | "labelBindings": [], 24 | "groupByFields": [], 25 | "customInlineTemplate": "", 26 | "customFilterTemplate": "Filter", 27 | "serviceType": "prometheus" 28 | }, 29 | "groups": [ 30 | "Latency" 31 | ], 32 | "analysisConfigurations": { 33 | "canary": { 34 | "direction": "increase" 35 | } 36 | }, 37 | "scopeName": "default" 38 | } 39 | ], 40 | "templates": { 41 | "Filter": "group=\"${scope}\",namespace=\"${location}\"" 42 | }, 43 | "classifier": { 44 | "groupWeights": { 45 | "Latency": 100 46 | } 47 | } 48 | } -------------------------------------------------------------------------------- /templates/manifests/halyard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: halyard 6 | namespace: NAMESPACE 7 | spec: 8 | replicas: 1 9 | serviceName: halyard 10 | selector: 11 | matchLabels: 12 | app: halyard 13 | template: 14 | metadata: 15 | labels: 16 | app: halyard 17 | spec: 18 | containers: 19 | - name: halyard 20 | image: HALYARD_IMAGE 21 | volumeMounts: 22 | - name: hal 23 | mountPath: "/home/spinnaker/.hal" 24 | - name: kube 25 | mountPath: 
"/home/spinnaker/.kube" 26 | env: 27 | - name: HOME 28 | value: "/home/spinnaker" 29 | ports: 30 | - containerPort: 8064 31 | protocol: TCP 32 | readinessProbe: 33 | exec: 34 | command: 35 | - wget 36 | - --no-check-certificate 37 | - --spider 38 | - -q 39 | - http://localhost:8064/health 40 | securityContext: 41 | runAsUser: 1000 42 | runAsGroup: 65535 43 | volumes: 44 | - name: hal 45 | hostPath: 46 | path: BASE_DIR/.hal 47 | type: DirectoryOrCreate 48 | - name: kube 49 | hostPath: 50 | path: BASE_DIR/.kube 51 | type: DirectoryOrCreate 52 | -------------------------------------------------------------------------------- /templates/manifests/mariadb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: mariadb-pvc 6 | labels: 7 | app: mariadb 8 | namespace: NAMESPACE 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 4Gi 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: mariadb 20 | namespace: NAMESPACE 21 | spec: 22 | replicas: 1 23 | serviceName: mariadb 24 | selector: 25 | matchLabels: 26 | app: mariadb 27 | template: 28 | metadata: 29 | labels: 30 | app: mariadb 31 | spec: 32 | containers: 33 | - name: mariadb 34 | image: mariadb:10.4.12-bionic 35 | volumeMounts: 36 | - name: mysql 37 | mountPath: "/var/lib/mysql" 38 | env: 39 | - name: MYSQL_ROOT_PASSWORD 40 | value: "MARIADB_PASSWORD" 41 | ports: 42 | - containerPort: 3306 43 | protocol: TCP 44 | securityContext: 45 | runAsUser: 1000 46 | runAsGroup: 65535 47 | fsGroup: 65535 48 | volumes: 49 | - name: mysql 50 | persistentVolumeClaim: 51 | claimName: mariadb-pvc 52 | --- 53 | apiVersion: v1 54 | kind: Service 55 | metadata: 56 | name: mariadb 57 | namespace: NAMESPACE 58 | spec: 59 | ports: 60 | - port: 3306 61 | targetPort: 3306 62 | protocol: TCP 63 | selector: 64 | app: mariadb 65 | 
-------------------------------------------------------------------------------- /scripts/utils/remove_auth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -e 20 | 21 | # Linux only 22 | 23 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 24 | KUBERNETES_CONTEXT=default 25 | NAMESPACE=spinnaker 26 | BASE_DIR=/etc/spinnaker 27 | 28 | mv ${BASE_DIR}/.hal/.secret/spinnaker_password ${BASE_DIR}/.hal/.secret/spinnaker_password_removed 29 | yq d -i ${BASE_DIR}/.hal/default/profiles/gate-local.yml security 30 | sed -i 's|^window.spinnakerSettings.authEnabled|# window.spinnakerSettings.authEnabled|g' ${BASE_DIR}/.hal/default/profiles/settings-local.js 31 | 32 | kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply" -------------------------------------------------------------------------------- /scripts/utils/undo_expose_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 
Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -e 20 | 21 | # Linux only 22 | 23 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 24 | KUBERNETES_CONTEXT=default 25 | NAMESPACE=spinnaker 26 | 27 | BASE_DIR=/etc/spinnaker 28 | 29 | for SVC in front50 igor rosco echo deck orca gate kayenta fiat clouddriver redis; do 30 | touch ${BASE_DIR}/.hal/default/service-settings/${SVC}.yml 31 | yq d -i ${BASE_DIR}/.hal/default/service-settings/${SVC}.yml kubernetes.serviceType 32 | done 33 | 34 | kubectl --context ${KUBERNETES_CONTEXT} --namespace ${NAMESPACE} delete svc -l app=spin 35 | 36 | kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply" -------------------------------------------------------------------------------- /scripts/utils/expose_local.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -e 20 | 21 | # Linux only 22 | 23 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 24 | KUBERNETES_CONTEXT=default 25 | NAMESPACE=spinnaker 26 | 27 | BASE_DIR=/etc/spinnaker 28 | 29 | for SVC in front50 igor rosco echo deck orca gate kayenta fiat clouddriver redis; do 30 | touch ${BASE_DIR}/.hal/default/service-settings/${SVC}.yml 31 | yq w -i ${BASE_DIR}/.hal/default/service-settings/${SVC}.yml kubernetes.serviceType LoadBalancer 32 | done 33 | 34 | kubectl --context ${KUBERNETES_CONTEXT} --namespace ${NAMESPACE} delete svc -l app=spin 35 | 36 | kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply" 37 | -------------------------------------------------------------------------------- /templates/manifests/minio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: minio-pvc 6 | labels: 7 | app: minio 8 | namespace: NAMESPACE 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | --- 16 | apiVersion: apps/v1 17 | kind: StatefulSet 18 | metadata: 19 | name: minio 20 | namespace: NAMESPACE 21 | spec: 22 | replicas: 1 23 | serviceName: minio 24 | selector: 25 | matchLabels: 26 | app: minio 27 | template: 28 | metadata: 29 | labels: 30 | app: minio 31 | spec: 32 | containers: 33 | - 
name: minio 34 | image: minio/minio 35 | args: 36 | - server 37 | - /storage 38 | env: 39 | # MinIO access key and secret key 40 | - name: MINIO_ACCESS_KEY 41 | value: "minio" 42 | - name: MINIO_SECRET_KEY 43 | value: "MINIO_PASSWORD" 44 | ports: 45 | - containerPort: 9000 46 | volumeMounts: 47 | - name: storage 48 | mountPath: "/storage" 49 | securityContext: 50 | runAsUser: 1000 51 | runAsGroup: 65535 52 | fsGroup: 65535 53 | volumes: 54 | - name: storage 55 | persistentVolumeClaim: 56 | claimName: minio-pvc 57 | --- 58 | apiVersion: v1 59 | kind: Service 60 | metadata: 61 | name: minio 62 | namespace: NAMESPACE 63 | spec: 64 | ports: 65 | - port: 9000 66 | targetPort: 9000 67 | protocol: TCP 68 | selector: 69 | app: minio 70 | -------------------------------------------------------------------------------- /guides/set-up-slack-notifications.md: -------------------------------------------------------------------------------- 1 | # Setting up Notifications to Slack 2 | 3 | Spinnaker supports notifications to Slack (and other places) on these six events: 4 | 5 | * Pipeline start 6 | * Pipeline completion 7 | * Pipeline failure 8 | * Stage start 9 | * Stage completion 10 | * Stage failure 11 | 12 | ## Create a Slack bot user (and get the Slack auth token) 13 | 14 | 1. Go to your Slack management page, and navigate to "Configure Apps" (or go to https://your-slack-workspace.slack.com/apps/manage) 15 | 1. Click on "Custom Integrations" 16 | 1. Click on "Bots" 17 | 1. Click on "Add to Slack" 18 | 1. Give your Slack bot a username (such as `spinnakerbot`) 19 | 1. Click "Add bot integration" 20 | 1. Copy the "API Token". Optionally, customize other settings on the Bot configuration. 21 | 1. Click "Save Integration" 22 | 23 | ## Add the Slack bot user to Spinnaker 24 | 25 | 1. SSH into your Minnaker instance 26 | 1. Run this command (replace `spinnakerbot` with your Slack bot's username) to add the Slack notification configuration. 
27 | 28 | ```bash 29 | hal config notification slack edit --bot-name spinnakerbot --token 30 | ``` 31 | 32 | 1. Run this command to enable the Slack notification 33 | 34 | ```bash 35 | hal config notification slack enable 36 | ``` 37 | 38 | 1. Run this command to apply your changes 39 | 40 | ```bash 41 | hal deploy apply 42 | ``` 43 | 44 | ## Use the Slack notification 45 | 46 | In order to notify into a given Slack channel, the Slack bot should be invited into your Slack channel(s). Then, in a pipeline configuration, on the 'configuration' page, you can configure notifications to those channels for when your pipeline starts, completes, or fails, and you can additionally configure the same notifications on individual stages. 47 | -------------------------------------------------------------------------------- /pipelines/armory-basic-k8s-pipe.yaml: -------------------------------------------------------------------------------- 1 | # file: deploy-nginx.yaml 2 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 3 | kind: Pipeline 4 | metadata: 5 | name: basic-deploy-to-kubernetes 6 | spec: 7 | description: Basic nginx deploy to K3s 8 | application: &app-name armory-samples 9 | stages: 10 | - type: manualJudgment 11 | properties: 12 | name: This deploys locally on K3s. Want External K8s? 13 | refId: "1" 14 | failPipeline: true 15 | instructions: "Click Here -> Connect to Deployment Cluster?

Watch Video and Configure Armory Agent

" 16 | - type: deployManifest 17 | properties: 18 | name: Deploy text manifest 19 | refId: "2" 20 | requisiteStageRefIds: ["1"] 21 | account: spinnaker 22 | cloudProvider: kubernetes 23 | moniker: 24 | app: *app-name 25 | skipExpressionEvaluation: true 26 | source: text 27 | comments: This is a test for webhooks 28 | manifests: 29 | - | 30 | apiVersion: apps/v1 31 | kind: Deployment 32 | metadata: 33 | name: new-microservice 34 | namespace: prod 35 | labels: 36 | app: nginx 37 | spec: 38 | replicas: 2 39 | selector: 40 | matchLabels: 41 | app: nginx 42 | template: 43 | metadata: 44 | labels: 45 | app: nginx 46 | spec: 47 | containers: 48 | - name: nginx 49 | image: nginx:1.14.2 50 | ports: 51 | - containerPort: 80 52 | -------------------------------------------------------------------------------- /templates/archive/halyard-with-pvc.yml: -------------------------------------------------------------------------------- 1 | # This is currently not used 2 | --- 3 | apiVersion: v1 4 | kind: PersistentVolumeClaim 5 | metadata: 6 | name: hal-pvc 7 | labels: 8 | app: halyard 9 | namespace: spinnaker 10 | spec: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 100Mi 16 | --- 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: kube-pvc 21 | labels: 22 | app: halyard 23 | namespace: spinnaker 24 | spec: 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 10Mi 30 | --- 31 | apiVersion: apps/v1 32 | kind: StatefulSet 33 | metadata: 34 | name: halyard 35 | namespace: spinnaker 36 | spec: 37 | replicas: 1 38 | serviceName: halyard 39 | selector: 40 | matchLabels: 41 | app: halyard 42 | template: 43 | metadata: 44 | labels: 45 | app: halyard 46 | spec: 47 | containers: 48 | - name: halyard 49 | image: HALYARD_IMAGE 50 | volumeMounts: 51 | - name: hal 52 | mountPath: "/home/spinnaker/.hal" 53 | - name: kube 54 | mountPath: "/home/spinnaker/.kube" 55 | env: 56 | - name: HOME 57 | value: 
"/home/spinnaker" 58 | ports: 59 | - containerPort: 8064 60 | protocol: TCP 61 | readinessProbe: 62 | exec: 63 | command: 64 | - wget 65 | - --no-check-certificate 66 | - --spider 67 | - -q 68 | - http://localhost:8064/health 69 | securityContext: 70 | runAsUser: 1000 71 | runAsGroup: 65535 72 | # fsGroup: 65535 73 | volumes: 74 | - name: hal 75 | persistentVolumeClaim: 76 | claimName: hal-pvc 77 | - name: kube 78 | persistentVolumeClaim: 79 | claimName: kube-pvc 80 | -------------------------------------------------------------------------------- /scripts/addons/spinnaker_enable_canary.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | set -x 20 | set -e 21 | 22 | ###### 23 | hal config canary enable 24 | hal config canary prometheus enable 25 | hal config canary prometheus account add prometheus --base-url http://prometheus.default:9090/prometheus 26 | 27 | hal config canary aws enable 28 | # For some reason, Kayenta doesn't like using the same bucket as Front50, so we're setting up a different bucket 29 | # This will result in s3://kayenta/kayenta 30 | # TODO: Detect existing account 31 | echo "MINIO_PASSWORD" | hal config canary aws account add minio --bucket kayenta --root-folder kayenta --endpoint http://minio.spinnaker:9000 --access-key-id minio --secret-access-key 32 | hal config canary aws edit --s3-enabled=true 33 | 34 | hal config canary edit --default-metrics-store prometheus 35 | hal config canary edit --default-metrics-account prometheus 36 | hal config canary edit --default-storage-account minio 37 | 38 | # TODO: Detect existence of this 39 | # Extra blank lines are intentional 40 | tee -a /etc/spinnaker/.hal/default/profiles/gate-local.yml <<-'EOF' 41 | 42 | services: 43 | kayenta: 44 | canaryConfigStore: true 45 | 46 | EOF 47 | 48 | hal deploy apply -------------------------------------------------------------------------------- /pipelines/bootstrap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: bootstrap 5 | spec: 6 | email: chad.tripod@armory.io 7 | description: Sample pipeline showing a blue/green deployment 8 | --- 9 | apiVersion: pacrd.armory.spinnaker.io/v1alpha1 10 | kind: Pipeline 11 | metadata: 12 | name: click-start-manual-execution 13 | spec: 14 | description: Click "Start Manual Execution" Link to the Right. 
Then see the "Armory samples" Application 15 | application: &app-name bootstrap 16 | expectedArtifacts: 17 | - id: &manifest-repo-id pipelines 18 | displayName: manifest-repo 19 | matchArtifact: &manifest-repo-artifact 20 | type: git/repo 21 | properties: 22 | artifactAccount: gitrepo 23 | reference: https://github.com/armory/minnaker.git 24 | version: pacrd 25 | defaultArtifact: 26 | <<: *manifest-repo-artifact 27 | useDefaultArtifact: true 28 | usePriorArtifact: false 29 | stages: 30 | - type: bakeManifest 31 | properties: 32 | templateRenderer: KUSTOMIZE 33 | refId: "1" 34 | name: Render Kustomize Template 35 | kustomizeFilePath: "pipelines/kustomization.yaml" 36 | inputArtifact: 37 | id: *manifest-repo-id 38 | account: gitrepo 39 | expectedArtifacts: 40 | - id: &rendered-manifest-id rendered-manifest-id 41 | displayName: app-manifest 42 | useDefaultArtifact: false 43 | usePriorArtifact: false 44 | matchArtifact: 45 | type: embedded/base64 46 | properties: 47 | name: app-manifest 48 | - type: manualJudgment 49 | properties: 50 | name: Continue Deployment? 51 | refId: "2" 52 | requisiteStageRefIds: [ "1" ] 53 | - type: deployManifest 54 | properties: 55 | name: Deploy Application 56 | refId: "3" 57 | requisiteStageRefIds: ["2"] 58 | account: spinnaker 59 | cloudProvider: kubernetes 60 | source: artifact 61 | manifestArtifactAccount: embedded-artifact 62 | manifestArtifactId: *rendered-manifest-id 63 | moniker: 64 | app: *app-name 65 | namespaceOverride: spinnaker 66 | -------------------------------------------------------------------------------- /scripts/utils/switch_to_oss.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -e 20 | 21 | # Linux only 22 | 23 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 24 | KUBERNETES_CONTEXT=default 25 | NAMESPACE=spinnaker 26 | BASE_DIR=/etc/spinnaker 27 | 28 | OLD_IMAGE=$(yq r ${BASE_DIR}/manifests/halyard.yml spec.template.spec.containers[0].image) 29 | if [[ ${OLD_IMAGE} =~ "armory" ]]; then 30 | echo ${OLD_IMAGE} > ${BASE_DIR}/armory_image 31 | else 32 | echo ${OLD_IMAGE} > ${BASE_DIR}/oss_image 33 | fi 34 | 35 | if [[ -f ${BASE_DIR}/oss_image ]]; then 36 | yq w -i ${BASE_DIR}/manifests/halyard.yml spec.template.spec.containers[0].image $(cat ${BASE_DIR}/oss_image) 37 | else 38 | yq w -i ${BASE_DIR}/manifests/halyard.yml spec.template.spec.containers[0].image gcr.io/spinnaker-marketplace/halyard:stable 39 | fi 40 | 41 | kubectl apply -f ${BASE_DIR}/manifests/halyard.yml 42 | 43 | sleep 5 44 | 45 | while [[ $(kubectl --context ${KUBERNETES_CONTEXT} get statefulset -n ${NAMESPACE} halyard -ojsonpath='{.status.readyReplicas}') -ne 1 ]]; 46 | do 47 | echo "Waiting for Halyard pod to start" 48 | sleep 5; 49 | done 50 | 51 | yq r ${BASE_DIR}/.hal/config deploymentConfigurations[0].armory >> ${BASE_DIR}/halconfig_armory 52 | yq d -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].armory 53 | 54 | VERSION=$(kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal version latest -q") 55 | kubectl --context ${KUBERNETES_CONTEXT} -n 
${NAMESPACE} exec -i halyard-0 -- sh -c "hal config version edit --version ${VERSION}" 56 | kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply" -------------------------------------------------------------------------------- /scripts/refresh_endpoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | # Not (currently) designed for OSX 20 | 21 | # This is used to 'reset' a Minnaker instance. 
It regenerates the Minio and Gate passwords 22 | # Also, it will do the following with the public endpoint: 23 | # # If a new public endpoint is provided (with the flag -P), then the new endpoint will be used 24 | # # Otherwise, if the previous public endpoint was provided was a flag, that endpoint will be used 25 | # # Otherwise, the public endpoint will be re-detected 26 | 27 | # set -x 28 | set -e 29 | 30 | ##### Functions 31 | print_help () { 32 | set +x 33 | echo "Usage: refresh_endpoint.sh" 34 | echo " [-P|--public-endpoint ] : Specify public IP (or DNS name) for instance (rather than autodetection)" 35 | echo " [-B|--base-dir ] : Specify root directory to use for manifests" 36 | set -x 37 | } 38 | 39 | apply_changes () { 40 | info "Executing ${BASE_DIR}/deploy.sh" 41 | cd "${BASE_DIR}" 42 | ./deploy.sh 43 | } 44 | 45 | PUBLIC_ENDPOINT="" 46 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" >/dev/null 2>&1 && pwd ) 47 | BASE_DIR=${BASE_DIR:=$PROJECT_DIR/spinsvc} 48 | OUT="$PROJECT_DIR/minnaker.log" 49 | 50 | . 
"${PROJECT_DIR}/scripts/functions.sh" 51 | 52 | while [ "$#" -gt 0 ]; do 53 | case "$1" in 54 | -P|--public-endpoint) 55 | if [ -n $2 ]; then 56 | PUBLIC_ENDPOINT=$2 57 | shift 58 | else 59 | printf "Error: --public-endpoint requires an IP address >&2" 60 | exit 1 61 | fi 62 | ;; 63 | -B|--base-dir) 64 | if [ -n $2 ]; then 65 | BASE_DIR=$2 66 | else 67 | printf "Error: --base-dir requires a directory >&2" 68 | exit 1 69 | fi 70 | ;; 71 | -h|--help) 72 | print_help 73 | exit 1 74 | ;; 75 | esac 76 | shift 77 | done 78 | 79 | PATH=${PATH}:/usr/local/bin 80 | export PATH 81 | 82 | info "Refreshing Endpoint - Please note this can take a few minutes since we need to restart k3s" 83 | 84 | detect_endpoint force_refresh 85 | update_endpoint 86 | restart_k3s 87 | apply_changes 88 | spin_endpoint -------------------------------------------------------------------------------- /guides/setup-ldap.md: -------------------------------------------------------------------------------- 1 | 2 | # Set up LDAP (Futurama container) 3 | 4 | Create this manifest: 5 | 6 | ```yml 7 | # ldap.yml 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | name: ldap 13 | labels: 14 | app: ldap 15 | namespace: spinnaker 16 | spec: 17 | replicas: 1 18 | selector: 19 | matchLabels: 20 | app: ldap 21 | template: 22 | metadata: 23 | labels: 24 | app: ldap 25 | spec: 26 | containers: 27 | - name: ldap 28 | image: rroemhild/test-openldap:latest 29 | ports: 30 | - containerPort: 389 31 | protocol: TCP 32 | - containerPort: 636 33 | protocol: TCP 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: ldap 39 | namespace: spinnaker 40 | spec: 41 | ports: 42 | - port: 389 43 | name: ldap 44 | protocol: TCP 45 | targetPort: 389 46 | - port: 636 47 | name: ldaps 48 | protocol: TCP 49 | targetPort: 636 50 | selector: 51 | app: ldap 52 | type: ClusterIP 53 | ``` 54 | 55 | Create it: 56 | 57 | ```bash 58 | kubectl apply -f ldap.yml 59 | ``` 60 | 61 | ## Enable LDAP AuthN 62 
| 63 | ```bash 64 | hal config security authn ldap edit \ 65 | --user-search-filter "(uid={0})" \ 66 | --user-search-base "ou=people,dc=planetexpress,dc=com" \ 67 | --url "ldap://ldap.spinnaker:389" 68 | 69 | hal config security authn ldap enable 70 | ``` 71 | 72 | Remove settings-local.js authEnabled flag: 73 | 74 | (We can't remove the file completely cause of artifactrewrite, and can't explicitly set it to false, so we comment it) 75 | 76 | ```bash 77 | sed -i 's|^window.spinnakerSettings.authEnabled|// window.spinnakerSettings.authEnabled|g' \ 78 | /etc/spinnaker/.hal/default/profiles/settings-local.js 79 | ``` 80 | 81 | Disable basic auth: 82 | 83 | ```bash 84 | sed -i 's/enabled: .*/enabled: false/g' \ 85 | /etc/spinnaker/.hal/default/profiles/gate-local.yml 86 | ``` 87 | 88 | ```bash 89 | hal deploy apply 90 | ``` 91 | 92 | ## Enable LDAP AuthZ 93 | 94 | **Must get into Halyard container:** 95 | 96 | ```bash 97 | kubectl -n spinnaker get pods 98 | 99 | # Grab name of halyard pod 100 | kubectl -n spinnaker exec -it bash 101 | ``` 102 | 103 | ```bash 104 | hal config security authz ldap edit \ 105 | --url 'ldap://ldap.spinnaker:389' \ 106 | --manager-dn 'cn=Hubert J. 
Farnsworth,ou=people,dc=planetexpress,dc=com' \ 107 | --manager-password \ 108 | --user-search-base 'dc=planetexpress,dc=com' \ 109 | --user-search-filter '(uid={0})' \ 110 | --group-search-base 'dc=planetexpress,dc=com' \ 111 | --group-search-filter '(member={0})' \ 112 | --group-role-attributes cn 113 | 114 | hal config security authz edit --type ldap 115 | hal config security authz enable 116 | ``` 117 | 118 | ## Apply changes 119 | 120 | ```bash 121 | hal deploy apply 122 | ``` 123 | -------------------------------------------------------------------------------- /scripts/regenerate_password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | # Not (currently) designed for OSX 20 | 21 | # This is used to 'reset' a Minnaker instance. 
It regenerates the Minio and Gate passwords 22 | # Also, it will do the following with the public endpoint: 23 | # # If a new public endpoint is provided (with the flag -P), then the new endpoint will be used 24 | # # Otherwise, if the previous public endpoint was provided was a flag, that endpoint will be used 25 | # # Otherwise, the public endpoint will be re-detected 26 | 27 | set -x 28 | set -e 29 | 30 | print_help () { 31 | set +x 32 | echo "Usage: regenerate_password.sh" 33 | echo " [-B|--base-dir ] : Specify root directory to use for manifests" 34 | set -x 35 | } 36 | 37 | while [ "$#" -gt 0 ]; do 38 | case "$1" in 39 | -B|--base-dir) 40 | if [ -n "$2" ]; then 41 | BASE_DIR=$2 42 | else 43 | printf "Error: --base-dir requires a directory\n" >&2 44 | exit 1 45 | fi 46 | ;; 47 | -h|--help) 48 | print_help 49 | exit 1 50 | ;; 51 | esac 52 | shift 53 | done 54 | 55 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" >/dev/null 2>&1 && pwd ) 56 | . "${PROJECT_DIR}/scripts/functions.sh" 57 | 58 | 59 | update_spinnaker_password () { 60 | SPINNAKER_PASSWORD=$(cat ${BASE_DIR}/.hal/.secret/spinnaker_password) 61 | yq w -i ${BASE_DIR}/.hal/default/profiles/gate-local.yml security.user.password ${SPINNAKER_PASSWORD} 62 | } 63 | 64 | apply_changes () { 65 | while [[ $(kubectl get statefulset -n spinnaker halyard -ojsonpath='{.status.readyReplicas}') -ne 1 ]]; 66 | do 67 | echo "Waiting for Halyard pod to start" 68 | sleep 2; 69 | done 70 | 71 | # We do this twice, because for some reason Kubernetes sometimes reports pods as healthy on first start after a reboot 72 | sleep 15 73 | 74 | while [[ $(kubectl get statefulset -n spinnaker halyard -ojsonpath='{.status.readyReplicas}') -ne 1 ]]; 75 | do 76 | echo "Waiting for Halyard pod to start" 77 | sleep 2; 78 | done 79 | 80 | kubectl -n spinnaker exec -i halyard-0 -- hal deploy apply 81 | } 82 | 83 | # PUBLIC_ENDPOINT="" 84 | BASE_DIR=${BASE_DIR:-$PROJECT_DIR/spinsvc}  # default only; keep a -B/--base-dir override (matches refresh_endpoint.sh) 85 | 86 | PATH=${PATH}:/usr/local/bin 87 | export PATH 88 | 89 | 
generate_passwords 90 | update_spinnaker_password 91 | apply_changes 92 | -------------------------------------------------------------------------------- /scripts/addons/setup_demo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -x 20 | set -e 21 | 22 | # Takes two parameteters: 23 | # - Filename for UUID 24 | # - Purpose of UUID (only used for the output text) 25 | function generate_or_use_uuid () { 26 | if [[ ! 
-s $1 ]]; then 27 | echo "Generating $2 UUID ($1)" 28 | uuidgen > ${1} 29 | else 30 | echo "$2 UUID already exists: $1: $(cat $1)" 31 | fi 32 | } 33 | 34 | BASE_DIR=/etc/spinnaker 35 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 36 | 37 | PVC="minio-pvc" 38 | FRONT50_BUCKET="spinnaker" 39 | APPLICATION_NAME="demok8s" 40 | 41 | cp -rv ${PROJECT_DIR}/templates/addons/demo ${BASE_DIR}/templates/ 42 | 43 | UUID_PATH=${BASE_DIR}/.hal/.secret/demo_k8s_pipeline_uuid 44 | 45 | generate_or_use_uuid ${UUID_PATH} "K8s Demo Pipeline" 46 | 47 | PIPELINE_UUID=$(cat ${UUID_PATH}) 48 | 49 | MINIO_PATH=$(kubectl -n spinnaker get pv -ojsonpath="{.items[?(@.spec.claimRef.name==\"${PVC}\")].spec.hostPath.path}") 50 | 51 | FRONT50_PATH=${MINIO_PATH}/${FRONT50_BUCKET}/front50 52 | 53 | mkdir -p ${FRONT50_PATH}/{applications,pipelines} 54 | mkdir -p ${FRONT50_PATH}/applications/${APPLICATION_NAME} 55 | mkdir -p ${FRONT50_PATH}/pipelines/${PIPELINE_UUID} 56 | 57 | TIMESTAMP=$(date +%s000) 58 | ISO_TIMESTAMP=$(date +"%Y-%m-%dT%T.000Z") 59 | 60 | # Create namespace(s) 61 | set +e 62 | kubectl create ns dev 63 | kubectl create ns test 64 | kubectl create ns prod 65 | set -e 66 | 67 | # Create application 68 | sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \ 69 | ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/applications/${APPLICATION_NAME}/application-metadata.json.tmpl \ 70 | > ${FRONT50_PATH}/applications/${APPLICATION_NAME}/application-metadata.json 71 | 72 | sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \ 73 | ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/applications/${APPLICATION_NAME}/application-permissions.json.tmpl \ 74 | > ${FRONT50_PATH}/applications/${APPLICATION_NAME}/application-permissions.json 75 | 76 | # Create the pipeline 77 | sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \ 78 | -e "s|__PIPELINE_UUID__|${PIPELINE_UUID}|g" \ 79 | ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/pipelines/PIPELINE_UUID/pipeline-metadata.json.tmpl \ 80 | > 
${FRONT50_PATH}/pipelines/${PIPELINE_UUID}/pipeline-metadata.json 81 | 82 | # Bump last-modified for pipeline 83 | sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \ 84 | ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/pipelines/last-modified.json.tmpl \ 85 | > ${FRONT50_PATH}/pipelines/last-modified.json -------------------------------------------------------------------------------- /scripts/addons/install_prometheus_osx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | set -x 20 | set -e 21 | 22 | # The filename is intentionally prometheus_install and not install_prometheus so install.sh continues to autocomplete 23 | 24 | BASE_DIR=~/minnaker 25 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 26 | 27 | curl -L https://github.com/coreos/prometheus-operator/archive/v0.37.0.tar.gz -o /tmp/prometheus-operator.tgz 28 | tar -xzvf /tmp/prometheus-operator.tgz -C ${BASE_DIR}/ 29 | 30 | mv ${BASE_DIR}/prometheus-operator-* ${BASE_DIR}/prometheus 31 | 32 | cp -rv ${PROJECT_DIR}/templates/addons/prometheus ${BASE_DIR}/templates 33 | 34 | # Installs operator into default namespace. 
Has these resources: 35 | # - clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator 36 | # - clusterrole.rbac.authorization.k8s.io/prometheus-operator 37 | # - deployment.apps/prometheus-operator 38 | # - serviceaccount/prometheus-operator 39 | # - servicemonitor.monitoring.coreos.com/prometheus-operator 40 | # - service/prometheus-operator 41 | 42 | # Have to create the CRD first 43 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/prometheus-operator-crd 44 | sleep 2 45 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/rbac/prometheus-operator 46 | 47 | # Installs a Prometheus (CRD) instance in the default namespace: 48 | # - clusterrolebinding.rbac.authorization.k8s.io/prometheus 49 | # - clusterrole.rbac.authorization.k8s.io/prometheus 50 | # - serviceaccount/prometheus 51 | # - prometheus.monitoring.coreos.com/prometheus 52 | 53 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/rbac/prometheus 54 | 55 | mkdir -p ${BASE_DIR}/prometheus/custom 56 | 57 | # Create a custom CR, with these changes: 58 | # - Patch with routePrefix and externalUrl 59 | # - Remove serviceMonitorSelector 60 | # - Add empty serviceMonitorSelector and serviceMonitorNamespaceSelector (yq doesn't support setting to empty) 61 | 62 | cp ${BASE_DIR}/prometheus/example/rbac/prometheus/prometheus.yaml ${BASE_DIR}/prometheus/custom/ 63 | 64 | tee ${BASE_DIR}/prometheus/custom/patch.yml <<-'EOF' 65 | spec: 66 | routePrefix: /prometheus 67 | externalUrl: https://PUBLIC_ENDPOINT/prometheus 68 | EOF 69 | 70 | sed -i.bak "s|PUBLIC_ENDPOINT|$(cat ${BASE_DIR}/.hal/public_endpoint)|g" ${BASE_DIR}/prometheus/custom/patch.yml 71 | yq m -i ${BASE_DIR}/prometheus/custom/prometheus.yaml ${BASE_DIR}/prometheus/custom/patch.yml 72 | 73 | yq d -i ${BASE_DIR}/prometheus/custom/prometheus.yaml spec.serviceMonitorSelector 74 | 75 | tee -a ${BASE_DIR}/prometheus/custom/prometheus.yaml <<-'EOF' 76 | serviceMonitorSelector: {} 77 | serviceMonitorNamespaceSelector: {} 78 | 
EOF 79 | 80 | kubectl apply -n default -f ${BASE_DIR}/prometheus/custom/prometheus.yaml 81 | 82 | # Set up ingress with auth (same username/password as Spinnaker) 83 | # Set up service for Kayenta to get to Prometheus 84 | 85 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-service.yaml 86 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-ingress-noauth.yaml -------------------------------------------------------------------------------- /scripts/addons/install_prometheus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | set -x 20 | set -e 21 | 22 | # The filename is intentionally prometheus_install and not install_prometheus so install.sh continues to autocomplete 23 | 24 | BASE_DIR=/etc/spinnaker 25 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd ) 26 | 27 | curl -L https://github.com/coreos/prometheus-operator/archive/v0.37.0.tar.gz -o /tmp/prometheus-operator.tgz 28 | tar -xzvf /tmp/prometheus-operator.tgz -C ${BASE_DIR}/ 29 | 30 | mv ${BASE_DIR}/prometheus-operator-* ${BASE_DIR}/prometheus 31 | 32 | cp -rv ${PROJECT_DIR}/templates/addons/prometheus ${BASE_DIR}/templates 33 | 34 | # Installs operator into default namespace. Has these resources: 35 | # - clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator 36 | # - clusterrole.rbac.authorization.k8s.io/prometheus-operator 37 | # - deployment.apps/prometheus-operator 38 | # - serviceaccount/prometheus-operator 39 | # - servicemonitor.monitoring.coreos.com/prometheus-operator 40 | # - service/prometheus-operator 41 | 42 | # Have to create the CRD first 43 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/prometheus-operator-crd 44 | sleep 2 45 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/rbac/prometheus-operator 46 | 47 | # Installs a Prometheus (CRD) instance in the default namespace: 48 | # - clusterrolebinding.rbac.authorization.k8s.io/prometheus 49 | # - clusterrole.rbac.authorization.k8s.io/prometheus 50 | # - serviceaccount/prometheus 51 | # - prometheus.monitoring.coreos.com/prometheus 52 | 53 | kubectl apply -n default -f ${BASE_DIR}/prometheus/example/rbac/prometheus 54 | 55 | mkdir -p ${BASE_DIR}/prometheus/custom 56 | 57 | # Create a custom CR, with these changes: 58 | # - Patch with routePrefix and externalUrl 59 | # - Remove serviceMonitorSelector 60 | # - Add empty serviceMonitorSelector and serviceMonitorNamespaceSelector (yq doesn't support 
setting to empty) 61 | 62 | cp ${BASE_DIR}/prometheus/example/rbac/prometheus/prometheus.yaml ${BASE_DIR}/prometheus/custom/ 63 | 64 | tee ${BASE_DIR}/prometheus/custom/patch.yml <<-'EOF' 65 | spec: 66 | routePrefix: /prometheus 67 | externalUrl: https://PUBLIC_ENDPOINT/prometheus 68 | EOF 69 | 70 | sed -i "s|PUBLIC_ENDPOINT|$(cat ${BASE_DIR}/.hal/public_endpoint)|g" ${BASE_DIR}/prometheus/custom/patch.yml 71 | yq m -i ${BASE_DIR}/prometheus/custom/prometheus.yaml ${BASE_DIR}/prometheus/custom/patch.yml 72 | 73 | yq d -i ${BASE_DIR}/prometheus/custom/prometheus.yaml spec.serviceMonitorSelector 74 | 75 | tee -a ${BASE_DIR}/prometheus/custom/prometheus.yaml <<-'EOF' 76 | serviceMonitorSelector: {} 77 | serviceMonitorNamespaceSelector: {} 78 | EOF 79 | 80 | kubectl apply -n default -f ${BASE_DIR}/prometheus/custom/prometheus.yaml 81 | 82 | # Set up ingress with auth (same username/password as Spinnaker) 83 | # Set up service for Kayenta to get to Prometheus 84 | 85 | if [[ -f ${BASE_DIR}/.hal/.secret/spinnaker_password ]]; then 86 | sudo apt-get update 87 | sudo apt-get install apache2-utils -y 88 | htpasswd -b -c auth admin $(cat ${BASE_DIR}/.hal/.secret/spinnaker_password) 89 | kubectl -n default create secret generic prometheus-auth --from-file auth 90 | 91 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-service.yaml 92 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-ingress.yaml 93 | else 94 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-service.yaml 95 | kubectl -n default apply -f ${BASE_DIR}/templates/prometheus/prometheus-ingress-noauth.yaml 96 | fi -------------------------------------------------------------------------------- /scripts/addons/setup_demo_canary.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2020 Armory, Inc. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# Set up the "democanary" demo: writes templated application/pipeline JSON into
# the Minio-backed Front50 storage path and a canary config into the Kayenta
# storage path, then restarts Kayenta so it picks up the new config faster.

set -x
set -e

# Generate a UUID into a file, unless the file already holds one.
# Takes two parameters:
# - $1: Filename for UUID
# - $2: Purpose of UUID (only used for the output text)
function generate_or_use_uuid () {
  if [[ ! -s $1 ]]; then
    echo "Generating $2 UUID ($1)"
    uuidgen > ${1}
  else
    echo "$2 UUID already exists: $1: $(cat $1)"
  fi
}

BASE_DIR=/etc/spinnaker
PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd )

PVC="minio-pvc"
FRONT50_BUCKET="spinnaker"
APPLICATION_NAME="democanary"

# Kayenta gets its own bucket (instead of reusing the spinnaker bucket) because
# Kayenta crashes if it's using the same bucket as Front50 (haven't dug into this yet)
KAYENTA_BUCKET="kayenta"

cp -rv ${PROJECT_DIR}/templates/addons/demo ${BASE_DIR}/templates/

# Use the helper instead of duplicating its logic inline; the previous inline
# copies also mislabeled the pipeline UUID as "Canary Config" in their output.
generate_or_use_uuid ${BASE_DIR}/.hal/.secret/demo_canary_pipeline_uuid "Canary Pipeline"
generate_or_use_uuid ${BASE_DIR}/.hal/.secret/demo_canary_config_uuid "Canary Config"

PIPELINE_UUID=$(cat ${BASE_DIR}/.hal/.secret/demo_canary_pipeline_uuid)
CANARY_CONFIG_UUID=$(cat ${BASE_DIR}/.hal/.secret/demo_canary_config_uuid)

# Resolve the host path backing the Minio PVC so files can be written straight
# into the buckets on disk (bypassing the Minio/Front50 APIs).
MINIO_PATH=$(kubectl -n spinnaker get pv -ojsonpath="{.items[?(@.spec.claimRef.name==\"${PVC}\")].spec.hostPath.path}")

FRONT50_PATH=${MINIO_PATH}/${FRONT50_BUCKET}/front50
KAYENTA_PATH=${MINIO_PATH}/${KAYENTA_BUCKET}/kayenta

mkdir -p ${FRONT50_PATH}/{applications,pipelines}
mkdir -p ${FRONT50_PATH}/applications/${APPLICATION_NAME}
mkdir -p ${FRONT50_PATH}/pipelines/${PIPELINE_UUID}

TIMESTAMP=$(date +%s000)
ISO_TIMESTAMP=$(date +"%Y-%m-%dT%T.000Z")

# Create namespace(s); tolerate the namespace already existing.
set +e
kubectl create ns prod
set -e

# Create application
sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/applications/${APPLICATION_NAME}/application-metadata.json.tmpl \
  > ${FRONT50_PATH}/applications/${APPLICATION_NAME}/application-metadata.json

sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/applications/${APPLICATION_NAME}/application-permissions.json.tmpl \
  > ${FRONT50_PATH}/applications/${APPLICATION_NAME}/application-permissions.json

# Bump last-modified for application
sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/applications/last-modified.json.tmpl \
  > ${FRONT50_PATH}/applications/last-modified.json

# Create the pipeline
sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  -e "s|__PIPELINE_UUID__|${PIPELINE_UUID}|g" \
  -e "s|__CANARY_CONFIG_UUID__|${CANARY_CONFIG_UUID}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/pipelines/PIPELINE_UUID/pipeline-metadata.json.tmpl \
  > ${FRONT50_PATH}/pipelines/${PIPELINE_UUID}/pipeline-metadata.json

# Bump last-modified for pipeline
sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/pipelines/last-modified.json.tmpl \
  > ${FRONT50_PATH}/pipelines/last-modified.json

# Create canary config (single mkdir for all Kayenta subdirectories; the
# earlier standalone canary_config mkdir was redundant)
mkdir -p ${KAYENTA_PATH}/{canary_config,canary_archive,metric_pairs,metrics}
mkdir -p ${KAYENTA_PATH}/canary_config/${CANARY_CONFIG_UUID}

sed -e "s|__TIMESTAMP__|${TIMESTAMP}|g" \
  -e "s|__ISO_TIMESTAMP__|${ISO_TIMESTAMP}|g" \
  ${BASE_DIR}/templates/demo/${APPLICATION_NAME}/canary_config/Latency.json.tmpl \
  > ${KAYENTA_PATH}/canary_config/${CANARY_CONFIG_UUID}/Latency.json

# Restart Kayenta to pick it up (not necessary, but makes the pickup faster)
kubectl -n spinnaker rollout restart deployment/spin-kayenta
--------------------------------------------------------------------------------
/scripts/osx_install.sh:
--------------------------------------------------------------------------------
#!/bin/bash

################################################################################
# Copyright 2020 Armory, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# Install Minnaker in docker-desktop Kubernetes

set -e

##### Functions
# Print usage and exit-level help. Toggles xtrace off/on so the help text
# is not drowned out by `set -x` tracing.
print_help () {
  set +x
  echo "Usage: install.sh"
  echo " [-o|--oss] : Install Open Source Spinnaker (instead of Armory Spinnaker)"
  echo " [-B|--base-dir ] : Specify root directory to use for manifests"
  set -x
}

######## Script starts here

PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" >/dev/null 2>&1 && pwd )

OPEN_SOURCE=0
PUBLIC_ENDPOINT=""
MAGIC_NUMBER=cafed00d
DEAD_MAGIC_NUMBER=cafedead
KUBERNETES_CONTEXT=docker-desktop
NAMESPACE=spinnaker

if [[ "$(uname -s)" != "Darwin" ]]; then
  echo "Use install.sh to install on Linux"
  exit 1
fi

BASE_DIR=~/minnaker

while [ "$#" -gt 0 ]; do
  case "$1" in
    -o|--oss)
      printf "Using OSS Spinnaker\n"
      OPEN_SOURCE=1
      ;;
    -x)
      printf "Excluding from Minnaker metrics\n"
      MAGIC_NUMBER=${DEAD_MAGIC_NUMBER}
      ;;
    -B|--base-dir)
      # Quote $2 so the emptiness test actually works, consume the value with
      # an extra shift (previously missing, so the value was re-parsed as an
      # option), and send the error to stderr (previously the ">&2" was
      # accidentally inside the message string).
      if [ -n "$2" ]; then
        BASE_DIR=$2
        shift
      else
        printf "Error: --base-dir requires a directory\n" >&2
        exit 1
      fi
      ;;
    -h|--help)
      print_help
      exit 1
      ;;
  esac
  shift
done

. ${PROJECT_DIR}/scripts/functions.sh

if [[ ${OPEN_SOURCE} -eq 1 ]]; then
  printf "Using OSS Spinnaker\n"
  HALYARD_IMAGE="gcr.io/spinnaker-marketplace/halyard:stable"
else
  printf "Using Armory Spinnaker\n"
  # This is defined in functions.sh
  HALYARD_IMAGE="${ARMORY_HALYARD_IMAGE}"
fi

echo "Setting the Halyard Image to ${HALYARD_IMAGE}"

echo "Running minnaker setup for OSX"

# Scaffold out directories
# OSX / Docker Desktop has some fancy permissions so we do everything as ourselves
mkdir -p ${BASE_DIR}/templates/{manifests,profiles,service-settings}
mkdir -p ${BASE_DIR}/manifests
mkdir -p ${BASE_DIR}/.kube
mkdir -p ${BASE_DIR}/.hal/.secret
mkdir -p ${BASE_DIR}/.hal/default/{profiles,service-settings}

echo "localhost" > ${BASE_DIR}/.hal/public_endpoint

# detect_endpoint
# generate_passwords
copy_templates
# update_templates_for_linux
hydrate_templates_osx
conditional_copy

### Set up / check Kubernetes environment
curl -L https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud/deploy.yaml -o ${BASE_DIR}/manifests/nginx-ingress-controller.yaml

# Check the command directly: under `set -e` a failing kubectl would have
# aborted the script before the old `$?` test ever ran.
if ! kubectl --context ${KUBERNETES_CONTEXT} get ns; then
  echo "Docker desktop not detected; bailing."
  exit 1
fi

### Create all manifests:
# - namespace - must be created first
# - NGINX ingress controller - must be created second
# - halyard
# - minio
# - clusteradmin
# - ingress
kubectl --context ${KUBERNETES_CONTEXT} apply -f ${BASE_DIR}/manifests/namespace.yml
kubectl --context ${KUBERNETES_CONTEXT} apply -f ${BASE_DIR}/manifests

######## Bootstrap
while [[ $(kubectl --context ${KUBERNETES_CONTEXT} get statefulset -n ${NAMESPACE} halyard -ojsonpath='{.status.readyReplicas}') -ne 1 ]];
do
  echo "Waiting for Halyard pod to start"
  sleep 5;
done

sleep 5;

VERSION=$(kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal version latest -q")
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal config version edit --version ${VERSION}"
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply"

echo "https://$(cat ${BASE_DIR}/.hal/public_endpoint)"

while [[ $(kubectl -n ${NAMESPACE} get pods --field-selector status.phase!=Running 2> /dev/null | wc -l) -ne 0 ]];
do
  echo "Waiting for all containers to be Running"
  kubectl -n ${NAMESPACE} get pods
  sleep 5
done

kubectl -n ${NAMESPACE} get pods
set +x
echo "It may take up to 10 minutes for this endpoint to work. You can check by looking at running pods: 'kubectl -n ${NAMESPACE} get pods'"
echo "https://$(cat ${BASE_DIR}/.hal/public_endpoint)"
--------------------------------------------------------------------------------
/scripts/no_auth_install.sh:
--------------------------------------------------------------------------------
#!/bin/bash

################################################################################
# Copyright 2020 Armory, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# Install Minnaker in Ubuntu VM (will first install k3s)

set -e

##### Functions
# Print usage help; xtrace is toggled off so the text prints cleanly.
print_help () {
  set +x
  echo "Usage: install.sh"
  echo " [-o|--oss] : Install Open Source Spinnaker (instead of Armory Spinnaker)"
  echo " [-P|--public-endpoint ] : Specify public IP (or DNS name) for instance (rather than autodetection)"
  echo " [-B|--base-dir ] : Specify root directory to use for manifests"
  set -x
}

######## Script starts here

PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" >/dev/null 2>&1 && pwd )

OPEN_SOURCE=0
PUBLIC_ENDPOINT=""
MAGIC_NUMBER=cafed00d
DEAD_MAGIC_NUMBER=cafedead
KUBERNETES_CONTEXT=default
NAMESPACE=spinnaker

if [[ "$(uname -s)" == "Darwin" ]]; then
  echo "Use osx_install.sh to install on OSX Docker Desktop"
  exit 1
fi

BASE_DIR=/etc/spinnaker

while [ "$#" -gt 0 ]; do
  case "$1" in
    -o|--oss)
      printf "Using OSS Spinnaker\n"
      OPEN_SOURCE=1
      ;;
    -x)
      printf "Excluding from Minnaker metrics\n"
      MAGIC_NUMBER=${DEAD_MAGIC_NUMBER}
      ;;
    -P|--public-endpoint)
      # Quote $2 so the emptiness check actually works; error goes to stderr
      # (the ">&2" used to be inside the message string).
      if [ -n "$2" ]; then
        PUBLIC_ENDPOINT=$2
        shift
      else
        printf "Error: --public-endpoint requires an IP address\n" >&2
        exit 1
      fi
      ;;
    -B|--base-dir)
      # shift was missing here, so the directory value was re-parsed as an
      # option on the next loop iteration.
      if [ -n "$2" ]; then
        BASE_DIR=$2
        shift
      else
        printf "Error: --base-dir requires a directory\n" >&2
        exit 1
      fi
      ;;
    -h|--help)
      print_help
      exit 1
      ;;
  esac
  shift
done

. ${PROJECT_DIR}/scripts/functions.sh

if [[ ${OPEN_SOURCE} -eq 1 ]]; then
  printf "Using OSS Spinnaker\n"
  HALYARD_IMAGE="gcr.io/spinnaker-marketplace/halyard:stable"
else
  printf "Using Armory Spinnaker\n"
  # This is defined in functions.sh
  HALYARD_IMAGE="${ARMORY_HALYARD_IMAGE}"
fi

echo "Setting the Halyard Image to ${HALYARD_IMAGE}"

echo "Running minnaker setup for Linux"

# Scaffold out directories
# OSS Halyard uses 1000; we're using 1000 for everything
sudo mkdir -p ${BASE_DIR}/templates/{manifests,profiles,service-settings}
sudo mkdir -p ${BASE_DIR}/manifests
sudo mkdir -p ${BASE_DIR}/.kube
sudo mkdir -p ${BASE_DIR}/.hal/.secret
sudo mkdir -p ${BASE_DIR}/.hal/default/{profiles,service-settings}

sudo chown -R 1000 ${BASE_DIR}

detect_endpoint
# generate_passwords
copy_templates
# update_templates_for_auth
hydrate_templates
conditional_copy

### Set up Kubernetes environment
echo "Installing K3s"
install_k3s
echo "Setting kubernetes context to Spinnaker namespace"
sudo env "PATH=$PATH" kubectl config set-context ${KUBERNETES_CONTEXT} --namespace ${NAMESPACE}
echo "Installing yq"
install_yq

### Create all manifests:
# - namespace - must be created first
# - halyard
# - minio
# - clusteradmin
# - ingress
echo "Creating manifests"
kubectl --context ${KUBERNETES_CONTEXT} apply -f ${BASE_DIR}/manifests/namespace.yml
kubectl --context ${KUBERNETES_CONTEXT} apply -f ${BASE_DIR}/manifests

######## Bootstrap
while [[ $(kubectl --context ${KUBERNETES_CONTEXT} get statefulset -n ${NAMESPACE} halyard -ojsonpath='{.status.readyReplicas}') -ne 1 ]];
do
  echo "Waiting for Halyard pod to start"
  sleep 5;
done

sleep 5;
create_hal_shortcut
create_spin_endpoint

VERSION=$(kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal version latest -q")
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal config version edit --version ${VERSION}"
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply"

spin_endpoint

while [[ $(kubectl -n ${NAMESPACE} get pods --field-selector status.phase!=Running 2> /dev/null | wc -l) -ne 0 ]];
do
  echo "Waiting for all containers to be Running"
  kubectl -n ${NAMESPACE} get pods
  sleep 5
done

kubectl -n ${NAMESPACE} get pods

echo 'source <(kubectl completion bash)' >>~/.bashrc

set +x
echo "It may take up to 10 minutes for this endpoint to work. You can check by looking at running pods: 'kubectl -n ${NAMESPACE} get pods'"
spin_endpoint
--------------------------------------------------------------------------------
/scripts/utils/external_service_setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

################################################################################
# Copyright 2020 Armory, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

# Reconfigure Minnaker so that the Spinnaker services named on the command line
# run externally (e.g. in a local dev environment) while the rest keep running
# in-cluster, exposed via LoadBalancer services.

set -e

PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../../" >/dev/null 2>&1 && pwd )
KUBERNETES_CONTEXT=default
NAMESPACE=spinnaker
BASE_DIR=/etc/spinnaker

EXTERNAL_IP=$(ip -4 route list 0/0 | awk '{print $3}')
MINNAKER_IP=$(cat /etc/spinnaker/.hal/public_endpoint)

# TODO: Add backups of existing configs

echo "Removing previous custom configs..."
> ${BASE_DIR}/.hal/external_services
printf "# Generated by external_service_setup.sh\n# For use in Minnaker" > ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml
printf "# Generated by external_service_setup.sh\n# For use in Dev Environment" > ${BASE_DIR}/.hal/spinnaker-local.yml
yq d -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].deploymentEnvironment.customSizing

# Derive the list of all Spinnaker services from the spin-* service objects.
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} get service -l app=spin -oname | awk -F'-' '{print $2}' > ${BASE_DIR}/.hal/all_services

# Every remaining CLI argument names a service to externalize.
while [ "$#" -gt 0 ]; do
  echo $1 >> ${BASE_DIR}/.hal/external_services
  shift
done

for svc in $(cat ${BASE_DIR}/.hal/all_services); do
  echo "Adding Minnaker reference to ${svc} to dev config..."
  PORT=$(kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} get svc spin-${svc} -ojsonpath='{.spec.ports[0].port}')
  yq w -i ${BASE_DIR}/.hal/spinnaker-local.yml services.${svc}.baseUrl http://${MINNAKER_IP}:${PORT}
  echo "Configuring svc/spin-${svc} to type LoadBalancer"
  # Bug fix: these two lines used undefined ${SVC} (uppercase), so the
  # per-service settings file was written to ".../service-settings/.yml"
  # instead of the service's own file.
  touch ${BASE_DIR}/.hal/default/service-settings/${svc}.yml
  yq w -i ${BASE_DIR}/.hal/default/service-settings/${svc}.yml kubernetes.serviceType LoadBalancer
done

echo "Configuring Minnaker for these services:"
for svc in $(cat ${BASE_DIR}/.hal/external_services); do
  echo "Adding external reference to ${svc} to Minnaker config..."
  PORT=$(kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} get svc spin-${svc} -ojsonpath='{.spec.ports[0].port}')
  yq w -i ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml services.${svc}.baseUrl http://${EXTERNAL_IP}:${PORT}
  echo "Scaling Minnaker ${svc} to 0 instances..."
  yq w -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].deploymentEnvironment.customSizing.spin-${svc}.replicas 0
  echo "Updating external config to listen on 0.0.0.0 for ${svc} and removing custom baseUrl..."
  yq w -i ${BASE_DIR}/.hal/spinnaker-local.yml services.${svc}.host 0.0.0.0
  yq d -i ${BASE_DIR}/.hal/spinnaker-local.yml services.${svc}.baseUrl
done

echo "Updating Minnaker endpoint..."
yq w -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].security.uiSecurity.overrideBaseUrl http://${MINNAKER_IP}:9000
yq w -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].security.apiSecurity.overrideBaseUrl http://${MINNAKER_IP}:8084
yq w -i ${BASE_DIR}/.hal/config deploymentConfigurations[0].security.apiSecurity.corsAccessPattern "http://.*"
yq w -i ${BASE_DIR}/.hal/default/profiles/gate-local.yml server.servlet.context-path "/"
yq w -i ${BASE_DIR}/.hal/default/service-settings/gate.yml healthEndpoint "/health"

echo "--------------"
echo "Deck:"
yq r ${BASE_DIR}/.hal/config deploymentConfigurations[0].security.uiSecurity
echo "--------------"
echo "Gate:"
yq r ${BASE_DIR}/.hal/config deploymentConfigurations[0].security.apiSecurity
echo "--------------"

echo "Generated deploymentConfigurations[0].deploymentEnvironment.customSizing:"
echo "--------------"
yq r ${BASE_DIR}/.hal/config deploymentConfigurations[0].deploymentEnvironment.customSizing
echo "--------------"

echo "Generated local ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml:"
echo "--------------"
cat ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml
echo "--------------"
echo "Place this file at '~/.spinnaker/spinnaker-local.yml' on your workstation"
echo "--------------"
cat ${BASE_DIR}/.hal/spinnaker-local.yml
echo "--------------"

echo "Deleting old service objects..."
kubectl --context ${KUBERNETES_CONTEXT} --namespace ${NAMESPACE} delete svc -l app=spin

echo "Applying changes:"
kubectl --context ${KUBERNETES_CONTEXT} -n ${NAMESPACE} exec -i halyard-0 -- sh -c "hal deploy apply"

# Printing again, just because...
101 | 102 | echo "Generated deploymentConfigurations[0].deploymentEnvironment.customSizing:" 103 | yq r ${BASE_DIR}/.hal/config deploymentConfigurations[0].deploymentEnvironment.customSizing 104 | 105 | echo "Generated local ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml:" 106 | echo "--------------" 107 | cat ${BASE_DIR}/.hal/default/profiles/spinnaker-local.yml 108 | echo "--------------" 109 | echo "Place this file at '~/.spinnaker/spinnaker-local.yml' on your workstation" 110 | echo "--------------" 111 | cat ${BASE_DIR}/.hal/spinnaker-local.yml 112 | echo "--------------" 113 | 114 | # TODO: change overridebaseurls so we can do http things 115 | -------------------------------------------------------------------------------- /scripts/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################################################################ 4 | # Copyright 2021 Armory, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | # Install Minnaker in Ubuntu VM (will first install k3s) 20 | 21 | #set -e 22 | 23 | ##### Functions 24 | print_help () { 25 | set +x 26 | echo "Usage: install.sh" 27 | echo " [-o|--oss] : Install Open Source Spinnaker (instead of Armory Spinnaker)" 28 | echo " [-P|--public-endpoint ] : Specify public IP (or DNS name) for instance (rather than autodetection)" 29 | echo " [-B|--base-dir ] : Specify root directory to use for manifests" 30 | echo " [-G|--git-spinnaker] : Git Spinnaker Kustomize URL (instead of https://github.com/armory/spinnaker-kustomize-patches)" 31 | echo " [--branch] : Branch to clone (default 'minnaker')" 32 | echo " [-n|--nowait] : Don't wait for Spinnaker to come up" 33 | set -x 34 | } 35 | 36 | ######## Script starts here 37 | 38 | PROJECT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )/../" >/dev/null 2>&1 && pwd ) 39 | 40 | OPEN_SOURCE=0 41 | PUBLIC_ENDPOINT="" 42 | PUBLIC_IP="" 43 | MAGIC_NUMBER=cafed00d 44 | DEAD_MAGIC_NUMBER=cafedead 45 | KUBERNETES_CONTEXT=default 46 | NAMESPACE=spinnaker 47 | BASE_DIR=$PROJECT_DIR/spinsvc 48 | SPIN_GIT_REPO="https://github.com/armory/spinnaker-kustomize-patches" 49 | BRANCH=minnaker 50 | SPIN_WATCH=1 # Wait for Spinnaker to come up 51 | OUT="$PROJECT_DIR/minnaker.log" 52 | 53 | ### Load Helper Functions 54 | . "${PROJECT_DIR}/scripts/functions.sh" 55 | 56 | ### Check if running on Mac - if so complain... 
57 | ### ToDo: Skip k3s install , and use docker-desktop or minikube context 58 | if [[ "$(uname -s)" == "Darwin" ]]; then 59 | error "Use osx_install.sh to install on OSX Docker Desktop" 60 | exit 1 61 | fi 62 | 63 | while [ "$#" -gt 0 ]; do 64 | case "$1" in 65 | -o|--oss) 66 | info "Using OSS Spinnaker" 67 | OPEN_SOURCE=1 68 | ;; 69 | -x) 70 | info "Excluding from Minnaker metrics" 71 | MAGIC_NUMBER=${DEAD_MAGIC_NUMBER} 72 | ;; 73 | -P|--public-endpoint) 74 | if [[ -n $2 ]]; then 75 | PUBLIC_IP=$2 76 | shift 77 | else 78 | error "--public-endpoint requires an IP address >&2" 79 | exit 1 80 | fi 81 | ;; 82 | -B|--base-dir) 83 | if [[ -n $2 ]]; then 84 | BASE_DIR=$2 85 | warn "Contents in $2 will be erased" 86 | else 87 | error "--base-dir requires a directory >&2" 88 | exit 1 89 | fi 90 | ;; 91 | -G|--git-spinnaker) 92 | if [[ -n $2 ]]; then 93 | SPIN_GIT_REPO=$2 94 | BRANCH=master 95 | else 96 | error "--git-spinnaker requires a git url >&2" 97 | exit 1 98 | fi 99 | ;; 100 | --branch) 101 | if [[ -n $2 ]]; then 102 | BRANCH=$2 103 | else 104 | info "Defaulting to branch 'minnaker' for $SPIN_GIT_REPO" 105 | BRANCH=minnaker 106 | fi 107 | ;; 108 | -n|--nowait) 109 | info "Will not wait for Spinnaker to come up" 110 | SPIN_WATCH=0 111 | ;; 112 | -h|--help) 113 | print_help 114 | exit 1 115 | ;; 116 | esac 117 | shift 118 | done 119 | 120 | if [[ ${OPEN_SOURCE} == 1 ]]; then 121 | info "Using OSS Spinnaker" 122 | SPIN_FLAVOR=oss 123 | VERSION=$(curl -s https://spinnaker.io/community/releases/versions/ | grep 'id="version-' | head -1 | sed -e 's/\(<[^<][^<]*>\)//g; /^$/d' | cut -d' ' -f2) 124 | else 125 | info "Using Armory Spinnaker" 126 | SPIN_FLAVOR=armory 127 | VERSION=$(curl -sL https://halconfig.s3-us-west-2.amazonaws.com/versions.yml | grep 'version: ' | awk '{print $NF}' | sort | tail -1) 128 | fi 129 | 130 | info "Running minnaker setup for Linux" 131 | info "Cloning repo: ${SPIN_GIT_REPO}#${BRANCH} into ${BASE_DIR}" 132 | 133 | if [ -d "${BASE_DIR}" ]; then 
134 | warn "${BASE_DIR} exists already. FOLDER CONTENTS WILL GET OVERWRITTEN!" 135 | warn "PROCEEDING in 3 secs... (ctrl-C to cancel; use -B option to specify a different directory)" 136 | sleep 3 137 | fi 138 | rm -rf ${BASE_DIR} 139 | git clone -b ${BRANCH} "${SPIN_GIT_REPO}" "${BASE_DIR}" 140 | cd "${BASE_DIR}" 141 | 142 | ### Installing helper tools 143 | install_yq 144 | install_jq 145 | 146 | detect_endpoint 147 | generate_passwords 148 | update_endpoint 149 | hydrate_templates 150 | create_spin_endpoint 151 | 152 | ### Set up Kubernetes environment 153 | install_k3s 154 | info "Setting Kubernetes context to Spinnaker namespace" 155 | sudo env "PATH=$PATH" kubectl config set-context ${KUBERNETES_CONTEXT} --namespace ${NAMESPACE} 156 | 157 | ### Deploy Spinnaker with Spinnaker Operator 158 | cd "${BASE_DIR}" 159 | SPIN_FLAVOR=${SPIN_FLAVOR} SPIN_WATCH=0 ./deploy.sh 160 | 161 | # Install PACRD 162 | exec_kubectl_mutating "kubectl apply -f https://engineering.armory.io/manifests/pacrd-1.0.1.yaml -n spinnaker" handle_generic_kubectl_error 163 | exec_kubectl_mutating "kubectl apply -k ${PROJECT_DIR}/pipelines -n spinnaker" handle_generic_kubectl_error 164 | 165 | echo '' >>~/.bashrc # need to add empty line in case file doesn't end in newline 166 | echo 'source <(kubectl completion bash)' >>~/.bashrc 167 | echo 'alias k=kubectl' >>~/.bashrc 168 | echo 'complete -F __start_kubectl k' >>~/.bashrc 169 | 170 | spin_endpoint 171 | 172 | if [[ ${SPIN_WATCH} != 0 ]]; then 173 | watch kubectl get pods,spinsvc -n spinnaker 174 | fi 175 | -------------------------------------------------------------------------------- /guides/add-kubernetes-cluster.md: -------------------------------------------------------------------------------- 1 | # Adding additional deployment targets (Kubernetes clusters) to Spinnaker 2 | 3 | Minnaker installs a local distribution of Kubernetes (K3s) on your VM, which can be deployed to, but once Spinnaker is up and running, you can configure 
Spinnaker to be able to deploy to additional Kubernetes clusters. Each of these is added as a Clouddriver **account**, which is information about a Kubernetes cluster (API server URL, certificate, credentials) that Spinnaker uses to interact with that Kubernetes cluster.

In order to do this, you basically need to generate a `kubeconfig` file that has credentials for your target Kubernetes cluster, and then give that to Spinnaker.


## Overview

We're going to use the `spinnaker-tools` tool (which is a kubectl wrapper) to do the following:

In the target Kubernetes cluster:
* Create a `ServiceAccount` in the `kube-system` namespace (`spinnaker-service-account`)
* Create a `ClusterRoleBinding` to grant the service account access to the Kubernetes `cluster-admin` role (`kube-system-spinnaker-service-account-admin`)

The tool will also do this:
* Create a `kubeconfig` file with the token for the generated service account

Then we will take the generated kubeconfig, copy it to Minnaker, and configure Minnaker to use the kubeconfig to be able to deploy to your Kubernetes cluster.

## Prerequisites

This process should be run from your local workstation, *not from the Minnaker VM*. You must have access to the Kubernetes cluster you would like to deploy to, and you need cluster admin permissions on the Kubernetes cluster.

You should be able to run the following (again, from your local workstation, not the Minnaker VM).

```bash
kubectl get ns
```

You should also be able to copy files from your local workstation to the Minnaker VM.
32 | 33 | ## Using `spinnaker-tools` 34 | 35 | On your local workstation (where you currently have access to Kubernetes), download the spinnaker-tools binary: 36 | 37 | If you're on a Mac: 38 | 39 | ```bash 40 | curl -L https://github.com/armory/spinnaker-tools/releases/download/0.0.7/spinnaker-tools-darwin -o spinnaker-tools 41 | chmod +x spinnaker-tools 42 | ``` 43 | 44 | If you're on Linux: 45 | 46 | ```bash 47 | curl -L https://github.com/armory/spinnaker-tools/releases/download/0.0.7/spinnaker-tools-linux -o spinnaker-tools 48 | chmod +x spinnaker-tools 49 | ``` 50 | 51 | Then, run it: 52 | 53 | ```bash 54 | ./spinnaker-tools create-service-account 55 | ``` 56 | 57 | This will prompt for the following: 58 | * Select the Kubernetes cluster to deploy to (this helps if you have multiple Kubernetes clusters configured in your local kubeconfig) 59 | * Select the namespace (choose the `kube-system` namespace, or select some other namespace or select the option to create a new namespace). This is the namespace that the Kubernetes ServiceAccount will be created in. 60 | * Enter a name for the service account. You can use the default `spinnaker-service-account`, or enter a new (unique) name. 61 | * Enter a name for the output file. You can use the default `kubeconfig-sa`, or you can enter a unique name. You should use something that identifies the Kubernetes cluster you are deploying to (for example, if you are setting up Spinnaker to deploy to your us-west-2 dev cluster, then you could do something like `kubeconfig-us-west-2-dev`) 62 | 63 | This will create the service account (and namespace, if applicable), and the ClusterRoleBinding, then create the kubeconfig file with the specified name. 64 | 65 | Copy this file from your local workstation to your Minnaker VM. You can use scp or some other copy mechanism. 
66 | 67 | ## Add the kubeconfig to Spinnaker's Halyard Configuration 68 | 69 | On the Minnaker VM, move or copy the file to `/etc/spinnaker/.hal/.secret` (make sure you are creating a new file, not overwriting an existing one). 70 | 71 | Then, run this command: 72 | 73 | ```bash 74 | hal config provider kubernetes account add us-west-2-dev \ 75 | --provider-version v2 \ 76 | --kubeconfig-file /home/spinnaker/.hal/.secret/kubeconfig-us-west-2-dev \ 77 | --only-spinnaker-managed true 78 | ``` 79 | 80 | Note two things: 81 | * Replace us-west-2-dev with something that identifies your Kubernetes cluster 82 | * Update the `--kubeconfig-file` path with the correct filename. Note that the path will be `/home/spinnaker/...` **not** `/etc/spinnaker/...` - this is because this command will be run inside the Halyard container, which has local volumes mounted into it. 83 | 84 | ## Apply your changes 85 | 86 | Run this command to apply your changes to Spinnaker: 87 | 88 | ```bash 89 | hal deploy apply --wait-for-completion 90 | ``` 91 | 92 | ## Use the new cluster 93 | 94 | Log into the Spinnaker UI (you should first do this in incognito, or do a hard refresh of your browser, as Spinnaker very aggressively caches information in your browser). 95 | 96 | When you go to set up a Kubernetes deployment stage, you should see your new Kubernetes deployment target in the `Account` dropdown. 97 | 98 | ## Additional options / Alternate configurations 99 | 100 | All Halyard / Spinnaker really needs is a way to communicate with your Kubernetes cluster with a Kubeconfig. You can customize this configuration in a number of different ways: 101 | 102 | ### Automate this process 103 | The `spinnaker-tools` binary supports command-line flags. 
You can use `-h` to see the options (as in `./spinnaker-tools create-service-account -h`), and could run the above command as something like this: 104 | 105 | ```bash 106 | ./spinnaker-tools create-service-account \ 107 | --kubeconfig ~/.kube/config \ 108 | --context my-kubernetes-context \ 109 | --namespace kube-system \ 110 | --service-account-name minnaker-service-account \ 111 | --output kubeconfig-my-kubernetes-cluster 112 | ``` 113 | 114 | Spinnaker tools also supports using existing ServiceAccounts via the `./spinnaker-tools create-kubeconfig` command. Setting up permissions here is left as an exercise to the reader. 115 | 116 | ### Set up per-namespace access 117 | 118 | One option, as you're setting up Spinnaker with RBAC, is to set up different Clouddriver `account`s for different namespaces. For example, you could set up something like this: 119 | 120 | Prod Cluster 121 | * `frontend` namespace 122 | * `backend` namespace 123 | 124 | Dev Cluster 125 | * `frontend` namespace 126 | * `backend` namespace 127 | 128 | To set up the service account and user, you can use the `--target-namespaces` flag for `./spinnaker-tools create-service-account`. 
For example: 129 | 130 | ```bash 131 | ./spinnaker-tools create-service-account \ 132 | --kubeconfig ~/.kube/config \ 133 | --context prod-cluster \ 134 | --namespace kube-system \ 135 | --service-account-name minnaker-prod-frontend-access \ 136 | --output kubeconfig-prod-frontend \ 137 | --target-namespaces frontend 138 | ``` 139 | 140 | (Repeat the above four times, with different parameters) 141 | 142 | Then set up a different Kubeconfig for each cluster/namespace (four total kubeconfigs), and add four accounts using `hal config provider`, using the `--namespaces` flag: 143 | 144 | ```bash 145 | hal config provider kubernetes account add prod-frontend \ 146 | --provider-version v2 \ 147 | --kubeconfig-file /home/spinnaker/.hal/.secret/kubeconfig-prod-frontend \ 148 | --only-spinnaker-managed true \ 149 | --namespaces frontend 150 | ``` 151 | 152 | (Again, repeat four times) 153 | 154 | ### Use IAM roles for AWS EKS 155 | 156 | AWS EKS supports the use of AWS IAM roles to access your Kubernetes cluster. To do this, you can do the following: 157 | 158 | * Attach an IAM role to the VM where Minnaker is running 159 | * Add that role to the `aws-auth` configmap in your target EKS cluster (this lives in the `kube-system` namespace). 160 | * Generate a kubeconfig that uses `aws-iam-authenticator` to generate tokens (look at your existing `~/.kube/config` for an example of this) 161 | * Use this kubeconfig as opposed to the one generated above. 162 | 163 | This is left as an exercise to the reader (or reach out to us for help and we can get you up and running!) 164 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2020 Armory 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /scripts/functions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ################################################################################ 3 | # Copyright 2020 Armory, Inc. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | ################################################################################ 17 | 18 | function log() { 19 | RED='\033[0;31m' 20 | GREEN='\033[0;32m' 21 | ORANGE='\033[0;33m' 22 | CYAN='\033[0;36m' 23 | NC='\033[0m' 24 | LEVEL=$1 25 | MSG=$2 26 | case $LEVEL in 27 | "INFO") HEADER_COLOR=$GREEN MSG_COLOR=$NS ;; 28 | "WARN") HEADER_COLOR=$ORANGE MSG_COLOR=$NS ;; 29 | "KUBE") HEADER_COLOR=$ORANGE MSG_COLOR=$CYAN ;; 30 | "ERROR") HEADER_COLOR=$RED MSG_COLOR=$NS ;; 31 | esac 32 | printf "${HEADER_COLOR}[%-5.5s]${NC} ${MSG_COLOR}%b${NC}" "${LEVEL}" "${MSG}" 33 | printf "$(date +"%D %T") [%-5.5s] %b" "${LEVEL}" "${MSG}" >>"$OUT" 34 | } 35 | 36 | function info() { 37 | log "INFO" "$1\n" 38 | } 39 | 40 | function warn() { 41 | log "WARN" "$1\n" 42 | } 43 | 44 | function error() { 45 | log "ERROR" "$1\n" && exit 1 46 | } 47 | 48 | function handle_generic_kubectl_error() { 49 | error "Error executing command:\n$ERR_OUTPUT" 50 | } 51 | 52 | function exec_kubectl_mutating() { 53 | log "KUBE" "$1\n" 54 | ERR_OUTPUT=$({ $1 >>"$OUT"; } 2>&1) 55 | EXIT_CODE=$? 56 | [[ $EXIT_CODE != 0 ]] && $2 57 | } 58 | 59 | install_k3s () { 60 | info "--- Installing K3s ---" 61 | curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--tls-san $(cat ${BASE_DIR}/secrets/public_ip)" INSTALL_K3S_VERSION="v1.19.7+k3s1" K3S_KUBECONFIG_MODE="644" sh - 62 | #curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="v1.19.7+k3s1" K3S_KUBECONFIG_MODE=644 sh - 63 | info " --- END K3s --- " 64 | } 65 | 66 | install_yq () { 67 | info "Installing yq" 68 | sudo curl -sfL https://github.com/mikefarah/yq/releases/download/v4.7.1/yq_linux_amd64 -o /usr/local/bin/yq 69 | sudo chmod +x /usr/local/bin/yq 70 | if [[ ! -e "/usr/local/bin/yq" ]]; then 71 | error "failed to install yq - please manually install https://github.com/mikefarah/yq/" 72 | exit 1 73 | fi 74 | } 75 | 76 | install_jq () { 77 | info "Installing jq" 78 | 79 | # install prereqs jq 80 | # if jq is not installed 81 | if ! 
jq --help > /dev/null 2>&1; then 82 | # only try installing if a Debian system 83 | if apt-get -v > /dev/null 2>&1; then 84 | info "Using apt-get to install jq" 85 | sudo apt-get update && sudo apt-get install -y jq 86 | else 87 | error "ERROR: Unsupported OS! Cannot automatically install jq. Please try install jq first before rerunning this script" 88 | exit 2 89 | fi 90 | fi 91 | } 92 | 93 | detect_endpoint () { 94 | info "Trying to detect endpoint" 95 | if [[ ! -s ${BASE_DIR}/secrets/public_ip || -n "$1" ]]; then 96 | if [[ -n "${PUBLIC_IP}" ]]; then 97 | info "Using provided public IP ${PUBLIC_IP}" 98 | echo "${PUBLIC_IP}" > ${BASE_DIR}/secrets/public_ip 99 | else 100 | if [[ $(curl -m 1 169.254.169.254 -sSfL &>/dev/null; echo $?) -eq 0 ]]; then 101 | # change to ask AWS public metadata? http://169.254.169.254/latest/meta-data/public_ipv4 102 | #rm ${BASE_DIR}/secrets/public_ip 103 | #while [[ ! -s ${BASE_DIR}/secrets/public_ip ]]; do 104 | info "Detected cloud metadata endpoint" 105 | info "Trying to determine public IP address (using 'curl -m http://169.254.169.254/latest/meta-data/public-ipv4')" 106 | info "IP: $(curl -s http://169.254.169.254/latest/meta-data/public-ipv4 | tee ${BASE_DIR}/secrets/public_ip)" 107 | # info "Trying to determine public IP address (using 'dig +short TXT o-o.myaddr.l.google.com @ns1.google.com')" 108 | # dig +short TXT o-o.myaddr.l.google.com @ns1.google.com | sed 's|"||g' | tee ${BASE_DIR}/secrets/public_ip 109 | #done 110 | else 111 | info "No cloud metadata endpoint detected, detecting interface IP (and storing in ${BASE_DIR}/secrets/public_ip): $(ip r get 8.8.8.8 | awk 'NR==1{print $7}' | tee ${BASE_DIR}/secrets/public_ip)" 112 | fi 113 | fi 114 | else 115 | info "Using existing Public IP from ${BASE_DIR}/secrets/public_ip" 116 | cat ${BASE_DIR}/secrets/public_ip 117 | fi 118 | } 119 | 120 | update_endpoint () { 121 | #PUBLIC_ENDPOINT="spinnaker.$(cat "${BASE_DIR}/secrets/public_ip").nip.io" # use nip.io which is a DNS that 
will always resolve. 122 | PUBLIC_ENDPOINT="$(cat "${BASE_DIR}/secrets/public_ip")" 123 | 124 | info "Updating spinsvc templates with new endpoint: ${PUBLIC_ENDPOINT}" 125 | #yq eval -i '.spec.rules[0].host = "'${PUBLIC_ENDPOINT}'"' ${BASE_DIR}/expose/ingress-traefik.yml 126 | yq eval -i 'del(.spec.rules[0].host)' ${BASE_DIR}/expose/ingress-traefik.yml 127 | yq eval -i '.spec.spinnakerConfig.config.security.uiSecurity.overrideBaseUrl = "'https://${PUBLIC_ENDPOINT}'"' ${BASE_DIR}/expose/patch-urls.yml 128 | yq eval -i '.spec.spinnakerConfig.config.security.apiSecurity.overrideBaseUrl = "'https://${PUBLIC_ENDPOINT}/api'"' ${BASE_DIR}/expose/patch-urls.yml 129 | yq eval -i '.spec.spinnakerConfig.config.security.apiSecurity.corsAccessPattern = "'https://${PUBLIC_ENDPOINT}'"' ${BASE_DIR}/expose/patch-urls.yml 130 | } 131 | 132 | generate_passwords () { 133 | # for PASSWORD_ITEM in spinnaker_password minio_password mysql_password; do 134 | for PASSWORD_ITEM in spinnaker_password; do 135 | if [[ ! 
-s ${BASE_DIR}/secrets/${PASSWORD_ITEM} ]]; then 136 | info "Generating password [${BASE_DIR}/secrets/${PASSWORD_ITEM}]:" 137 | openssl rand -base64 36 | tee ${BASE_DIR}/secrets/${PASSWORD_ITEM} 138 | else 139 | warn "Password already exists: [${BASE_DIR}/secrets/${PASSWORD_ITEM}]" 140 | fi 141 | done 142 | 143 | SPINNAKER_PASSWORD=$(cat "${BASE_DIR}/secrets/spinnaker_password") 144 | } 145 | 146 | # copy_templates () { 147 | # # Directory structure: 148 | # ## BASE_DIR/templates/manifests/*: will by hydrated locally, conditionally copied to BASE_DIR/manifests 149 | # ## BASE_DIR/templates/profiles/*: will be hydrated locally, conditionally copied to BASE_DIR/.hal/default/profiles 150 | # ## BASE_DIR/templates/service-settings/*: will be hydrated locally, conditionally copied to BASE_DIR/.hal/default/service-settings 151 | # ## BASE_DIR/templates/config: will be hydrated locally, conditionally copied to BASE_DIR/.hal/config 152 | # cp -rpv ${PROJECT_DIR}/templates/manifests ${BASE_DIR}/templates/ 153 | 154 | # cp -rpv ${PROJECT_DIR}/templates/profiles ${BASE_DIR}/templates/ 155 | # cp -rpv ${PROJECT_DIR}/templates/service-settings ${BASE_DIR}/templates/ 156 | 157 | # cp ${PROJECT_DIR}/templates/config ${BASE_DIR}/templates/ 158 | # if [[ ${OPEN_SOURCE} -eq 0 ]]; then 159 | # cat ${PROJECT_DIR}/templates/config-armory >> ${BASE_DIR}/templates/config 160 | # fi 161 | # } 162 | 163 | update_templates_for_auth () { 164 | for f in $(ls -1 ${PROJECT_DIR}/templates/profiles-auth/); do 165 | cat ${PROJECT_DIR}/templates/profiles-auth/${f} | tee -a ${BASE_DIR}/templates/profiles/${f} 166 | done 167 | } 168 | 169 | hydrate_templates () { 170 | sed -i "s|^http-password=.*|http-password=${SPINNAKER_PASSWORD}|g" ${BASE_DIR}/secrets/secrets-example.env 171 | #sed -i "s|username2replace|admin|g" security/patch-basic-auth.yml 172 | yq eval -i '.spec.spinnakerConfig.profiles.gate.spring.security.user.name = "admin"' ${BASE_DIR}/security/patch-basic-auth.yml 173 | #sed -i -r 
"s|(^.*)version: .*|\1version: ${VERSION}|" core_config/patch-version.yml 174 | yq eval -i '.spec.spinnakerConfig.config.version = "'${VERSION}'"' ${BASE_DIR}/core_config/patch-version.yml 175 | sed -i "s|token|# token|g" accounts/git/patch-github.yml 176 | sed -i "s|username|# username|g" accounts/git/patch-gitrepo.yml 177 | sed -i "s|token|# token|g" accounts/git/patch-gitrepo.yml 178 | 179 | if [[ ${OPEN_SOURCE} -eq 0 ]]; then 180 | sed -i "s|xxxxxxxx-.*|${MAGIC_NUMBER}$(uuidgen | cut -c 9-)|" armory/patch-diagnostics.yml 181 | sed -i "s|#- armory|- armory|g" kustomization.yml 182 | else 183 | # remove armory related patches 184 | sed -i "s|- armory|#- armory|g" kustomization.yml 185 | fi 186 | } 187 | 188 | ## Not necessary anymore - using yq and kustomize 189 | # # The primary difference is i.bak, cause OSX sed is stupid 190 | # hydrate_templates_osx () { 191 | # PUBLIC_ENDPOINT=$(cat ${BASE_DIR}/secrets/public_ip) 192 | 193 | # # TODO: Decide whether to replace with find | xargs sed 194 | # # TODO: Fix the sed i.bak, collapse with hydrate_templates 195 | # for f in ${BASE_DIR}/templates/config ${BASE_DIR}/templates/{manifests,profiles,service-settings}/*; do 196 | # sed -i.bak \ 197 | # -e "s|NAMESPACE|${NAMESPACE}|g" \ 198 | # -e "s|BASE_DIR|${BASE_DIR}|g" \ 199 | # -e "s|HALYARD_IMAGE|${HALYARD_IMAGE}|g" \ 200 | # -e "s|PUBLIC_ENDPOINT|${PUBLIC_ENDPOINT}|g" \ 201 | # -e "s|uuid.*|uuid: ${MAGIC_NUMBER}$(uuidgen | cut -c 9-)|g" \ 202 | # ${f} 203 | # rm ${f}.bak 204 | # done 205 | # } 206 | 207 | # conditional_copy () { 208 | # ## BASE_DIR/templates/manifests/* conditionally copied to BASE_DIR/manifests 209 | # ## BASE_DIR/templates/profiles/* conditionally copied to BASE_DIR/.hal/default/profiles 210 | # ## BASE_DIR/templates/service-settings/* conditionally copied to BASE_DIR/.hal/default/service-settings 211 | # ## BASE_DIR/templates/config conditionally copied to BASE_DIR/.hal/config 212 | # for f in $(ls -1 ${BASE_DIR}/templates/manifests/); do 213 | # 
if [[ ! -e ${BASE_DIR}/manifests/${f} ]]; then 214 | # cp ${BASE_DIR}/templates/manifests/${f} ${BASE_DIR}/manifests/ 215 | # fi 216 | # done 217 | 218 | # for f in $(ls -1 ${BASE_DIR}/templates/profiles/); do 219 | # if [[ ! -e ${BASE_DIR}/.hal/default/profiles/${f} ]]; then 220 | # cp ${BASE_DIR}/templates/profiles/${f} ${BASE_DIR}/.hal/default/profiles/ 221 | # fi 222 | # done 223 | 224 | # for f in $(ls -1 ${BASE_DIR}/templates/service-settings/); do 225 | # if [[ ! -e ${BASE_DIR}/.hal/default/service-settings/${f} ]]; then 226 | # cp ${BASE_DIR}/templates/service-settings/${f} ${BASE_DIR}/.hal/default/service-settings/ 227 | # fi 228 | # done 229 | 230 | # if [[ ! -e ${BASE_DIR}/.hal/config ]]; then 231 | # cp ${BASE_DIR}/templates/config ${BASE_DIR}/.hal/ 232 | # fi 233 | # } 234 | 235 | # create_hal_shortcut () { 236 | # sudo tee /usr/local/bin/hal <<-'EOF' 237 | # #!/bin/bash 238 | # POD_NAME=$(kubectl -n spinnaker get pod -l app=halyard -oname | cut -d'/' -f 2) 239 | # # echo $POD_NAME 240 | # set -x 241 | # kubectl -n spinnaker exec -i ${POD_NAME} -- sh -c "hal $*" 242 | # EOF 243 | # sudo chmod 755 /usr/local/bin/hal 244 | # } 245 | 246 | create_spin_endpoint () { 247 | 248 | info "Creating spin_endpoint helper function" 249 | 250 | sudo tee /usr/local/bin/spin_endpoint <<-'EOF' 251 | #!/bin/bash 252 | #echo "$(kubectl get spinsvc spinnaker -n spinnaker -ojsonpath='{.spec.spinnakerConfig.config.security.uiSecurity.overrideBaseUrl}')" 253 | echo "$(yq e '.spec.spinnakerConfig.config.security.uiSecurity.overrideBaseUrl' BASE_DIR/expose/patch-urls.yml)" 254 | [[ -f BASE_DIR/secrets/spinnaker_password ]] && echo "username: 'admin'" 255 | [[ -f BASE_DIR/secrets/spinnaker_password ]] && echo "password: '$(cat BASE_DIR/secrets/spinnaker_password)'" 256 | EOF 257 | sudo chmod 755 /usr/local/bin/spin_endpoint 258 | 259 | sudo sed -i "s|BASE_DIR|${BASE_DIR}|g" /usr/local/bin/spin_endpoint 260 | } 261 | 262 | restart_k3s (){ 263 | info "Restarting k3s" 264 | 
/usr/local/bin/k3s-killall.sh 265 | sudo systemctl restart k3s 266 | } 267 | 268 | 269 | ####### These are not currently used 270 | 271 | install_git () { 272 | set +e 273 | if [[ $(command -v snap >/dev/null; echo $?) -eq 0 ]]; 274 | then 275 | sudo snap install git 276 | elif [[ $(command -v apt-get >/dev/null; echo $?) -eq 0 ]]; 277 | then 278 | sudo apt-get install git -y 279 | else 280 | sudo yum install git -y 281 | fi 282 | set -e 283 | } 284 | 285 | get_metrics_server_manifest () { 286 | # TODO: detect existence and skip if existing 287 | rm -rf ${BASE_DIR}/manifests/metrics-server 288 | git clone https://github.com/kubernetes-incubator/metrics-server.git ${BASE_DIR}/metrics-server 289 | } 290 | -------------------------------------------------------------------------------- /templates/addons/demo/democanary/pipelines/PIPELINE_UUID/pipeline-metadata.json.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "application": "democanary", 3 | "name": "Canary Demo", 4 | "id": "__PIPELINE_UUID__", 5 | "updateTs": "__TIMESTAMP__", 6 | "index": 0, 7 | "expectedArtifacts": [], 8 | "keepWaitingPipelines": false, 9 | "lastModifiedBy": "demo", 10 | "limitConcurrent": true, 11 | "parameterConfig": [ 12 | { 13 | "default": "random", 14 | "description": "", 15 | "hasOptions": true, 16 | "label": "", 17 | "name": "tag", 18 | "options": [ 19 | { 20 | "value": "monday" 21 | }, 22 | { 23 | "value": "tuesday" 24 | }, 25 | { 26 | "value": "wednesday" 27 | }, 28 | { 29 | "value": "thursday" 30 | }, 31 | { 32 | "value": "friday" 33 | }, 34 | { 35 | "value": "saturday" 36 | }, 37 | { 38 | "value": "sunday" 39 | }, 40 | { 41 | "value": "random" 42 | } 43 | ], 44 | "pinned": true, 45 | "required": true 46 | } 47 | ], 48 | "stages": [ 49 | { 50 | "account": "spinnaker", 51 | "app": "democanary", 52 | "cloudProvider": "kubernetes", 53 | "comments": "The first time this pipeline runs; this stage may fail. 
This is fine.", 54 | "completeOtherBranchesThenFail": false, 55 | "continuePipeline": true, 56 | "expectedArtifacts": [], 57 | "failPipeline": false, 58 | "location": "prod", 59 | "manifestName": "deployment hello-world-prod", 60 | "mode": "static", 61 | "name": "Get Info", 62 | "refId": "2", 63 | "requisiteStageRefIds": [], 64 | "type": "findArtifactsFromResource" 65 | }, 66 | { 67 | "comments": "
\nCurrent Image: Get current image from hello-world-prod deployment (if it's valid), otherwise default to 'justinrlee/hello-world:monday'\nCurrent Instances: Get current replica count from hello-world-prod deployment (if it's valid), otherwise default to 4\nNew Image: Build from trigger.\n
", 68 | "failOnFailedExpressions": true, 69 | "name": "Evaluate Variables", 70 | "refId": "3", 71 | "requisiteStageRefIds": [ 72 | "2", 73 | "5" 74 | ], 75 | "type": "evaluateVariables", 76 | "variables": [ 77 | { 78 | "key": "current_image", 79 | "value": "${#stage(\"Get Info\").status == \"FAILED_CONTINUE\" ? \"justinrlee/hello-world:monday\" : #stage(\"Get Info\").context.artifacts.^[type== \"docker/image\"].reference}" 80 | }, 81 | { 82 | "key": "current_instances", 83 | "value": "${#stage(\"Get Info\").status == \"FAILED_CONTINUE\" ? 4 : #stage(\"Get Info\").context.manifest.spec.replicas}" 84 | }, 85 | { 86 | "key": "new_image", 87 | "value": "justinrlee/hello-world:${trigger.parameters.tag == \"random\" ? new String[7]{\"monday\",\"tuesday\",\"wednesday\",\"thursday\",\"friday\",\"saturday\",\"sunday\"}[new java.util.Random().nextInt(7)] : trigger.parameters.tag}" 88 | }, 89 | { 90 | "key": "random_day", 91 | "value": "${new String[7]{\"monday\",\"tuesday\",\"wednesday\",\"thursday\",\"friday\",\"saturday\",\"sunday\"}[new java.util.Random().nextInt(7)]}" 92 | } 93 | ] 94 | }, 95 | { 96 | "account": "spinnaker", 97 | "cloudProvider": "kubernetes", 98 | "manifests": [ 99 | { 100 | "apiVersion": "apps/v1", 101 | "kind": "Deployment", 102 | "metadata": { 103 | "annotations": { 104 | "strategy.spinnaker.io/max-version-history": "2" 105 | }, 106 | "name": "hello-world-baseline" 107 | }, 108 | "spec": { 109 | "replicas": 1, 110 | "selector": { 111 | "matchLabels": { 112 | "app": "hello-world", 113 | "group": "baseline" 114 | } 115 | }, 116 | "template": { 117 | "metadata": { 118 | "annotations": { 119 | "prometheus.io/path": "/metrics", 120 | "prometheus.io/port": "8080", 121 | "prometheus.io/scrape": "true" 122 | }, 123 | "labels": { 124 | "app": "hello-world", 125 | "group": "baseline" 126 | } 127 | }, 128 | "spec": { 129 | "containers": [ 130 | { 131 | "image": "${current_image}", 132 | "imagePullPolicy": "Always", 133 | "name": "hello-world", 134 | "ports": [ 
135 | { 136 | "containerPort": 8080 137 | } 138 | ] 139 | } 140 | ] 141 | } 142 | } 143 | } 144 | } 145 | ], 146 | "moniker": { 147 | "app": "democanary" 148 | }, 149 | "name": "Deploy Baseline", 150 | "namespaceOverride": "prod", 151 | "refId": "4", 152 | "requisiteStageRefIds": [ 153 | "3" 154 | ], 155 | "skipExpressionEvaluation": false, 156 | "source": "text", 157 | "trafficManagement": { 158 | "enabled": false, 159 | "options": { 160 | "enableTraffic": false, 161 | "services": [] 162 | } 163 | }, 164 | "type": "deployManifest" 165 | }, 166 | { 167 | "account": "spinnaker", 168 | "cloudProvider": "kubernetes", 169 | "manifests": [ 170 | { 171 | "apiVersion": "v1", 172 | "kind": "Service", 173 | "metadata": { 174 | "labels": { 175 | "app": "hello-world" 176 | }, 177 | "name": "hello-world" 178 | }, 179 | "spec": { 180 | "ports": [ 181 | { 182 | "name": "web", 183 | "port": 8080 184 | } 185 | ], 186 | "selector": { 187 | "app": "hello-world" 188 | } 189 | } 190 | }, 191 | { 192 | "apiVersion": "monitoring.coreos.com/v1", 193 | "kind": "ServiceMonitor", 194 | "metadata": { 195 | "name": "hello-world" 196 | }, 197 | "spec": { 198 | "endpoints": [ 199 | { 200 | "port": "web" 201 | } 202 | ], 203 | "namespaceSelector": { 204 | "any": true 205 | }, 206 | "podTargetLabels": [ 207 | "group", 208 | "app_version" 209 | ], 210 | "selector": { 211 | "matchLabels": { 212 | "app": "hello-world" 213 | } 214 | } 215 | } 216 | } 217 | ], 218 | "moniker": { 219 | "app": "democanary" 220 | }, 221 | "name": "Deploy Service and ServiceMonitor", 222 | "namespaceOverride": "prod", 223 | "refId": "5", 224 | "requisiteStageRefIds": [], 225 | "skipExpressionEvaluation": false, 226 | "source": "text", 227 | "trafficManagement": { 228 | "enabled": false, 229 | "options": { 230 | "enableTraffic": false 231 | } 232 | }, 233 | "type": "deployManifest" 234 | }, 235 | { 236 | "account": "spinnaker", 237 | "cloudProvider": "kubernetes", 238 | "manifests": [ 239 | { 240 | "apiVersion": "apps/v1", 
241 | "kind": "Deployment", 242 | "metadata": { 243 | "annotations": { 244 | "strategy.spinnaker.io/max-version-history": "2" 245 | }, 246 | "name": "hello-world-canary" 247 | }, 248 | "spec": { 249 | "replicas": 1, 250 | "selector": { 251 | "matchLabels": { 252 | "app": "hello-world", 253 | "group": "canary" 254 | } 255 | }, 256 | "template": { 257 | "metadata": { 258 | "annotations": { 259 | "prometheus.io/path": "/metrics", 260 | "prometheus.io/port": "8080", 261 | "prometheus.io/scrape": "true" 262 | }, 263 | "labels": { 264 | "app": "hello-world", 265 | "group": "canary" 266 | } 267 | }, 268 | "spec": { 269 | "containers": [ 270 | { 271 | "image": "${new_image}", 272 | "imagePullPolicy": "Always", 273 | "name": "hello-world", 274 | "ports": [ 275 | { 276 | "containerPort": 8080 277 | } 278 | ] 279 | } 280 | ] 281 | } 282 | } 283 | } 284 | } 285 | ], 286 | "moniker": { 287 | "app": "democanary" 288 | }, 289 | "name": "Deploy Canary", 290 | "namespaceOverride": "prod", 291 | "refId": "6", 292 | "requisiteStageRefIds": [ 293 | "3" 294 | ], 295 | "skipExpressionEvaluation": false, 296 | "source": "text", 297 | "trafficManagement": { 298 | "enabled": false, 299 | "options": { 300 | "enableTraffic": false, 301 | "services": [] 302 | } 303 | }, 304 | "type": "deployManifest" 305 | }, 306 | { 307 | "name": "Gather", 308 | "refId": "7", 309 | "requisiteStageRefIds": [ 310 | "4", 311 | "6" 312 | ], 313 | "type": "wait", 314 | "waitTime": 1 315 | }, 316 | { 317 | "name": "Wait", 318 | "comments": "This is a dummy stage that could be substituted with other testing stages", 319 | "refId": "8", 320 | "requisiteStageRefIds": [ 321 | "7" 322 | ], 323 | "type": "wait", 324 | "waitTime": 2 325 | }, 326 | { 327 | "completeOtherBranchesThenFail": false, 328 | "continuePipeline": true, 329 | "failPipeline": false, 330 | "comments": "This is a manual judgment will only occur if the canary fails; otherwise, the app will be promoted automatically", 331 | "instructions": "Click 
\"Continue\" to promote and \"Stop\" to not promote.", 332 | "judgmentInputs": [], 333 | "name": "Manual Judgment", 334 | "notifications": [], 335 | "refId": "9", 336 | "requisiteStageRefIds": [ 337 | "8", 338 | "13" 339 | ], 340 | "stageEnabled": { 341 | "expression": "${#stage(\"Canary Analysis\").status != \"SUCCEEDED\"}", 342 | "type": "expression" 343 | }, 344 | "type": "manualJudgment" 345 | }, 346 | { 347 | "account": "spinnaker", 348 | "app": "democanary", 349 | "cloudProvider": "kubernetes", 350 | "location": "prod", 351 | "manifestName": "deployment hello-world-canary", 352 | "mode": "static", 353 | "name": "Destroy Canary", 354 | "options": { 355 | "cascading": true 356 | }, 357 | "refId": "10", 358 | "requisiteStageRefIds": [ 359 | "9" 360 | ], 361 | "type": "deleteManifest" 362 | }, 363 | { 364 | "account": "spinnaker", 365 | "app": "democanary", 366 | "cloudProvider": "kubernetes", 367 | "location": "prod", 368 | "manifestName": "deployment hello-world-baseline", 369 | "mode": "static", 370 | "name": "Destroy Baseline", 371 | "options": { 372 | "cascading": true 373 | }, 374 | "refId": "11", 375 | "requisiteStageRefIds": [ 376 | "9" 377 | ], 378 | "type": "deleteManifest" 379 | }, 380 | { 381 | "account": "spinnaker", 382 | "cloudProvider": "kubernetes", 383 | "manifests": [ 384 | { 385 | "apiVersion": "apps/v1", 386 | "kind": "Deployment", 387 | "metadata": { 388 | "annotations": { 389 | "strategy.spinnaker.io/max-version-history": "4" 390 | }, 391 | "name": "hello-world-prod" 392 | }, 393 | "spec": { 394 | "replicas": "${current_instances.intValue()}", 395 | "selector": { 396 | "matchLabels": { 397 | "app": "hello-world", 398 | "group": "prod" 399 | } 400 | }, 401 | "template": { 402 | "metadata": { 403 | "annotations": { 404 | "prometheus.io/path": "/metrics", 405 | "prometheus.io/port": "8080", 406 | "prometheus.io/scrape": "true" 407 | }, 408 | "labels": { 409 | "app": "hello-world", 410 | "group": "prod" 411 | } 412 | }, 413 | "spec": { 414 | 
"containers": [ 415 | { 416 | "image": "${new_image}", 417 | "imagePullPolicy": "Always", 418 | "name": "hello-world", 419 | "ports": [ 420 | { 421 | "containerPort": 8080 422 | } 423 | ] 424 | } 425 | ] 426 | } 427 | } 428 | } 429 | } 430 | ], 431 | "moniker": { 432 | "app": "democanary" 433 | }, 434 | "name": "Promote Prod", 435 | "namespaceOverride": "prod", 436 | "refId": "12", 437 | "requisiteStageRefIds": [ 438 | "9" 439 | ], 440 | "skipExpressionEvaluation": false, 441 | "source": "text", 442 | "stageEnabled": { 443 | "expression": "${#stage(\"Manual Judgment\").context.judgmentStatus != \"stop\"}", 444 | "type": "expression" 445 | }, 446 | "trafficManagement": { 447 | "enabled": false, 448 | "options": { 449 | "enableTraffic": false, 450 | "services": [] 451 | } 452 | }, 453 | "type": "deployManifest" 454 | }, 455 | { 456 | "analysisType": "realTime", 457 | "canaryConfig": { 458 | "beginCanaryAnalysisAfterMins": "1", 459 | "canaryAnalysisIntervalMins": "4", 460 | "canaryConfigId": "__CANARY_CONFIG_UUID__", 461 | "lifetimeDuration": "PT0H20M", 462 | "metricsAccountName": "prometheus", 463 | "scopes": [ 464 | { 465 | "controlLocation": "prod", 466 | "controlScope": "baseline", 467 | "experimentLocation": "prod", 468 | "experimentScope": "canary", 469 | "extendedScopeParams": {}, 470 | "scopeName": "default", 471 | "step": 5 472 | } 473 | ], 474 | "scoreThresholds": { 475 | "marginal": "0", 476 | "pass": "70" 477 | }, 478 | "storageAccountName": "minio" 479 | }, 480 | "completeOtherBranchesThenFail": false, 481 | "continuePipeline": true, 482 | "failPipeline": false, 483 | "name": "Canary Analysis", 484 | "refId": "13", 485 | "requisiteStageRefIds": [ 486 | "7" 487 | ], 488 | "type": "kayentaCanary" 489 | } 490 | ], 491 | "triggers": [ 492 | ] 493 | } -------------------------------------------------------------------------------- /templates/addons/demo/demok8s/pipelines/PIPELINE_UUID/pipeline-metadata.json.tmpl: 
-------------------------------------------------------------------------------- 1 | { 2 | "application": "demok8s", 3 | "name": "Kubernetes Demo", 4 | "id": "__PIPELINE_UUID__", 5 | "updateTs": "__TIMESTAMP__", 6 | "description": "This is a sample pipeline that does the following:\n* Sets up a number of Kubernetes Services and Ingresses (using the default Traefik Ingress controller included with K3s)\n* Deploys a hello world Deployment to the dev namespace (called 'demok8s')\n* Pauses for approval before deploying to test\n* Deploys the hello world Deployment to the test namespace (also called 'demok8s')\n* Runs, in parallel, a webhook and a wait stage (these could be substituted for calls out to other test tools or endpoints)\n* Pauses for approval before deploying to prod\n* Deploys the hello world app as a ReplicaSet to the prod namespace (also called 'demok8s'), using the Spinnaker blue/green (red/black) capability.", 7 | "keepWaitingPipelines": false, 8 | "lastModifiedBy": "demo", 9 | "limitConcurrent": true, 10 | "parameterConfig": [ 11 | { 12 | "default": "monday", 13 | "description": "", 14 | "hasOptions": true, 15 | "label": "", 16 | "name": "tag", 17 | "options": [ 18 | { 19 | "value": "monday" 20 | }, 21 | { 22 | "value": "tuesday" 23 | }, 24 | { 25 | "value": "wednesday" 26 | }, 27 | { 28 | "value": "thursday" 29 | }, 30 | { 31 | "value": "friday" 32 | }, 33 | { 34 | "value": "saturday" 35 | }, 36 | { 37 | "value": "sunday" 38 | } 39 | ], 40 | "pinned": false, 41 | "required": true 42 | } 43 | ], 44 | "stages": [ 45 | { 46 | "account": "spinnaker", 47 | "cloudProvider": "kubernetes", 48 | "comments": "

Deploying to the dev environment (access the app at https://MINNAKER_URL/dev/demok8s)

\n\n

The endpoint is accessed through a Kubernetes Ingress (listening on path /dev/demok8s) and an accompanying Kubernetes Service, both in the \"dev\" namespace.

", 49 | "manifests": [ 50 | { 51 | "apiVersion": "apps/v1", 52 | "kind": "Deployment", 53 | "metadata": { 54 | "name": "demok8s" 55 | }, 56 | "spec": { 57 | "replicas": 3, 58 | "selector": { 59 | "matchLabels": { 60 | "app": "demok8s" 61 | } 62 | }, 63 | "template": { 64 | "metadata": { 65 | "labels": { 66 | "app": "demok8s", 67 | "lb": "demok8s" 68 | } 69 | }, 70 | "spec": { 71 | "containers": [ 72 | { 73 | "image": "justinrlee/nginx:${parameters[\"tag\"]}", 74 | "name": "primary", 75 | "ports": [ 76 | { 77 | "containerPort": 80 78 | } 79 | ] 80 | } 81 | ] 82 | } 83 | } 84 | } 85 | } 86 | ], 87 | "moniker": { 88 | "app": "demok8s" 89 | }, 90 | "name": "Deploy Dev", 91 | "namespaceOverride": "dev", 92 | "refId": "1", 93 | "requisiteStageRefIds": [], 94 | "skipExpressionEvaluation": false, 95 | "source": "text", 96 | "trafficManagement": { 97 | "enabled": false, 98 | "options": { 99 | "enableTraffic": false, 100 | "services": [] 101 | } 102 | }, 103 | "type": "deployManifest" 104 | }, 105 | { 106 | "failPipeline": true, 107 | "instructions": "Please verify Dev (https://MINNAKER_URL/dev/demok8s) and click 'Continue' to continue deploying to Test.", 108 | "judgmentInputs": [], 109 | "name": "Manual Judgment: Deploy to Test", 110 | "notifications": [], 111 | "refId": "2", 112 | "requisiteStageRefIds": [ 113 | "1", 114 | "8", 115 | "9" 116 | ], 117 | "type": "manualJudgment" 118 | }, 119 | { 120 | "account": "spinnaker", 121 | "cloudProvider": "kubernetes", 122 | "comments": "

Deploying to the test environment (access the app at https://MINNAKER_URL/test/demok8s)

\n\n

The endpoint is accessed through a Kubernetes Ingress (listening on path /test/demok8s) and an accompanying Kubernetes Service, both in the \"test\" namespace.

", 123 | "manifests": [ 124 | { 125 | "apiVersion": "apps/v1", 126 | "kind": "Deployment", 127 | "metadata": { 128 | "name": "demok8s" 129 | }, 130 | "spec": { 131 | "replicas": 3, 132 | "selector": { 133 | "matchLabels": { 134 | "app": "demok8s" 135 | } 136 | }, 137 | "template": { 138 | "metadata": { 139 | "labels": { 140 | "app": "demok8s", 141 | "lb": "demok8s" 142 | } 143 | }, 144 | "spec": { 145 | "containers": [ 146 | { 147 | "image": "justinrlee/nginx:${parameters[\"tag\"]}", 148 | "name": "primary", 149 | "ports": [ 150 | { 151 | "containerPort": 80 152 | } 153 | ] 154 | } 155 | ] 156 | } 157 | } 158 | } 159 | } 160 | ], 161 | "moniker": { 162 | "app": "demok8s" 163 | }, 164 | "name": "Deploy Test", 165 | "namespaceOverride": "test", 166 | "refId": "3", 167 | "requisiteStageRefIds": [ 168 | "2" 169 | ], 170 | "skipExpressionEvaluation": false, 171 | "source": "text", 172 | "trafficManagement": { 173 | "enabled": false, 174 | "options": { 175 | "enableTraffic": false, 176 | "services": [] 177 | } 178 | }, 179 | "type": "deployManifest" 180 | }, 181 | { 182 | "name": "Wait", 183 | "refId": "4", 184 | "requisiteStageRefIds": [ 185 | "3" 186 | ], 187 | "type": "wait", 188 | "waitTime": 30 189 | }, 190 | { 191 | "failPipeline": true, 192 | "instructions": "Please verify Test (https://MINNAKER_URL/test/demok8s) and click 'Continue' to continue deploying to Prod", 193 | "judgmentInputs": [], 194 | "name": "Manual Judgment: Deploy to Prod", 195 | "notifications": [], 196 | "refId": "5", 197 | "requisiteStageRefIds": [ 198 | "4", 199 | "7" 200 | ], 201 | "type": "manualJudgment" 202 | }, 203 | { 204 | "account": "spinnaker", 205 | "cloudProvider": "kubernetes", 206 | "comments": "

Deploying to the prod environment (access the app at https://MINNAKER_URL/prod/demok8s)

\n\n

The endpoint is accessed through a Kubernetes Ingress (listening on path /prod/demok8s) and an accompanying Kubernetes Service, both in the \"prod\" namespace.

", 207 | "manifests": [ 208 | { 209 | "apiVersion": "apps/v1", 210 | "kind": "ReplicaSet", 211 | "metadata": { 212 | "name": "demok8s" 213 | }, 214 | "spec": { 215 | "replicas": 3, 216 | "selector": { 217 | "matchLabels": { 218 | "app": "demok8s" 219 | } 220 | }, 221 | "template": { 222 | "metadata": { 223 | "labels": { 224 | "app": "demok8s" 225 | } 226 | }, 227 | "spec": { 228 | "containers": [ 229 | { 230 | "image": "justinrlee/nginx:${parameters[\"tag\"]}", 231 | "name": "primary", 232 | "ports": [ 233 | { 234 | "containerPort": 80, 235 | "protocol": "TCP" 236 | } 237 | ] 238 | } 239 | ] 240 | } 241 | } 242 | } 243 | } 244 | ], 245 | "moniker": { 246 | "app": "demok8s" 247 | }, 248 | "name": "Deploy Prod (Blue/Green)", 249 | "namespaceOverride": "prod", 250 | "refId": "6", 251 | "requisiteStageRefIds": [ 252 | "5" 253 | ], 254 | "skipExpressionEvaluation": false, 255 | "source": "text", 256 | "trafficManagement": { 257 | "enabled": true, 258 | "options": { 259 | "enableTraffic": true, 260 | "namespace": "prod", 261 | "services": [ 262 | "service demok8s" 263 | ], 264 | "strategy": "redblack" 265 | } 266 | }, 267 | "type": "deployManifest" 268 | }, 269 | { 270 | "method": "GET", 271 | "name": "Webhook", 272 | "refId": "7", 273 | "requisiteStageRefIds": [ 274 | "3" 275 | ], 276 | "statusUrlResolution": "getMethod", 277 | "type": "webhook", 278 | "url": "https://www.google.com/" 279 | }, 280 | { 281 | "account": "spinnaker", 282 | "cloudProvider": "kubernetes", 283 | "manifests": [ 284 | { 285 | "apiVersion": "v1", 286 | "kind": "Service", 287 | "metadata": { 288 | "name": "demok8s", 289 | "namespace": "dev" 290 | }, 291 | "spec": { 292 | "ports": [ 293 | { 294 | "name": "http", 295 | "port": 80, 296 | "protocol": "TCP", 297 | "targetPort": 80 298 | } 299 | ], 300 | "selector": { 301 | "lb": "demok8s" 302 | } 303 | } 304 | }, 305 | { 306 | "apiVersion": "extensions/v1beta1", 307 | "kind": "Ingress", 308 | "metadata": { 309 | "annotations": { 310 | 
"nginx.ingress.kubernetes.io/rewrite-target": "/", 311 | "traefik.ingress.kubernetes.io/rule-type": "PathPrefixStrip" 312 | }, 313 | "labels": { 314 | "app": "demok8s" 315 | }, 316 | "name": "demok8s", 317 | "namespace": "dev" 318 | }, 319 | "spec": { 320 | "rules": [ 321 | { 322 | "http": { 323 | "paths": [ 324 | { 325 | "backend": { 326 | "serviceName": "demok8s", 327 | "servicePort": "http" 328 | }, 329 | "path": "/dev/demok8s" 330 | } 331 | ] 332 | } 333 | } 334 | ] 335 | } 336 | } 337 | ], 338 | "moniker": { 339 | "app": "demok8s" 340 | }, 341 | "name": "Deploy Dev Service and Ingress", 342 | "namespaceOverride": "", 343 | "refId": "8", 344 | "requisiteStageRefIds": [], 345 | "skipExpressionEvaluation": false, 346 | "source": "text", 347 | "trafficManagement": { 348 | "enabled": false, 349 | "options": { 350 | "enableTraffic": false, 351 | "services": [] 352 | } 353 | }, 354 | "type": "deployManifest" 355 | }, 356 | { 357 | "account": "spinnaker", 358 | "cloudProvider": "kubernetes", 359 | "manifests": [ 360 | { 361 | "apiVersion": "v1", 362 | "kind": "Service", 363 | "metadata": { 364 | "name": "demok8s", 365 | "namespace": "test" 366 | }, 367 | "spec": { 368 | "ports": [ 369 | { 370 | "name": "http", 371 | "port": 80, 372 | "protocol": "TCP", 373 | "targetPort": 80 374 | } 375 | ], 376 | "selector": { 377 | "lb": "demok8s" 378 | } 379 | } 380 | }, 381 | { 382 | "apiVersion": "extensions/v1beta1", 383 | "kind": "Ingress", 384 | "metadata": { 385 | "annotations": { 386 | "nginx.ingress.kubernetes.io/rewrite-target": "/", 387 | "traefik.ingress.kubernetes.io/rule-type": "PathPrefixStrip" 388 | }, 389 | "labels": { 390 | "app": "demok8s" 391 | }, 392 | "name": "demok8s", 393 | "namespace": "test" 394 | }, 395 | "spec": { 396 | "rules": [ 397 | { 398 | "http": { 399 | "paths": [ 400 | { 401 | "backend": { 402 | "serviceName": "demok8s", 403 | "servicePort": "http" 404 | }, 405 | "path": "/test/demok8s" 406 | } 407 | ] 408 | } 409 | } 410 | ] 411 | } 412 | } 413 | 
], 414 | "moniker": { 415 | "app": "demok8s" 416 | }, 417 | "name": "Deploy Test Service and Ingress", 418 | "refId": "9", 419 | "requisiteStageRefIds": [], 420 | "skipExpressionEvaluation": false, 421 | "source": "text", 422 | "trafficManagement": { 423 | "enabled": false, 424 | "options": { 425 | "enableTraffic": false, 426 | "services": [] 427 | } 428 | }, 429 | "type": "deployManifest" 430 | }, 431 | { 432 | "account": "spinnaker", 433 | "cloudProvider": "kubernetes", 434 | "manifests": [ 435 | { 436 | "apiVersion": "v1", 437 | "kind": "Service", 438 | "metadata": { 439 | "name": "demok8s", 440 | "namespace": "prod" 441 | }, 442 | "spec": { 443 | "ports": [ 444 | { 445 | "name": "http", 446 | "port": 80, 447 | "protocol": "TCP", 448 | "targetPort": 80 449 | } 450 | ], 451 | "selector": { 452 | "lb": "demok8s" 453 | } 454 | } 455 | }, 456 | { 457 | "apiVersion": "extensions/v1beta1", 458 | "kind": "Ingress", 459 | "metadata": { 460 | "annotations": { 461 | "nginx.ingress.kubernetes.io/rewrite-target": "/", 462 | "traefik.ingress.kubernetes.io/rule-type": "PathPrefixStrip" 463 | }, 464 | "labels": { 465 | "app": "demok8s" 466 | }, 467 | "name": "demok8s", 468 | "namespace": "prod" 469 | }, 470 | "spec": { 471 | "rules": [ 472 | { 473 | "http": { 474 | "paths": [ 475 | { 476 | "backend": { 477 | "serviceName": "demok8s", 478 | "servicePort": "http" 479 | }, 480 | "path": "/prod/demok8s" 481 | } 482 | ] 483 | } 484 | } 485 | ] 486 | } 487 | } 488 | ], 489 | "moniker": { 490 | "app": "demok8s" 491 | }, 492 | "name": "Deploy Prod Service and Ingress", 493 | "namespaceOverride": "", 494 | "refId": "10", 495 | "requisiteStageRefIds": [], 496 | "skipExpressionEvaluation": false, 497 | "source": "text", 498 | "trafficManagement": { 499 | "enabled": false, 500 | "options": { 501 | "enableTraffic": false, 502 | "services": [] 503 | } 504 | }, 505 | "type": "deployManifest" 506 | } 507 | ], 508 | "triggers": [] 509 | } 
-------------------------------------------------------------------------------- /guides/first-pipeline-jenkins.md: -------------------------------------------------------------------------------- 1 | # Deploy a Jenkins-built container to Kubernetes with Spinnaker (with ECR) 2 | 3 | ### This document is still in *draft* form 4 | 5 | In this codelab, we will perform the following: 6 | 7 | * Configure Spinnaker with a GitHub credential 8 | 9 | Then, we'll build out the process to do the following: 10 | * Jenkins build a Docker image 11 | * Jenkins push the Docker image to ECR (alternately, to Docker Hub) 12 | * Jenkins send a webhook to Spinnaker to trigger a Spinnaker pipeline, with a Docker image artifact 13 | * Spinnaker receive the webhook, pull a Kubernetes manifest from GitHub, hydrate the Docker image, and deploy the manifest to a Kubernetes cluster 14 | 15 | We assume the following in this document: 16 | * You have the following set up and configured: 17 | * A Kubernetes cluster 18 | * A Jenkins instance 19 | * A Spinnaker instance that has access to your Kubernetes cluster 20 | * A GitHub account or a GitHub Enterprise instance with an account 21 | * Jenkins slaves are configured with the Docker daemon (to build Docker images) 22 | * Jenkins slaves have credentials to push to your Docker registry of choice 23 | * Your Kubernetes cluster is able to run images from your Docker registry 24 | 25 | ## Set up 26 | ### Configure Spinnaker with GitHub credentials 27 | 28 | (OSS documentation for this is here: https://www.spinnaker.io/setup/artifacts/github/) 29 | 30 | First, create a credential for Spinnaker to use to access GitHub: 31 | 32 | * In your GitHub, go to Settings (click on your user icon in the top right) > Developer Settings > Personal Access Tokens 33 | * Click "Generate new token" 34 | * Give your token a name, and give it the 'repo' access 35 | * Copy the token down 36 | 37 | Then, using Halyard, add the credential to Spinnaker as "GitHub artifact
Account" (these all take place in Halyard): 38 | 39 | * In your Halyard, enable the artifacts feature: 40 | 41 | ```bash 42 | hal config features edit --artifacts true 43 | ``` 44 | 45 | * In your Halyard, enable the new artifact UI feature: 46 | 47 | ```bash 48 | hal config features edit --artifacts-rewrite true 49 | ``` 50 | 51 | * Enable the GitHub artifact account type: 52 | 53 | ```bash 54 | hal config artifact github enable 55 | ``` 56 | 57 | * Add the credential as a "GitHub Artifact Account": 58 | 59 | *You will be prompted for a token; enter your token at the prompt.* 60 | 61 | ```bash 62 | hal config artifact github account add my-github-credential \ 63 | --token 64 | ``` 65 | 66 | * Apply (Deploy) your changes: 67 | 68 | ```bash 69 | hal deploy apply 70 | ``` 71 | 72 | ## Create an ECR Repository 73 | 74 | * In the AWS console, go to Compute > ECR 75 | * Click "Create repository" 76 | * Give it a name and namespace (for example, hello-world/nginx) 77 | * You'll get a repository formatted like this: `111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx` 78 | 79 | ## Configure Jenkins to push to the repository 80 | 81 | ### Set up Cross-Account Access 82 | 83 | Assuming Jenkins is running in a different AWS account from your ECR repository, you'll need to set up cross-account access.
84 | 85 | * Get the AWS account ID for the AWS account where Jenkins is running (you can use the command `aws sts get-caller-identity` to see what account you're accesssing from; for example, an ARN of `arn:aws:sts::222233334444:assumed-role/ec2-role/i-00001111222233334` means you're in `222233334444`) 86 | * Log into to the AWS console account where your ECR repository exists, and go to Compute > ECR 87 | * Click on your repository 88 | * On the left side, click on "Permissions" 89 | * Edit the policy JSON to include this: 90 | 91 | ```json 92 | { 93 | "Version": "2008-10-17", 94 | "Statement": [ 95 | { 96 | "Sid": "AllowCrossAccountPush", 97 | "Effect": "Allow", 98 | "Principal": { 99 | "AWS": "arn:aws:iam::222233334444:root" 100 | }, 101 | "Action": [ 102 | "ecr:BatchCheckLayerAvailability", 103 | "ecr:CompleteLayerUpload", 104 | "ecr:GetDownloadUrlForLayer", 105 | "ecr:InitiateLayerUpload", 106 | "ecr:PutImage", 107 | "ecr:UploadLayerPart" 108 | ] 109 | } 110 | ] 111 | } 112 | ``` 113 | 114 | *This will allow entities in the `222233334444` AWS account to push to this repo* 115 | 116 | * Save your changes 117 | 118 | ## Set up ECR Repository Access 119 | 120 | **There are many ways to do this; this is an insecure, temporary way to do this (ideally you'd set up use an AWS helper function to dynamically generate Docker creds)** 121 | 122 | The machine where you are doing Docker builds will need permissions and credentials to push to your ECR repository. In this case, this will likely be the Jenkins slave where your Docker builds will be taking place. 123 | 124 | * If you don't have an IAM role attached to the EC2 instance, go to the instance in the EC2 console, add an EC2 IAM role (either use an existing role, or create a new role), and add the `AmazonEC2ContainerRegistryFullAccess` policy to the role. 
125 | 126 | ## Build the Docker image in Jenkins 127 | 128 | *Something very similar to this could be achieved with the Docker plugin, or whatever other Docker build and push mechanism your organization uses. This is meant to be more illustrative of the process than efficient; you can tweak this significantly with better build triggers, pipelines, plugins, and so forth.* 129 | 130 | In your Jenkins instance, create a new item of type "Freestyle project". Set up a "Build" step of type "Shell" with something like this: 131 | 132 | ```bash 133 | # This is a basic 'hello world' index page for nginx 134 | tee index.html <<-'EOF' 135 | hello world 136 | EOF 137 | 138 | # This is a basic Dockerfile that starts with nginx, and adds our hello world page 139 | tee Dockerfile <<-'EOF' 140 | FROM nginx:latest 141 | COPY index.html /usr/share/nginx/html/index.html 142 | EOF 143 | 144 | TAG=$(date +%s) 145 | 146 | # This removes any AWS creds if they're present in the environment; remove this if you want to use the creds baked into Jenkins 147 | 148 | unset AWS_ACCESS_KEY_ID 149 | unset AWS_SECRET_ACCESS_KEY 150 | 151 | # Replace 111122223333 with the account ID where your ECR repo is, and the region with the region where your ECR repo is 152 | $(aws ecr get-login --no-include-email --region us-west-2 --registry-ids 111122223333) 153 | 154 | # Replace 111122223333 with the account ID where your ECR repo is, and the region with the region where your ECR repo is 155 | docker build . -t 111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:${TAG} 156 | 157 | # Replace 111122223333 with the account ID where your ECR repo is, and the region with the region where your ECR repo is 158 | docker push 111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:${TAG} 159 | ``` 160 | 161 | Build your Jenkins job, and you should see a new Docker image tag show up in your ECR repo. 
162 | 163 | ## Deploy an initial (static) manifest from Spinnaker 164 | 165 | * In Spinnaker, create a new application, then a new pipeline 166 | * Add a stage "Deploy (Manifest)" 167 | * Select your Kubernetes cluster from the Account drop down 168 | * Select the "Override Namespace" checkbox, and select a namespace that Spinnaker is allowed to deploy to 169 | * In the manifest, put this (replace the image with the produced image and tag) 170 | 171 | ```yml 172 | apiVersion: apps/v1 173 | kind: Deployment 174 | metadata: 175 | name: hello-today 176 | spec: 177 | replicas: 3 178 | selector: 179 | matchLabels: 180 | app: hello-today 181 | template: 182 | metadata: 183 | labels: 184 | app: hello-today 185 | lb: hello-today 186 | spec: 187 | containers: 188 | - image: '111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:1581376100' 189 | name: primary 190 | ports: 191 | - containerPort: 80 192 | ``` 193 | 194 | Go to your pipelines page, and trigger the pipeline. Verify that it deploys (this will validate that Kubernetes can run images from your ECR repo) 195 | 196 | This should do the following: 197 | * Start the Spinnaker pipeline 198 | * Deploy the manifest 199 | * Wait for the pods to be fully up 200 | 201 | ## Update the manifest to use a dynamic image, and add a trigger with a default tag 202 | 203 | * Go back to the pipeline configuration. 204 | * In the pipeline stage UI, click on "Configuration" on the left 205 | * Click "Add Trigger" 206 | * Select "Webhook" for the "Type" 207 | * Add "hello-world" to the "source". This will create a URL like https://your-spinnaker-url/api/v1/webhooks/webhook/hello-world - this is the webhook endpoint used to trigger the pipeline. Remember this URL. 
208 | * Add a payload constraint with a "key" of "secret" and a "value" of "my-secret-value" 209 | * Click on "Artifact Constraints" > Define a new artifact 210 | * Enter these values: 211 | * Account: "custom-artifact" 212 | * Type: "docker/image" 213 | * Name: "hello-world/nginx" 214 | * Check the "Use default artifact" checkbox. Enter these values: 215 | * Account: "custom-artifact" 216 | * Type: "docker/image" 217 | * Name: "hello-world/nginx" 218 | * Reference: "111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:1581376100" (replace with a valid tag) 219 | * The artifact will result in a "Display Name" (like "mean-eel-993") 220 | * Navigate back to the "Deploy (Manifest)" stage, and make these changes: 221 | * Change the image field in the manifest to be just "hello-world/nginx" 222 | * Add a "Required artifacts to bind" indicating the display name of your artifact 223 | 224 | Your full manifest should look like this: 225 | 226 | ```yaml 227 | apiVersion: apps/v1 228 | kind: Deployment 229 | metadata: 230 | name: hello-today 231 | spec: 232 | replicas: 3 233 | selector: 234 | matchLabels: 235 | app: hello-today 236 | template: 237 | metadata: 238 | labels: 239 | app: hello-today 240 | lb: hello-today 241 | spec: 242 | containers: 243 | - image: hello-world/nginx 244 | name: primary 245 | ports: 246 | - containerPort: 80 247 | ``` 248 | 249 | Trigger the pipeline, and ensure the Docker image in the hydrated manifest is the fully qualified manifest 250 | 251 | Here's what this is doing: 252 | * When you run the pipeline, it's looking an input artifact matching this pattern: 253 | ```json 254 | { 255 | "type": "docker/image", 256 | "name": "hello-world/nginx" 257 | } 258 | ``` 259 | 260 | * Since you are triggering the pipeline manually, it's not finding the input artifact, so it's using the default artifact: 261 | ```json 262 | { 263 | "type": "docker/image", 264 | "name": "hello-world/nginx", 265 | "reference": 
"111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:1581376505" 266 | } 267 | ``` 268 | 269 | * Because your manifest is configured to "Bind" the artifact, it will look for images populated with "hello-world/nginx", and replacing them with the "found" reference (in this case, the reference from the default artifact) before the deployment. 270 | 271 | This should do the following: 272 | * Start the Spinnaker pipeline 273 | * Parse the default artifact 274 | * Replace the `hello-world/nginx` with the reference from your passed-in artifact 275 | * Deploy the hydrated manifest 276 | * Wait for the pods to be fully up 277 | 278 | ## Trigger the pipeline from a CLI 279 | 280 | Do another build, and get another Docker image. For example: `111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:1581376505` (note the different tag). 281 | 282 | Then, using the URL from above, as well as the key/value pair, do this from a shell terminal: 283 | 284 | ```bash 285 | tee body.json <<-EOF 286 | { 287 | "secret": "my-secret-value", 288 | "artifacts": [ 289 | { 290 | "type": "docker/image", 291 | "name": "hello-world/nginx", 292 | "reference": "111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:1581376505" 293 | } 294 | ] 295 | } 296 | EOF 297 | 298 | curl -k -X POST \ 299 | -H 'content-type:application/json' \ 300 | -d @body.json \ 301 | https://your-spinnaker-url/api/v1/webhooks/webhook/hello-world 302 | ``` 303 | 304 | Go to Spinnaker, and your pipeline should trigger with the new tag (check the deployed manifest) 305 | 306 | This should do the following: 307 | * Start the Spinnaker pipeline 308 | * Parse the artifact with your generated tag 309 | * Pull the Kubernetes manifest from GitHub (Enterprise) 310 | * Replace the `hello-world/nginx` with the reference from your passed-in artifact 311 | * Deploy the hydrated manifest 312 | * Wait for the pods to be fully up 313 | 314 | ## Trigger the pipeline from Jenkins 315 | 316 | Go back into Jenkins, 
and add the above curl command to the end of your shell command. Replace the tag with your dynamically generated tag, so it'll look something like this: 317 | 318 | ```bash 319 | tee body.json <<-EOF 320 | { 321 | "secret": "my-secret-value", 322 | "artifacts": [ 323 | { 324 | "type": "docker/image", 325 | "name": "hello-world/nginx", 326 | "reference": "111122223333.dkr.ecr.us-west-2.amazonaws.com/hello-world/nginx:${TAG}" 327 | } 328 | ] 329 | } 330 | EOF 331 | 332 | curl -k -X POST \ 333 | -H 'content-type:application/json' \ 334 | -d @body.json \ 335 | https://your-spinnaker-url/api/v1/webhooks/webhook/hello-world 336 | ``` 337 | 338 | Trigger your Jenkins build, and it should do the following: 339 | * Build a new Docker image 340 | * Push it to your ECR repo 341 | * Trigger the Spinnaker pipeline 342 | 343 | Then the Spinnaker pipeline will: 344 | * Start the Spinnaker pipeline 345 | * Parse the artifact with your generated tag 346 | * Replace the `hello-world/nginx` with the reference from your passed-in artifact 347 | * Deploy the hydrated manifest 348 | 349 | ## Put the Kubernetes Manifest in GitHub 350 | 351 | Go into Spinnaker to your pipeline, and grab the Kubernetes manifest 352 | 353 | Go into your GitHub repo (or create a repo), and create the manifest as a file somewhere in the repo (for example, at `/app/manifests/manifest.yml`). 354 | 355 | It should look something like this: 356 | 357 | ```yml 358 | apiVersion: apps/v1 359 | kind: Deployment 360 | metadata: 361 | name: hello-today 362 | spec: 363 | replicas: 3 364 | selector: 365 | matchLabels: 366 | app: hello-today 367 | template: 368 | metadata: 369 | labels: 370 | app: hello-today 371 | lb: hello-today 372 | spec: 373 | containers: 374 | - image: hello-world/nginx 375 | name: primary 376 | ports: 377 | - containerPort: 80 378 | ``` 379 | 380 | * In Spinnaker, go to your "Deploy (Manifest)" stage, and go down to the "Manifest" section. 
381 | * Select "Artifact" for the "Manifest Source" 382 | * Select "Define New Artifact". Populate these fields: 383 | * Account: "my-github-credential" (or whatever you specified for the credential name) 384 | * Content URL: `https://api.github.com/repos/$ORG/$REPO/contents/$FILEPATH` (`https://github.mydomain.com/api/v3/repos/$ORG/$REPO/$FILEPATH` for GHE). Replace the org, repo, and filepath with relevant entries for your file. For example: 385 | * GitHub.com: `https://api.github.com/repos/baxterthehacker/public-repo/contents/path/to/file.yml`. 386 | * GHE: `https://github.mydomain.com/api/v3/repos/baxterthehacker/public-repo/contents/path/to/file.yml`) 387 | 388 | Save the pipeline, and re-trigger the Jenkins build. This should do the following: 389 | 390 | Trigger your Jenkins build, and it should do the following: 391 | * Build a new Docker image 392 | * Push it to your ECR repo 393 | * Trigger the Spinnaker pipeline 394 | 395 | Then the Spinnaker pipeline will: 396 | * Start the Spinnaker pipeline 397 | * Parse the artifact with your generated tag 398 | * Pull the Kubernetes manifest from GitHub (Enterprise) 399 | * Replace the `hello-world/nginx` with the reference from your passed-in artifact 400 | * Deploy the hydrated manifest 401 | * Wait for the pods to be fully up 402 | 403 | Verify your deployed image is the most recent tag. 404 | 405 | ## Remove the default artifact 406 | 407 | * Go into the configuration for your pipeline 408 | * Go to your artifact constraint, and click the pencil icon 409 | * Uncheck the default artifact checkbox 410 | 411 | Save the pipeline. 412 | 413 | Do a hard refresh of the page to verify the default artifact is removed (click on the pencil again). 414 | 415 | Save the pipeline, and re-trigger the Jenkins build. 
This should do the following: 416 | 417 | Trigger your Jenkins build, and it should do the following: 418 | * Build a new Docker image 419 | * Push it to your ECR repo 420 | * Trigger the Spinnaker pipeline 421 | 422 | Then the Spinnaker pipeline will: 423 | * Start the Spinnaker pipeline 424 | * Parse the artifact with your generated tag 425 | * Pull the Kubernetes manifest from GitHub (Enterprise) 426 | * Replace the `hello-world/nginx` with the reference from your passed-in artifact 427 | * Deploy the hydrated manifest 428 | * Wait for the pods to be fully up -------------------------------------------------------------------------------- /guides/setup-dev-environment.md: -------------------------------------------------------------------------------- 1 | # Setup Local Debugging for Spinnaker Services 2 | 3 | ## Minimum System requirements 4 | - Windows or Mac OS X 5 | - 16GB of Memory 6 | - 30GB of Available Storage 7 | 8 | This allows you to do something like this: 9 | 10 | * OSX/Windows workstation, with an Ubuntu VM running in multipass, with everything directly wired up. 
11 | * Some services running locally in your workstation (via IntelliJ) 12 | * All other services running in Minnaker (on the VM) 13 | 14 | For example: 15 | * OSX/Windows using IP 192.168.64.1 and the VM using 192.168.64.6 16 | * Orca running on http://192.168.64.1:8083 17 | * All other services running on 192.168.64.6 (for example, Clouddriver will be on http://192.168.64.6:7002) 18 | 19 | # Install Instructions 20 | 21 | ## Mac OS X 22 | 23 | * Install [homebrew](https://brew.sh/) 24 | 25 | ```bash 26 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" 27 | ``` 28 | 29 | ## Windows or Mac OS X 30 | 31 | * Install a [JDK](https://adoptopenjdk.net/installation.html) 11.0.8 32 | 33 | * Mac OS X 34 | 35 | ```bash 36 | brew tap AdoptOpenJDK/openjdk 37 | brew cask install adoptopenjdk11 38 | ``` 39 | 40 | * Windows [instructions](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html) 41 | 42 | 43 | * Install [Multipass](https://multipass.run/) 44 | 45 | * Mac instructions 46 | ```bash 47 | brew cask install multipass 48 | ``` 49 | * Windows [instructions](https://multipass.run/download/windows) 50 | 51 | * Install [IntelliJ Community Edition](https://www.jetbrains.com/idea/download/) 52 | 53 | * Mac instructions 54 | ```bash 55 | brew cask install intellij-idea-ce 56 | ``` 57 | * Windows [instructions](https://adoptopenjdk.net/installation.html#x64_win-jdk) 58 | 59 | * Install Yarn (installs Node.js if not installed). 60 | * Mac [instructions](https://classic.yarnpkg.com/en/docs/install#mac-stable) 61 | ```bash 62 | brew install yarn 63 | ``` 64 | * Windows [instructions](https://classic.yarnpkg.com/en/docs/install#windows-stable) 65 | 66 | * Install `kubectl`. 
67 | * Mac instructions 68 | ```bash 69 | brew install kubectl 70 | ``` 71 | * Windows [instructions](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-on-windows) 72 | 73 | 74 | # Getting Spinnaker Up and Running 75 | 76 | Open two terminals one will be for shell access into minnaker-vm the other will be for host machine. 77 | - Windows or Mac OS X terminal will be referred to as [host] 78 | - minnaker-vm terminal will be referred to as [minnaker-vm] 79 | 80 | ## Install Spinnaker in a Multipass VM 81 | 82 | 1. [minnaker-vm] Start a multipass vm **with 2 cores, 10GB of memory, 30GB of storage** 83 | 84 | ```bash 85 | multipass launch -c 2 -m 10G -d 30G --name minnaker-vm 86 | ``` 87 | 88 | 1. [minnaker-vm] Shell into your multipass vm 89 | 90 | ```bash 91 | multipass shell minnaker-vm 92 | ``` 93 | 94 | 1. [minnaker-vm] Download and install Minnaker (use open source, no-auth mode) 95 | 96 | ```bash 97 | curl -LO https://github.com/armory/minnaker/releases/latest/download/minnaker.tgz 98 | tar -xzvf minnaker.tgz 99 | ./minnaker/scripts/no_auth_install.sh -o 100 | ``` 101 | 102 | 1. [minnaker-vm] When it's done, you'll get the IP address of Minnaker. Remember this (or you can always just run `cat /etc/spinnaker/.hal/public_endpoint`) 103 | 104 | *(if you accidentally forget to use no auth or open source, you can run `./minnaker/scripts/utils/remove_auth.sh` and `./minnaker/scripts/utils/switch_to_oss.sh`)* 105 | 106 | ## Prepare Host machine to connect to the Minnaker-VM 107 | 108 | 1. [minnaker-vm] Run this script to ensure each Spinnaker service gets a K8s LoadBalancer and can be accessed from your host machine. 109 | 110 | ```bash 111 | ./minnaker/scripts/utils/expose_local.sh 112 | ``` 113 | 114 | 6. [minnaker-vm] Check on the status of spinnaker 115 | 116 | ``` 117 | kubectl get pods -n spinnaker 118 | ``` 119 | All pods need to show `1/1` for `READY`. 120 | 121 | 7. 
[host] You can now browse to spinnaker at https://192.168.64.6 122 | - Troubleshooting: 123 | - `Service Unavailable`: wait until spinnaker starts up, it can take a while to start up (download all docker images) the above step will show you if it is up and running. 124 | 125 | 8. [minnaker-vm] Expose the service you want to debug (example here is orca) 126 | ```bash 127 | ./minnaker/scripts/utils/external_service_setup.sh orca 128 | ``` 129 | 130 | You can also expose multiple services 131 | ```bash 132 | ./minnaker/scripts/utils/external_service_setup.sh orca echo 133 | ``` 134 | 135 | 9. [host] Setup your host config files 136 | - Create/edit the file `~/.spinnaker/spinnaker-local.yml`, and paste the previously copied output. 137 | ``` 138 | services: 139 | front50: 140 | baseUrl: http://192.168.64.6:8080 141 | redis: 142 | baseUrl: http://192.168.64.6:6379 143 | clouddriver: 144 | baseUrl: http://192.168.64.6:7002 145 | orca: 146 | host: 0.0.0.0 147 | echo: 148 | baseUrl: http://192.168.64.6:8089 149 | deck: 150 | baseUrl: http://192.168.64.6:9000 151 | rosco: 152 | baseUrl: http://192.168.64.6:8087 153 | gate: 154 | baseUrl: http://192.168.64.6:8084 155 | ``` 156 | - Create/edit the config file for the service you are going to debug (example orca). 157 | - [minnaker-vm] 158 | ```bash 159 | cat /etc/spinnaker/.hal/default/staging/orca.yml 160 | ``` 161 | - [host] create a `~/.spinnaker/orca.yml` file with the above files contents. 162 | 163 | 10. Choose a working directory, and go there. I usually use `~/git/spinnaker` 164 | 165 | ```bash 166 | mkdir -p ~/git/spinnaker 167 | cd ~/git/spinnaker 168 | ``` 169 | 170 | 11. Clone the service you want 171 | 172 | ```bash 173 | git clone https://github.com/spinnaker/orca.git 174 | ``` 175 | 176 | _or, if you have a Git SSH key set up_ 177 | 178 | ```bash 179 | git clone git@github.com:spinnaker/orca.git 180 | ``` 181 | 182 | 12. 
Change the branch 183 | 184 | ```bash 185 | cd orca 186 | git branch -a 187 | ``` 188 | 189 | You'll see a list of branches (like `remotes/origin/release-1.22.x`). The last bit (after the last slash) is the branch name. Check out that branch. 190 | 191 | ```bash 192 | git checkout release-1.22.x 193 | ``` 194 | 195 | 13. Open IntelliJ 196 | 197 | 14. Open your project 198 | 199 | * If you don't have a project open, you'll see a "Welcome to IntelliJ IDEA" screen. 200 | 201 | 1. Click "Open or Import" 202 | 203 | 2. Navigate to your directory (e.g., `~/git/spinnaker/orca`) 204 | 205 | 3. Click on `build.gradle` and click "Open" 206 | 207 | 4. Select "Open as Project" 208 | 209 | * If you already have one or more projects open, do the following: 210 | 211 | 1. Use the menu "File" > "Open" 212 | 213 | 2. Navigate to your directory (e.g., `~/git/spinnaker/orca`) 214 | 215 | 3. Click on `build.gradle` and click "Open" 216 | 217 | 4. Select "Open as Project" 218 | 219 | 15. Wait for IntelliJ to finish importing and indexing the project (this can take a while). 220 | 221 | 16. Through the next few steps, if you hit an "Unable to find Main" error or fields are grayed out, reimport the project: 222 | 223 | 1. View > Tool Windows > Gradle 224 | 225 | 2. In the Gradle window, right click "Orca" and then click "Reimport Gradle Project" 226 | 227 | 17. In the top right corner of the project window, there's an "Add Configuration" button. Click it. 228 | 229 | 18. Click the little '+' sign in the top left corner, and select "Application" 230 | 231 | 19. Give it a name. Like "Main" or "Run Orca" 232 | 233 | 20. Click the three dots next to "Main Class". Either wait for it to load and select "Main (com.netflix.spinnaker.orca)" or click on "Project" and navigate to `orca > orca-web > src > main > groovy > com.netflix.spinnaker > orca > Main` 234 | 235 | 21. In the dropdown for "Use classpath of module", select "orca-web_main" 236 | 237 | 22. Click "Apply" and then "OK" 238 | 239 | 23. 
To build and run the thing, click the little green triangle next to your configuration (top right corner, kinda) 240 | 241 | Now magic happens. 242 | 243 | ## Some Cleanup Commands for later 244 | 245 | ### How to reset your minnaker-vm 246 | 247 | [minnaker-vm] Run the following to no longer debug from host 248 | 249 | ```bash 250 | ./minnaker/scripts/utils/external_service_setup.sh 251 | ``` 252 | 253 | ### How to stop spinnaker 254 | 255 | [host] Run the following to stop the minnaker-vm (spinnaker) 256 | 257 | ```bash 258 | multipass stop minnaker-vm 259 | ``` 260 | 261 | ## [Optional] Setup kubectl on host 262 | 263 | 1. [minnaker-vm] Get your kubernetes config file 264 | 265 | ```bash 266 | kubectl config view --raw 267 | ``` 268 | 269 | Example Output: 270 | ``` 271 | apiVersion: v1 272 | clusters: 273 | - cluster: 274 | certificate-authority-data: YOUR_CERT_HERE 275 | server: https://127.0.0.1:6443 276 | name: default 277 | contexts: 278 | - context: 279 | cluster: default 280 | namespace: spinnaker 281 | user: default 282 | name: default 283 | current-context: default 284 | kind: Config 285 | preferences: {} 286 | users: 287 | - name: default 288 | user: 289 | password: YOUR_PASSWORD_HERE 290 | username: admin 291 | ``` 292 | 293 | 1. [host] Save the command output from above command `kubectl config view --raw` to `~/.kube/minnaker` on host machine 294 | 295 | 1. [minnaker-vm] To get the IP of minnaker-vm 296 | 297 | ```bash 298 | cat /etc/spinnaker/.hal/public_endpoint 299 | ``` 300 | 301 | 1. 
[host] Edit `~/.kube/minnaker` to have the IP address of the minnaker-vm 302 | New File: 303 | ``` 304 | apiVersion: v1 305 | clusters: 306 | - cluster: 307 | certificate-authority-data: YOUR_CERT_HERE 308 | server: https://192.168.64.6:6443 309 | name: default 310 | contexts: 311 | - context: 312 | cluster: default 313 | namespace: spinnaker 314 | user: default 315 | name: default 316 | current-context: default 317 | kind: Config 318 | preferences: {} 319 | users: 320 | - name: default 321 | user: 322 | password: YOUR_PASSWORD_HERE 323 | username: admin 324 | ``` 325 | 326 | 1. [host] Setup `kubectl` from HOST to check on the deploy 327 | 328 | ``` 329 | export KUBECONFIG=~/.kube/minnaker 330 | kubectl get pods -n spinnaker 331 | 332 | ``` 333 | or always specify `--kubeconfig ~/.kube/minnaker` 334 | ``` 335 | kubectl --kubeconfig ~/.kube/minnaker get pods -n spinnaker 336 | ``` 337 | 338 | 2. [host] Now you can run local kubectl command 339 | ```bash 340 | kubectl get pods -n spinnaker 341 | ``` 342 | 343 | ## Start doing plugin-ey things 344 | 345 | Follow the "debugging" section here: https://github.com/spinnaker-plugin-examples/pf4jStagePlugin 346 | 347 | notes: 348 | * Create the `plugins` directory in the git repo (e.g., `~/git/spinnaker/orca/plugins`) and put the `.plugin-ref` in there 349 | * If you don't see the gradle tab, you can get to it with View > Tool Windows > Gradle 350 | 351 | ## Build and test the randomWait stage 352 | 353 | This assumes you have a Github account, and are logged in. 354 | 355 | 1. You *probably* want to work on a fork. Go to github.com/spinnaker-plugin-examples/pf4jStagePlugin 356 | 357 | 1. In the top right corner, click "Fork" and choose your username to create a fork. For example, mine is `justinrlee` so I end up with github.com/justinrlee/pf4jStagePlugin 358 | 359 | 1. On your workstation, choose a working directory. 
For example, `~/git/justinrlee` 360 | 361 | ```bash 362 | mkdir -p ~/git/justinrlee 363 | cd ~/git/justinrlee 364 | ``` 365 | 366 | 1. Clone the repo 367 | 368 | ```bash 369 | git clone https://github.com/justinrlee/pf4jStagePlugin.git 370 | ``` 371 | 372 | _or, if you have a Git SSH key set up_ 373 | 374 | ```bash 375 | git clone git@github.com:justinrlee/pf4jStagePlugin.git 376 | ``` 377 | 378 | 1. Check out a tag. 379 | 380 | If you are using Spinnaker 1.19.x, you probably need a 1.0.x tag (1.0.x is compatible 1.19, 1.1.x is compatible with 1.20) 381 | 382 | List available tags: 383 | 384 | ```bash 385 | cd pf4jStagePlugin 386 | git tag -l 387 | ``` 388 | 389 | Check out the tag you want: 390 | 391 | ```bash 392 | git checkout v1.0.17 393 | ``` 394 | 395 | Create a branch off of it (optional, but good if you're gonna be making changes). This creates a branch called custom-stage 396 | 397 | ```bash 398 | git switch -c custom-stage 399 | ``` 400 | 401 | 1. Build the thing from the CLI 402 | 403 | ```bash 404 | ./gradlew releaseBundle 405 | ``` 406 | 407 | This will generate an orca .plugin-ref file (`random-wait-orca/build/orca.plugin-ref`). 408 | 409 | 1. Copy the `orca.plugin-ref` file to the `plugins` directory in your `orca` repo. 410 | 411 | Create the destination directory - this will depend on where you cloned the orca repo 412 | 413 | ```bash 414 | mkdir -p ~/git/spinnaker/orca/plugins 415 | ``` 416 | 417 | Copy the file 418 | 419 | ```bash 420 | cp random-wait-orca/build/orca.plugin-ref ~/git/spinnaker/orca/plugins/ 421 | ``` 422 | 423 | 1. 
Create the orca-local.yml file in `~/.spinnaker/` 424 | 425 | This tells Spinnaker to enable and use the plugin 426 | 427 | Create this file at `~/.spinnaker/orca-local.yml`: 428 | 429 | ```bash 430 | # ~/.spinnaker/orca-local.yml 431 | spinnaker: 432 | extensibility: 433 | plugins: 434 | Armory.RandomWaitPlugin: 435 | enabled: true 436 | version: 1.0.17 437 | extensions: 438 | armory.randomWaitStage: 439 | enabled: true 440 | config: 441 | defaultMaxWaitTime: 60 442 | ``` 443 | 444 | 1. In IntelliJ (where you have the Orca project open), link the plugin project to your current project 445 | 446 | 1. Open the Gradle window if it's not already open (View > Tool Windows > Gradle) 447 | 448 | 1. In the Gradle window, click the little '+' sign 449 | 450 | 1. Navigate to your plugin directory (e.g., `/git/justinrlee/pf4jStagePlugin`), and select `build.gradle` and click Open 451 | 452 | 1. In the Gradle window, right click "orca" and click "Reimport Gradle Project" 453 | 454 | 1. In IntelliJ, create a new build configuration 455 | 456 | 1. In the top right, next to the little hammer icon, there's a dropdown. Click "Edit Configurations..." 457 | 458 | 1. Click the '+' sign in the top left, and select "Application" 459 | 460 | 1. Call it something cool. Like "Build and Test Plugin" 461 | 462 | 1. Select the main class (Either wait for it to load and select "Main (com.netflix.spinnaker.orca)" or click on "Project" and navigate to `orca > orca-web > src > main > groovy > com.netflix.spinnaker > orca > Main`) 463 | 464 | 1. Put this in the "VM Options" field: '`-Dpf4j.mode=development`' 465 | 466 | 1. In the dropdown for "Use classpath of module", select "orca-web_main" 467 | 468 | 1. In the "Before launch" section of the window, click the '+' sign and add "Build Project" 469 | 470 | 1. Select "Build" in the "Before launch" section and click the '-' sign to remove it (you don't need both "Build" and "Build Project") 471 | 472 | 1. 
Click "Apply" and then "OK" 473 | 474 | 1. Run your stuff. 475 | 476 | 1. If the unmodified Orca is still running, click the little stop icon (red square in top right corner) 477 | 478 | 1. Select your new build configuration in the dropdown 479 | 480 | 1. Click the runicon (little green triangle) 481 | 482 | 1. In the console output you should see something that looks like this: 483 | 484 | ``` 485 | 2020-04-30 10:17:41.242 INFO 53937 --- [ main] com.netflix.spinnaker.orca.Main : [] Starting Main on justin-mbp-16.lan with PID 53937 (/Users/justin/dev/spinnaker/orca/orca-web/build/classes/groovy/main started by justin in /Users/justin/dev/spinnaker/orca) 486 | 2020-04-30 10:17:41.245 INFO 53937 --- [ main] com.netflix.spinnaker.orca.Main : [] The following profiles are active: test,local 487 | 488 | ... 489 | 490 | 2020-04-30 10:17:44.276 WARN 53937 --- [ main] c.n.s.config.PluginsAutoConfiguration : [] No remote repositories defined, will fallback to looking for a 'repositories.json' file next to the application executable 491 | 2020-04-30 10:17:44.410 INFO 53937 --- [ main] org.pf4j.AbstractPluginManager : [] Plugin 'Armory.RandomWaitPlugin@unspecified' resolved 492 | 2020-04-30 10:17:44.411 INFO 53937 --- [ main] org.pf4j.AbstractPluginManager : [] Start plugin 'Armory.RandomWaitPlugin@unspecified' 493 | 2020-04-30 10:17:44.413 INFO 53937 --- [ main] i.a.p.s.wait.random.RandomWaitPlugin : [] RandomWaitPlugin.start() 494 | ``` 495 | 496 | 1. If you see "no class Main.main" or something, in the Gradle window, try right click on "orca" and reimport Gradle project and try again. 497 | 498 | 1. Test your stuff 499 | 500 | 1. Go into the Spinnaker UI (should be http://your-VM-ip:9000) 501 | 502 | 1. Go to applications > spin > pipelines 503 | 504 | 1. Create a new pipeline 505 | 506 | 1. Add stage 507 | 508 | 1. Edit stage as JSON (bottom right) 509 | 510 | 1. 
Paste this in there: 511 | 512 | ```json 513 | { 514 | "maxWaitTime": 15, 515 | "name": "Test RandomWait", 516 | "type": "randomWait" 517 | } 518 | ``` 519 | 520 | 1. Update stage 521 | 522 | 1. Save changes 523 | 524 | 1. Click back to pipelines (pipelines tab at top) 525 | 526 | Magic. Maybe. Maybe not. 527 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Spinnaker All-In-One (Minnaker) Quick Start 2 | 3 | Minnaker is a simple way to install Spinnaker inside a VM. 4 | 5 | ## Background 6 | 7 | Minnaker performs the following actions when run on a single Linux instance: 8 | 9 | * Installs [k3s](https://k3s.io/) with Traefik. 10 | * Installs minio in k3s with a local volume. 11 | * Installs mysql in k3s. 12 | * Installs redis in k3s. 13 | * Installs **[Spinnaker Operator](https://github.com/armory/spinnaker-operator)**. 14 | * Clones the "minnaker" branch in https://github.com/armory/spinnaker-kustomize-patches for the purposes of configuring Spinnaker. 15 | * Installs and configures **[Spinnaker](https://github.com/spinnaker)** or **[Armory](https://armory.io)** using the **Spinnaker Operator**. 16 | * Exposes Spinnaker using an Ingress. NOTE: If you're using an AWS EC2 instance, make sure you add port 443 to the security group. 17 | * Minnaker uses local authentication. The username is `admin` and the password is randomly generated when you install Minnaker. Find more details about getting the password in [Accessing Spinnaker](#accessing-spinnaker). 18 | * For the full list of customizations and configurations - please check out the [kustomization-minnaker.yml] (https://github.com/armory/spinnaker-kustomize-patches/blob/minnaker/recipes/kustomization-minnaker.yml) file. 
19 | 20 | ## Requirements 21 | 22 | To use Minnaker, make sure your Linux instance meets the following requirements: 23 | 24 | * Linux distribution running in a VM or bare metal 25 | * Ubuntu 18.04 or Debian 10 (VM or bare metal) 26 | * 2 vCPUs (recommend 4) 27 | * 8GiB of RAM (recommend 16) 28 | * 30GiB of HDD (recommend 40+) 29 | * NAT or Bridged networking with access to the internet 30 | * Install `curl`, `git`, and `tar` (if they're not already installed): 31 | * `sudo apt-get install curl git tar` 32 | * Port `443` on your VM needs to be accessible from your workstation / browser. By default, Minnaker installs Spinnaker and configures it to listen on port `443`, using paths `/` and `/api/v1`(for the UI and API). 33 | * OSX 34 | * Docker Desktop local Kubernetes cluster enabled 35 | * At least 6 GiB of memory allocated to Docker Desktop 36 | 37 | * On Ubuntu, the Minnaker installer will install K3s for you (a minimal installation of Kubernetes), so you do not have to pre-install Docker or Kubernetes. 38 | 39 | ## Changelog 40 | 41 | * 2/XX/2021 - Major update - install.sh has been replaced to use the spinnaker operator as the default installation method. Todo: Many of the convience scripts will also need to be updated to use the operator as well. If you would still like to use Halyard - please reference [Release 0.0.23](https://github.com/armory/minnaker/releases/tag/0.0.22) 42 | * operator_install.sh replaces install.sh 43 | * removing operator_install.sh 44 | * ToDo: Clean up all other scripts to remove dependency on halyard. 45 | * see notes below on currently supported scripts 46 | 47 | --- 48 | 49 | ## Installation 50 | 51 | 1. Login (SSH) to your VM or bare metal box. 52 | 2. Download the minnaker tarball and untar: 53 | 54 | ```bash 55 | curl -L https://github.com/armory/minnaker/archive/v0.1.3.tar.gz | tar -zxv 56 | ``` 57 | 58 | 3. Change into the directory: 59 | 60 | ```bash 61 | cd minnaker-0.1.* 62 | ``` 63 | 64 | 4. Execute the install script. 
Note the following options before running the script: 65 | * Add the `-o` flag if you want to install open source Spinnaker. 66 | * By default, the script installs Armory Spinnaker and uses your public IP address (determined by `curl`ing `ifconfig.co`) as the endpoint for Spinnaker. 67 | * For bare metal or a local VM, specify the IP address for your server with `-P` flag. `-P` is the 'Public Endpoint' and must be an address or DNS name you will use to access Spinnaker (an IP address reachable by your end users). 68 | 69 | ```bash 70 | ./scripts/install.sh 71 | ``` 72 | 73 | For example, the following command installs OSS Spinnaker on a VM with the IP address of `192.168.10.1`: 74 | 75 | ```bash 76 | export PRIVATE_IP=192.168.10.1 77 | ./scripts/install.sh -o -P $PRIVATE_IP 78 | ``` 79 | 80 | Installation can take between 5-10 minutes to complete depending on VM size. 81 | 82 | 5. Once Minnaker is up and running, you can make changes to its configuration using `kustomize` and the `spinnaker-operator` under the folder `~/minnaker-1.0.1/spinsvc`. For example, to change the version of Spinnaker that is installed, you can do this: 83 | 84 | * Using your favorite editor, edit the file: `~/minnaker-1.0.1/spinsvc/core_config/patch-version.yml` 85 | * Update line 8 to the version you desire. e.g. `version: 2.24.0` 86 | * Then either run `cd ~/minnaker-1.0.1/spinsvc && ./deploy.sh` or `kubectl apply -k ~/minnaker-1.0.1/spinsvc` 87 | * To find the latest versions available: 88 | * [Spinnaker](https://spinnaker.io/community/releases/versions/#latest-stable) 89 | * [Armory](https://docs.armory.io/docs/release-notes/rn-armory-spinnaker/) 90 | * *By default, Minnaker will install the latest GA version of Spinnaker or Armory available.* 91 | 92 | ## Accessing Spinnaker 93 | 94 | 1. A helper script called `spin_endpoint` was created during the installation process that prints out the URL associated with your spinnaker instance as well as the credentials (as necessary). 
95 | 96 | ```bash 97 | spin_endpoint 98 | ``` 99 | 100 | outputs: 101 | ```bash 102 | https://192.168.64.3 103 | username: 'admin' 104 | password: 'xxxxx' 105 | ``` 106 | 107 | 2. In your browser, navigate to the address (https://192.168.64.3/) for Spinnaker from step 1. This is Deck, the Spinnaker UI. 108 | 109 | If you installed Minnaker on a local VM, you must access it from your local machine. If you deployed Minnaker in the cloud, such as an EC2 instance, you can access Spinnaker from any machine that has access to that 'Public IP'. 110 | 111 | 3. Log in to Deck with the following credentials: 112 | 113 | Username: `admin` 114 | 115 | Password: the password printed by the `spin_endpoint` command in step 1 116 | 117 | ## Changing Your Spinnaker Configuration 118 | 119 | 1. SSH into the machine where you have installed Spinnaker 120 | 2. Modify the contents of `~/spinnaker/spinsvc/kustomization.yml` and the associated patch files. 121 | 122 | **PRO TIP:** Use the [VS Code - Remote SSH extension](https://code.visualstudio.com/docs/remote/ssh) to interact with your minnaker instance, and manage and edit multiple files. 123 | 124 | See [Armory's Spinnaker Operator](https://docs.armory.io/docs/installation/operator/). 125 | 126 | By default, the install script clones [Armory's Spinnaker Kustomize Patches repo (branch: minnaker)](https://github.com/armory/spinnaker-kustomize-patches/tree/minnaker). This branch has been pre-configured with many features to make learning Spinnaker easy. 127 | 128 | [Armory Operator Reference](https://docs.armory.io/docs/installation/operator-reference/) 129 | 130 | 3. When you finish, save your changes and run `deploy.sh` located under `~/spinnaker/spinsvc`. 131 | 132 | ## Next Steps 133 | 134 | After you finish your installation of Minnaker, go through our [AWS QuickStart](https://docs.armory.io/spinnaker/Armory-Spinnaker-Quickstart-1/) to learn how to deploy applications to AWS with Spinnaker. 135 | 136 | Alternatively, take a look at the available Minnaker [guides](/guides/). 
137 | 138 | To learn more about the Spinnaker Operator check out the docs here: https://docs.armory.io/docs/installation/operator/ 139 | 140 | Also check out the [`spinnaker-kustomize-patches`](https://github.com/armory/spinnaker-kustomize-patches#kustomize-patches-for-armory) repo 141 | 142 | ## Details 143 | 144 | * If you shut down and restart the instance and it gets different IP addresses, you'll have to update Spinnaker with the new IP address(es): 145 | * Run `refresh_endpoint.sh` and this will try to detect your new IP address and update the spinnaker configuration to your new IP address. 146 | 147 | * Certificate support isn't yet documented. There are several ways to achieve this: 148 | * Using actual cert files: create certs that Traefik can use in the ingress definition(s) 149 | * Using ACM or equivalent: put a certificate in front of the instance and change the overrides 150 | * Either way, you *must* use certificates that your browser will trust that match your DNS name (your browser may not prompt to trust the untrusted API certificate) 151 | 152 | * If you need to get the password again, you can execute the command `spin_endpoint` (this was added by the install script under `/usr/local/bin`) 153 | 154 | ## Troubleshooting 155 | 156 | Under the hood, Minnaker just wraps Spinnaker Operator, so it still runs all the components of Spinnaker as Kubernetes pods in the `spinnaker` namespace. You can use standard Kubernetes troubleshooting steps to troubleshoot Spinnaker components. 
157 | 158 | For example, to see all the components of Minnaker: 159 | 160 | ```bash 161 | $ kubectl -n spinnaker get all -o wide 162 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES 163 | pod/minio-0 1/1 Running 0 2d11h 10.42.0.11 ip-172-31-19-10 164 | pod/mariadb-0 1/1 Running 0 2d11h 10.42.0.12 ip-172-31-19-10 165 | pod/spin-redis-57966d86df-qfn9m 1/1 Running 0 2d11h 10.42.0.16 ip-172-31-19-10 166 | pod/spin-deck-778577cb65-7m6mw 1/1 Running 0 2d11h 10.42.0.13 ip-172-31-19-10 167 | pod/spin-gate-75c99f6b9d-fcgth 1/1 Running 0 2d11h 10.42.0.14 ip-172-31-19-10 168 | pod/spin-rosco-86b4b4d6b5-h4vgf 1/1 Running 0 2d11h 10.42.0.20 ip-172-31-19-10 169 | pod/spin-orca-84dd94c7f9-ch2t5 1/1 Running 0 2d11h 10.42.0.18 ip-172-31-19-10 170 | pod/spin-clouddriver-564d98585-p9m76 1/1 Running 0 2d11h 10.42.0.17 ip-172-31-19-10 171 | pod/spin-front50-955856785-tr8pw 1/1 Running 0 2d11h 10.42.0.19 ip-172-31-19-10 172 | pod/spin-echo-5b5dc87b4c-ldv97 1/1 Running 0 2d11h 10.42.0.15 ip-172-31-19-10 173 | 174 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR 175 | service/mariadb ClusterIP 10.43.69.47 3306/TCP 2d11h app=mariadb 176 | service/minio ClusterIP 10.43.44.26 9000/TCP 2d11h app=minio 177 | service/spin-deck ClusterIP 10.43.68.156 9000/TCP 2d11h app=spin,cluster=spin-deck 178 | service/spin-gate ClusterIP 10.43.230.74 8084/TCP 2d11h app=spin,cluster=spin-gate 179 | service/spin-redis ClusterIP 10.43.102.9 6379/TCP 2d11h app=spin,cluster=spin-redis 180 | service/spin-echo ClusterIP 10.43.147.178 8089/TCP 2d11h app=spin,cluster=spin-echo 181 | service/spin-orca ClusterIP 10.43.27.1 8083/TCP 2d11h app=spin,cluster=spin-orca 182 | service/spin-clouddriver ClusterIP 10.43.181.214 7002/TCP 2d11h app=spin,cluster=spin-clouddriver 183 | service/spin-rosco ClusterIP 10.43.187.43 8087/TCP 2d11h app=spin,cluster=spin-rosco 184 | service/spin-front50 ClusterIP 10.43.121.22 8080/TCP 2d11h app=spin,cluster=spin-front50 185 | 186 | NAME READY UP-TO-DATE 
AVAILABLE AGE CONTAINERS IMAGES SELECTOR 187 | deployment.apps/spin-redis 1/1 1 1 2d11h redis gcr.io/kubernetes-spinnaker/redis-cluster:v2 app=spin,cluster=spin-redis 188 | deployment.apps/spin-deck 1/1 1 1 2d11h deck docker.io/armory/deck:2.14.0-5f306f6-df9097d-rc6 app=spin,cluster=spin-deck 189 | deployment.apps/spin-gate 1/1 1 1 2d11h gate docker.io/armory/gate:1.14.0-42ccb4f-a2428e6-rc5 app=spin,cluster=spin-gate 190 | deployment.apps/spin-rosco 1/1 1 1 2d11h rosco docker.io/armory/rosco:0.16.0-7c38ed6-508e253-rc5 app=spin,cluster=spin-rosco 191 | deployment.apps/spin-orca 1/1 1 1 2d11h orca docker.io/armory/orca:2.12.0-67f03ef-c3b6f15-rc8 app=spin,cluster=spin-orca 192 | deployment.apps/spin-clouddriver 1/1 1 1 2d11h clouddriver docker.io/armory/clouddriver:6.5.1-f969aaf-2f123de-rc6 app=spin,cluster=spin-clouddriver 193 | deployment.apps/spin-front50 1/1 1 1 2d11h front50 docker.io/armory/front50:0.21.0-cca684d-4e0f6fc-rc5 app=spin,cluster=spin-front50 194 | deployment.apps/spin-echo 1/1 1 1 2d11h echo docker.io/armory/echo:2.10.0-48991a0-e3df630-rc6 app=spin,cluster=spin-echo 195 | 196 | NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR 197 | replicaset.apps/spin-redis-57966d86df 1 1 1 2d11h redis gcr.io/kubernetes-spinnaker/redis-cluster:v2 app=spin,cluster=spin-redis,pod-template-hash=57966d86df 198 | replicaset.apps/spin-deck-778577cb65 1 1 1 2d11h deck docker.io/armory/deck:2.14.0-5f306f6-df9097d-rc6 app=spin,cluster=spin-deck,pod-template-hash=778577cb65 199 | replicaset.apps/spin-gate-75c99f6b9d 1 1 1 2d11h gate docker.io/armory/gate:1.14.0-42ccb4f-a2428e6-rc5 app=spin,cluster=spin-gate,pod-template-hash=75c99f6b9d 200 | replicaset.apps/spin-rosco-86b4b4d6b5 1 1 1 2d11h rosco docker.io/armory/rosco:0.16.0-7c38ed6-508e253-rc5 app=spin,cluster=spin-rosco,pod-template-hash=86b4b4d6b5 201 | replicaset.apps/spin-orca-84dd94c7f9 1 1 1 2d11h orca docker.io/armory/orca:2.12.0-67f03ef-c3b6f15-rc8 app=spin,cluster=spin-orca,pod-template-hash=84dd94c7f9 
202 | replicaset.apps/spin-clouddriver-564d98585 1 1 1 2d11h clouddriver docker.io/armory/clouddriver:6.5.1-f969aaf-2f123de-rc6 app=spin,cluster=spin-clouddriver,pod-template-hash=564d98585 203 | replicaset.apps/spin-front50-955856785 1 1 1 2d11h front50 docker.io/armory/front50:0.21.0-cca684d-4e0f6fc-rc5 app=spin,cluster=spin-front50,pod-template-hash=955856785 204 | replicaset.apps/spin-echo-5b5dc87b4c 1 1 1 2d11h echo docker.io/armory/echo:2.10.0-48991a0-e3df630-rc6 app=spin,cluster=spin-echo,pod-template-hash=5b5dc87b4c 205 | 206 | NAME READY AGE CONTAINERS IMAGES 207 | statefulset.apps/minio 1/1 2d11h minio minio/minio 208 | statefulset.apps/mariadb 1/1 2d11h mariadb mariadb:10.4.12-bionic 209 | ``` 210 | 211 | To list all of the pods: 212 | 213 | ```bash 214 | $ kubectl -n spinnaker get pods 215 | NAME READY STATUS RESTARTS AGE 216 | minio-0 1/1 Running 0 2d11h 217 | mariadb-0 1/1 Running 0 2d11h 218 | spin-redis-57966d86df-qfn9m 1/1 Running 0 2d11h 219 | spin-deck-778577cb65-7m6mw 1/1 Running 0 2d11h 220 | spin-gate-75c99f6b9d-fcgth 1/1 Running 0 2d11h 221 | spin-rosco-86b4b4d6b5-h4vgf 1/1 Running 0 2d11h 222 | spin-orca-84dd94c7f9-ch2t5 1/1 Running 0 2d11h 223 | spin-clouddriver-564d98585-p9m76 1/1 Running 0 2d11h 224 | spin-front50-955856785-tr8pw 1/1 Running 0 2d11h 225 | spin-echo-5b5dc87b4c-ldv97 1/1 Running 0 2d11h 226 | ``` 227 | 228 | To see information about a specific pod: 229 | 230 | ```bash 231 | $ kubectl -n spinnaker describe pod spin-gate-75c99f6b9d-fcgth 232 | Name: spin-gate-75c99f6b9d-fcgth 233 | Namespace: spinnaker 234 | Priority: 0 235 | Node: ip-172-31-19-10/172.31.19.10 236 | Start Time: Tue, 18 Feb 2020 16:49:51 +0000 237 | Labels: app=spin 238 | app.kubernetes.io/managed-by=halyard 239 | app.kubernetes.io/name=gate 240 | app.kubernetes.io/part-of=spinnaker 241 | app.kubernetes.io/version=2.18.0 242 | cluster=spin-gate 243 | pod-template-hash=75c99f6b9d 244 | Annotations: 245 | Status: Running 246 | IP: 10.42.0.14 247 | IPs: 248 | IP: 
10.42.0.14 249 | Controlled By: ReplicaSet/spin-gate-75c99f6b9d 250 | Containers: 251 | gate: 252 | Container ID: containerd://86aeeaa76477b83a36466f9267c3319caca7ea410928a9d5206d1e1e893cb850 253 | Image: docker.io/armory/gate:1.14.0-42ccb4f-a2428e6-rc5 254 | Image ID: docker.io/armory/gate@sha256:29fe06df04a21cb00a0cd94af95db8c441b42078b94648af07a46a98264057aa 255 | Port: 8084/TCP 256 | Host Port: 0/TCP 257 | State: Running 258 | Started: Tue, 18 Feb 2020 16:50:29 +0000 259 | Ready: True 260 | Restart Count: 0 261 | Readiness: exec [wget --no-check-certificate --spider -q http://localhost:8084/api/v1/health] delay=0s timeout=1s period=10s #success=1 #failure=3 262 | Environment: 263 | SPRING_PROFILES_ACTIVE: local 264 | Mounts: 265 | /opt/spinnaker/config from spin-gate-files-1546480033 (rw) 266 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-tj4cz (ro) 267 | Conditions: 268 | Type Status 269 | Initialized True 270 | Ready True 271 | ContainersReady True 272 | PodScheduled True 273 | Volumes: 274 | spin-gate-files-1546480033: 275 | Type: Secret (a volume populated by a Secret) 276 | SecretName: spin-gate-files-1546480033 277 | Optional: false 278 | default-token-tj4cz: 279 | Type: Secret (a volume populated by a Secret) 280 | SecretName: default-token-tj4cz 281 | Optional: false 282 | QoS Class: BestEffort 283 | Node-Selectors: 284 | Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s 285 | node.kubernetes.io/unreachable:NoExecute for 300s 286 | Events: 287 | ``` 288 | 289 | And to see the logs for a given pod: 290 | 291 | ```bash 292 | $ kubectl -n spinnaker logs -f spin-gate-75c99f6b9d-fcgth 293 | 2020-02-21 01:06:20.802 INFO 1 --- [applications-10] c.n.s.g.s.internal.Front50Service : ---> HTTP GET http://spin-front50.spinnaker:8080/v2/applications?restricted=false 294 | 2020-02-21 01:06:20.802 INFO 1 --- [-applications-9] c.n.s.g.s.internal.ClouddriverService : ---> HTTP GET 
http://spin-clouddriver.spinnaker:7002/applications?restricted=false&expand=true 295 | 2020-02-21 01:06:20.805 INFO 1 --- [-applications-9] c.n.s.g.s.internal.ClouddriverService : <--- HTTP 200 http://spin-clouddriver.spinnaker:7002/applications?restricted=false&expand=true (2ms) 296 | 2020-02-21 01:06:20.806 INFO 1 --- [applications-10] c.n.s.g.s.internal.Front50Service : <--- HTTP 200 http://spin-front50.spinnaker:8080/v2/applications?restricted=false (4ms) 297 | 2020-02-21 01:06:25.808 INFO 1 --- [applications-10] c.n.s.g.s.internal.Front50Service : ---> HTTP GET http://spin-front50.spinnaker:8080/v2/applications?restricted=false 298 | 2020-02-21 01:06:25.808 INFO 1 --- [-applications-9] c.n.s.g.s.internal.ClouddriverService : ---> HTTP GET http://spin-clouddriver.spinnaker:7002/applications?restricted=false&expand=true 299 | 2020-02-21 01:06:25.810 INFO 1 --- [-applications-9] c.n.s.g.s.internal.ClouddriverService : <--- HTTP 200 http://spin-clouddriver.spinnaker:7002/applications?restricted=false&expand=true (2ms) 300 | 2020-02-21 01:06:25.813 INFO 1 --- [applications-10] c.n.s.g.s.internal.Front50Service : <--- HTTP 200 http://spin-front50.spinnaker:8080/v2/applications?restricted=false (4ms) 301 | ``` 302 | 303 | ## Uninstalling K3s 304 | * To stop the Kubernetes cluster (all K3s services and containers): `/usr/local/bin/k3s-killall.sh`. To remove K3s from the machine entirely, run `/usr/local/bin/k3s-uninstall.sh` instead. 305 | 306 | ## Uninstalling Minnaker on macOS 307 | * Delete the `spinnaker` and `spinnaker-operator` namespaces. 308 | ```bash 309 | kubectl --context docker-desktop delete ns spinnaker 310 | kubectl --context docker-desktop delete ns spinnaker-operator 311 | ``` 312 | * (Optional) Delete the `ingress-nginx` namespace: `kubectl --context docker-desktop delete ns ingress-nginx` 313 | * (Optional) Delete the local resources (including all pipeline definitions): `rm -rf ~/minnaker` 314 | --------------------------------------------------------------------------------