├── .gitignore
├── LICENSE
├── README.md
├── common
│   ├── docker-images
│   │   ├── awscli-ubuntu
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── cfops-runner
│   │   │   ├── Dockerfile
│   │   │   └── Readme.md
│   │   ├── docker-image-update
│   │   │   ├── README.md
│   │   │   └── ci
│   │   │       ├── pipeline.yml
│   │   │       └── tasks
│   │   │           ├── dummy-task.sh
│   │   │           └── dummy-task.yml
│   │   ├── multi-purpose-ubuntu
│   │   │   ├── Dockerfile
│   │   │   └── README.md
│   │   ├── pcf-usage-ubuntu
│   │   │   └── Dockerfile
│   │   └── send-email-task
│   │       └── Dockerfile
│   ├── images
│   │   ├── artifactory-pipeline1.jpg
│   │   ├── azure-blobstore-pipeline1.jpg
│   │   ├── azure-blobstore-portal.jpg
│   │   ├── bg-pipeline-01.jpg
│   │   ├── bg-pipeline-01a.jpg
│   │   ├── bg-pipeline-icon.jpg
│   │   ├── bg-pipeline.psd
│   │   ├── bgapp-screenshot-b.jpg
│   │   ├── bgapp-screenshot-g.jpg
│   │   ├── c0-logo-01.png
│   │   ├── cfops-icon.psd
│   │   ├── cfops-pipeline.jpg
│   │   ├── cilounge_top1.jpg
│   │   ├── cilounge_top2.jpg
│   │   ├── cilounge_top3.jpg
│   │   ├── cilounge_top4.jpg
│   │   ├── concourse-and-artifactory.png
│   │   ├── concourse-and-azureblob.png
│   │   ├── concourse-and-bosh-1.0.jpg
│   │   ├── concourse-and-bosh-lite.jpg
│   │   ├── concourse-and-gcs.png
│   │   ├── concourse-and-private-registry.jpg
│   │   ├── concourse-and-s3-images.png
│   │   ├── concourse-local-files75.jpg
│   │   ├── docker-and-bosh.jpg
│   │   ├── double_icons.psd
│   │   ├── email_with_attachment.png
│   │   ├── fly_execute_01.png
│   │   ├── gcp-dashboard.jpg
│   │   ├── google-cloud-storage-pipeline1.jpg
│   │   ├── multi-spaces-icon.png
│   │   ├── multi-spaces-icon.psd
│   │   ├── multi-spaces-pipeline.jpg
│   │   ├── pcf-cfops-backup-cleanup.jpg
│   │   ├── pcf-cfops-backup-pipeline.jpg
│   │   └── remote_workers_icon.png
│   └── tasks
│       ├── dummy-task.sh
│       ├── dummy-task.yml
│       └── placeholder-task.yml
├── concourse-deployment-patterns
│   └── concourse-remote-worker
│       ├── README.md
│       └── remote-worker-vsphere.yml
├── concourse-pipeline-hacks
│   ├── README.md
│   ├── check-resource
│   │   ├── README.md
│   │   └── pipeline.yml
│   ├── concourse-atom-plugin
│   │   └── README.md
│   ├── deprecated
│   │   ├── concourse-on-bosh-1.0
│   │   │   ├── README.md
│   │   │   ├── concourse-with-router-registrar.yml
│   │   │   └── concourse.yml
│   │   └── concourse-on-bosh-lite
│   │       └── README.md
│   ├── docker-images-from-repo
│   │   ├── README.md
│   │   ├── inspect-docker-image.yml
│   │   ├── resource-curl-with-image-from-git.yml
│   │   ├── resource-pivnet-with-image-from-git.yml
│   │   ├── resource-with-image-from-git.yml
│   │   └── task-with-image-from-git.yml
│   ├── docker-images-from-s3
│   │   ├── README.md
│   │   ├── hello-s3-with-resource.yml
│   │   ├── hello-s3.yml
│   │   └── package-docker-images.yml
│   ├── fly-execute
│   │   └── README.md
│   ├── http-proxy-config
│   │   └── README.md
│   ├── private-docker-registry
│   │   ├── README.md
│   │   ├── docker-registry-release
│   │   │   ├── README.md
│   │   │   ├── cloud-config.yml
│   │   │   └── docker-registry.yml
│   │   ├── inline-pipeline.yml
│   │   ├── pipeline.yml
│   │   └── tasks
│   │       ├── find-command.sh
│   │       ├── find-command.yml
│   │       ├── ls-command.sh
│   │       ├── ls-command.yml
│   │       ├── os-version.sh
│   │       └── os-version.yml
│   ├── tagger
│   │   ├── README.md
│   │   └── tagger.py
│   └── task-run-user
│       ├── README.md
│       └── pipeline.yml
├── concourse-pipeline-patterns
│   ├── README.md
│   ├── gated-pipelines
│   │   ├── 01-simple
│   │   │   ├── README.md
│   │   │   └── gated-pipeline-01-simple.yml
│   │   ├── 02-shipit
│   │   │   ├── README.md
│   │   │   └── gated-pipeline-02-shipit.yml
│   │   ├── 03-shipit-enhanced
│   │   │   ├── README.md
│   │   │   ├── gated-pipeline-03-shipit-enhanced.yml
│   │   │   └── params.yml
│   │   ├── 04-github-pull-request
│   │   │   ├── README.md
│   │   │   ├── gated-pipeline-04-github-pr.yml
│   │   │   └── params.yml
│   │   └── README.md
│   ├── parameterized-pipeline-tasks
│   │   ├── README.md
│   │   ├── package-tutorials.yml
│   │   └── tasks
│   │       └── package-with-dockerfile.yml
│   ├── time-triggered-pipelines
│   │   ├── 01-single-time-trigger
│   │   │   ├── README.md
│   │   │   └── scheduled-pipeline-01.yml
│   │   ├── 02-multiple-time-triggers
│   │   │   ├── README.md
│   │   │   ├── params-for-s3.yml
│   │   │   ├── scheduled-pipeline-02-with-s3.yml
│   │   │   └── scheduled-pipeline-02.yml
│   │   └── README.md
│   ├── uaa-authentication
│   │   └── README.md
│   └── vault-integration
│       └── README.md
├── pipelines
│   ├── appdev
│   │   └── blue-green-app-deployment
│   │       ├── README.md
│   │       ├── bgd-app
│   │       │   ├── images
│   │       │   │   ├── Blue-Green-icon.png
│   │       │   │   ├── Blue-station.png
│   │       │   │   └── Green-station.png
│   │       │   ├── index.js
│   │       │   ├── package.json
│   │       │   ├── src
│   │       │   │   └── NumberBlackBox.js
│   │       │   └── tests
│   │       │       └── NumberBlackBox-tests.js
│   │       ├── ci
│   │       │   ├── credentials.yml.sample
│   │       │   ├── pipeline.yml
│   │       │   └── tasks
│   │       │       ├── app-manifest-prep
│   │       │       ├── app-manifest-prep.yml
│   │       │       ├── current-app-get-info
│   │       │       ├── current-app-get-info.yml
│   │       │       ├── load-tests
│   │       │       ├── load-tests.yml
│   │       │       ├── run-unit-tests
│   │       │       ├── unit-tests.yml
│   │       │       ├── update-routes
│   │       │       └── update-routes.yml
│   │       └── manifests
│   │           └── manifest.yml
│   ├── azure
│   │   └── azure-blobstore-integration
│   │       ├── README.md
│   │       └── pipeline.yml
│   ├── deprecated
│   │   └── pcf-cfops-backup
│   │       ├── README.md
│   │       ├── ci
│   │       │   ├── pipelines
│   │       │   │   ├── credentials.yml.sample
│   │       │   │   ├── pcf_cfops_backup.yml
│   │       │   │   └── pcf_cfops_s3backup.yml
│   │       │   └── tasks
│   │       │       ├── cfops_backup.yml
│   │       │       ├── cfops_s3backup.yml
│   │       │       └── cleanup-backup.yml
│   │       └── scripts
│   │           ├── cfops_backup.sh
│   │           ├── cfops_s3backup.sh
│   │           └── cleanup-old-files-in-repo.sh
│   ├── docker
│   │   └── pks-kubectl-image
│   │       ├── Dockerfile
│   │       ├── README.md
│   │       ├── pipeline.yml
│   │       └── pks_kubectl_image_params.sh
│   ├── google
│   │   └── google-cloud-storage-integration
│   │       ├── README.md
│   │       └── pipeline.yml
│   ├── jfrog
│   │   └── artifactory-integration
│   │       ├── README.md
│   │       └── pipeline.yml
│   ├── notifications
│   │   └── email-with-attachments
│   │       ├── README.md
│   │       ├── build-metadata.md
│   │       ├── ci
│   │       │   ├── credentials.yml.sample
│   │       │   ├── email_with_metadata.yml
│   │       │   ├── pipeline.yml
│   │       │   ├── scripts
│   │       │   │   └── send-email.js
│   │       │   └── tasks
│   │       │       └── send-email.yml
│   │       └── my-attachment.txt
│   ├── pcf
│   │   ├── certificates
│   │   │   ├── monitor-expiring-certificates
│   │   │   │   ├── README.md
│   │   │   │   ├── pcf_params.yml
│   │   │   │   └── pipeline.yml
│   │   │   └── rotate-internal-certificates
│   │   │       ├── README.md
│   │   │       ├── pcf_params.yml
│   │   │       └── pipeline.yml
│   │   ├── install-opsmgr
│   │   │   └── vsphere
│   │   │       ├── README.md
│   │   │       ├── global_params.yml
│   │   │       ├── opsmgr_vault_params.sh
│   │   │       ├── pcf_params.yml
│   │   │       └── pipeline.yml
│   │   ├── pks
│   │   │   ├── configure-ingress-kubo
│   │   │   │   ├── README.md
│   │   │   │   ├── params.yml
│   │   │   │   ├── pipeline.yml
│   │   │   │   └── pks_configure_ingress_kubo_params.sh
│   │   │   ├── configure-pks-cluster
│   │   │   │   ├── README.md
│   │   │   │   ├── params.yml
│   │   │   │   ├── pipeline.yml
│   │   │   │   └── pks_create_cluster_params.sh
│   │   │   ├── install-pks
│   │   │   │   ├── README.md
│   │   │   │   ├── global_params.yml
│   │   │   │   ├── pcf_params.yml
│   │   │   │   ├── pipeline.yml
│   │   │   │   ├── pks_params.yml
│   │   │   │   ├── pks_params_1.0.yml
│   │   │   │   ├── pks_params_1.2.yml
│   │   │   │   └── pks_vault_params.sh
│   │   │   └── vsphere
│   │   │       ├── configure-pks-api-lb
│   │   │       │   ├── README.md
│   │   │       │   ├── params.yml
│   │   │       │   ├── pipeline.yml
│   │   │       │   └── pks_api_nsxv_lb_params.sh
│   │   │       └── nsxt
│   │   │           ├── README.md
│   │   │           ├── nsxt_params.sh
│   │   │           ├── params.yml
│   │   │           └── pipeline.yml
│   │   └── vsphere
│   │       ├── nsxt
│   │       │   ├── README.md
│   │       │   ├── nsxt_params.sh
│   │       │   ├── params.yml
│   │       │   └── pipeline.yml
│   │       └── nsxv
│   │           ├── README.md
│   │           ├── nsxv_vault_params.sh
│   │           ├── params.yml
│   │           └── pipeline.yml
│   └── vmware
│       └── install-harbor
│           ├── README.md
│           ├── global_params.yml
│           ├── harbor_params.yml
│           ├── harbor_vault_params.sh
│           ├── pcf_params.yml
│           ├── pipeline.yml
│           └── vsphere
│               └── configure-harbor-api-lb
│                   ├── README.md
│                   ├── harbor_api_nsxv_lb_params.sh
│                   ├── params.yml
│                   └── pipeline.yml
└── tasks
    ├── concourse
    │   └── will-worker-connect
    │       ├── README.md
    │       └── task.yml
    ├── generate-yaml-file.yml
    ├── pcf
    │   ├── apply-changes-single-product
    │   │   ├── task.sh
    │   │   └── task.yml
    │   ├── apply-changes
    │   │   ├── task.sh
    │   │   └── task.yml
    │   ├── certificates
    │   │   ├── check-expiring-certificates
    │   │   │   ├── task.sh
    │   │   │   └── task.yml
    │   │   └── regenerate-internal-certificates
    │   │       ├── task.sh
    │   │       └── task.yml
    │   ├── configure-tile
    │   │   ├── task.sh
    │   │   └── task.yml
    │   ├── delete-tile
    │   │   ├── task.sh
    │   │   └── task.yml
    │   ├── disable-errands
    │   │   ├── task.sh
    │   │   └── task.yml
    │   ├── pks
    │   │   ├── configure-pks-cli-user
    │   │   │   ├── task.sh
    │   │   │   └── task.yml
    │   │   ├── create-pks-cluster
    │   │   │   ├── task.sh
    │   │   │   └── task.yml
    │   │   ├── delete-all-pks-clusters
    │   │   │   ├── task.sh
    │   │   │   └── task.yml
    │   │   └── delete-pks-cluster
    │   │       ├── task.sh
    │   │       └── task.yml
    │   ├── stage-product
    │   │   ├── task.sh
    │   │   └── task.yml
    │   └── upload-product-and-stemcell
    │       ├── task.sh
    │       └── task.yml
    └── vsphere
        ├── nsxt
        │   ├── create-ip-block
        │   │   ├── task.sh
        │   │   └── task.yml
        │   ├── create-ip-pool
        │   │   ├── task.sh
        │   │   └── task.yml
        │   ├── create-logical-router-port
        │   │   ├── task.sh
        │   │   └── task.yml
        │   ├── create-logical-router
        │   │   ├── task.sh
        │   │   └── task.yml
        │   ├── create-logical-switch
        │   │   ├── task.sh
        │   │   └── task.yml
        │   ├── create-nat-rule
        │   │   ├── task.sh
        │   │   └── task.yml
        │   └── create-static-route
        │       ├── task.sh
        │       └── task.yml
        └── nsxv
            ├── configure-nsxv-lb-profile
            │   ├── task.sh
            │   └── task.yml
            ├── configure-nsxv-lb-rules-vip
            │   ├── task.sh
            │   └── task.yml
            ├── configure-nsxv-lb-rules
            │   ├── task.sh
            │   └── task.yml
            ├── create-edge
            │   ├── task.sh
            │   └── task.yml
            └── destroy-edge
                ├── task.sh
                └── task.yml
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
credentials.yml
id_rsa
id_rsa.pub
fly
pipeline-uc*.yml
.DS*
node_modules/
--------------------------------------------------------------------------------
/common/docker-images/awscli-ubuntu/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04

ENV LAST_UPDATE=2017-03-01

RUN localedef -i en_US -f UTF-8 en_US.UTF-8

# install base tools, including cfcli, jq, and cf-uaac
RUN \
apt-get update && \
apt-get install -y software-properties-common && \
apt-add-repository ppa:brightbox/ruby-ng && \
apt-get update && \
apt-get -y upgrade && \
apt-get -y install build-essential curl ruby2.4 ruby2.4-dev libxml2-dev libsqlite3-dev libxslt1-dev libpq-dev libmysqlclient-dev zlib1g-dev wget nfs-common cifs-utils smbclient python python-pip && \
gem install bosh_cli --no-ri --no-rdoc && \
wget -O cfcli.tgz "https://cli.run.pivotal.io/stable?release=linux64-binary&source=github" && \
tar -xvzf cfcli.tgz && \
chmod 755 cf && \
mv cf /usr/bin && \
wget -O jq "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" && \
chmod 755 ./jq && \
mv ./jq /usr/bin && \
apt-get -y install git && \
apt-get -y install sshpass && \
gem install cf-uaac

# install cfops and plugins
RUN \
wget https://github.com/pivotalservices/cfops/releases/download/v3.0.5/cfops && \
mv ./cfops /usr/bin && \
chmod 755 /usr/bin/cfops && \
mkdir /usr/bin/plugins && \
wget https://github.com/pivotalservices/cfops-mysql-plugin/releases/download/v0.0.22/cfops-mysql-plugin_binaries.tgz && \
tar xvf ./cfops-mysql-plugin_binaries.tgz && \
mv ./pipeline/output/builds/linux64/cfops-mysql-plugin /usr/bin/plugins && \
chmod 755 /usr/bin/plugins/cfops-mysql-plugin && \
wget https://github.com/pivotalservices/cfops-redis-plugin/releases/download/v0.0.14/cfops-redis-plugin_binaries.tgz && \
tar xvf ./cfops-redis-plugin_binaries.tgz && \
mv ./pipeline/output/builds/linux64/cfops-redis-plugin /usr/bin/plugins && \
chmod 755 /usr/bin/plugins/cfops-redis-plugin && \
wget https://github.com/pivotalservices/cfops-rabbitmq-plugin/releases/download/v0.0.5/cfops-rabbitmq-plugin_binaries.tgz && \
tar xvf ./cfops-rabbitmq-plugin_binaries.tgz && \
mv ./pipeline/output/linux64/cfops-rabbitmq-plugin /usr/bin/plugins && \
chmod 755 /usr/bin/plugins/cfops-rabbitmq-plugin && \
wget https://github.com/pivotalservices/cfops-nfs-plugin/releases/download/v0.0.4/cfops-nfs-plugin_binaries.tgz && \
tar xvf ./cfops-nfs-plugin_binaries.tgz && \
mv ./pipeline/output/builds/linux64/cfops-nfs-plugin /usr/bin/plugins && \
chmod 755 /usr/bin/plugins/cfops-nfs-plugin && \
cd /

# install awscli
RUN \
pip install awscli --upgrade
--------------------------------------------------------------------------------
/common/docker-images/awscli-ubuntu/README.md:
--------------------------------------------------------------------------------
# AWS CLI Ubuntu Docker image

This Dockerfile contains the image definition to build a multi-purpose Ubuntu image containing the AWS CLI, Bosh CLI, Bosh Init, Git client, CF CLI, Spruce, Spiff and the CFOps CLI.

A complete description of the image and the software versions currently installed in it can be found at its Docker Hub location:
https://hub.docker.com/r/pcfservices/docker-concourse-cf-tools/

In a Concourse task definition, refer to the image as docker:///pcfservices/docker-concourse-cf-tools

In order to create your own image with this file, perform the following steps after saving it on your local system (a consolidated example follows the list):
- Install the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) on your system
- In the same directory where the Dockerfile is located, run command ```docker build .```
- Check the created image ID by running command ```docker images```
- Tag the image: ```docker tag <image-id> <docker-hub-user>/<image-name>:latest```
- Login into Docker Hub using your Docker credentials: ```docker login```
- Push the new image to Docker Hub: ```docker push <docker-hub-user>/<image-name>:latest```
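
Taken together, the steps above amount to the short shell session below. This is a minimal sketch: `<image-id>`, `<docker-hub-user>` and `<image-name>` are placeholders to substitute with your own values, not literal arguments.

```
# build the image from the Dockerfile in the current directory
docker build .

# find the ID of the image that was just built
docker images

# tag the image with your Docker Hub namespace and a name of your choice
docker tag <image-id> <docker-hub-user>/<image-name>:latest

# authenticate against Docker Hub and publish the image
docker login
docker push <docker-hub-user>/<image-name>:latest
```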
--------------------------------------------------------------------------------
/common/docker-images/cfops-runner/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04

ENV LAST_UPDATE=2017-03-01

# Install.
# Note: wget is installed explicitly (it is not part of the base image),
# and the downloaded cfops binary is made executable.
RUN \
apt-get update && \
apt-get upgrade -y && \
apt-get install -y ruby ruby-dev wget && \
wget https://github.com/pivotalservices/cfops/releases/download/v3.0.5/cfops && \
mv cfops /usr/bin && \
chmod 755 /usr/bin/cfops && \
gem install cf-uaac

RUN localedef -i en_US -f UTF-8 en_US.UTF-8 \
&& useradd -m -s /bin/bash pcfdev \
&& echo 'pcfdev ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers

USER pcfdev
--------------------------------------------------------------------------------
/common/docker-images/cfops-runner/Readme.md:
--------------------------------------------------------------------------------
# cfops Ubuntu Docker image

This Dockerfile contains just the latest cfops, since the multi-purpose Docker image carried a lot of outdated dependencies.
--------------------------------------------------------------------------------
/common/docker-images/docker-image-update/README.md:
--------------------------------------------------------------------------------
![Concourse and a Private Docker Registry](https://raw.githubusercontent.com/pivotalservices/concourse-pipeline-samples/master/common/images/concourse-and-private-registry.jpg)

# Pipeline for updating a Docker image in Docker Hub

TBD

### Read more

- [Application pipeline deploying to multiple CF spaces](https://github.com/pivotalservices/sample-app-pipeline)

- [Blue-Green application deployment pipeline with Concourse](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/blue-green-app-deployment)

- [PCF Backup CI pipeline using CFOps](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/pcf-cfops-backup)

- [Deploying Concourse on a Bosh 1.0 Director](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/concourse-on-bosh-1.0)

- [Deploying Concourse on Bosh-lite](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/concourse-on-bosh-lite)

- [Sample Concourse application pipeline](https://github.com/pivotalservices/sample-app-pipeline)
--------------------------------------------------------------------------------
/common/docker-images/docker-image-update/ci/tasks/dummy-task.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -x

echo "Running dummy task"
sleep 2
find .
--------------------------------------------------------------------------------
/common/docker-images/docker-image-update/ci/tasks/dummy-task.yml:
--------------------------------------------------------------------------------
---
platform: linux

image_resource:
  type: docker-image
  source:
    repository: pcfservices/docker-concourse-cf-tools
    tag: "latest"

inputs:
- name: concourse-pipeline-samples
- name: bosh-release
- name: concourse-release
- name: spruce-release
- name: spiff-release
- name: cf-cli-release
- name: cf-uaac-release
- name: cfops-release
- name: cfops-mysql-release
- name: cfops-rabbitmq-release
- name: cfops-redis-release
- name: cfops-nfs-release

run:
  path: concourse-pipeline-samples/docker-image-update/ci/tasks/dummy-task.sh
--------------------------------------------------------------------------------
/common/docker-images/multi-purpose-ubuntu/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04

ENV LAST_UPDATE=2017-03-01

# Install.
RUN \
apt-get update && \
apt-get -y upgrade && \
apt-get -y install build-essential curl ruby ruby-dev libxml2-dev libsqlite3-dev libxslt1-dev libpq-dev libmysqlclient-dev zlib1g-dev wget nfs-common cifs-utils smbclient python python-pip && \
gem install bosh_cli --no-ri --no-rdoc && \
wget -O cfcli.tgz "https://cli.run.pivotal.io/stable?release=linux64-binary&source=github" && \
tar -xvzf cfcli.tgz && \
chmod 755 cf && \
mv cf /usr/bin && \
wget -O jq "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" && \
chmod 755 ./jq && \
mv ./jq /usr/bin && \
apt-get -y install git && \
apt-get -y install sshpass && \
wget https://github.com/pivotalservices/cfops/releases/download/v3.0.5/cfops && \
mv cfops /usr/bin && \
chmod 755 /usr/bin/cfops && \
cd /usr/bin && mkdir plugins && cd plugins && \
wget https://pivotal-cfops.s3.amazonaws.com/mysql-plugin-release/linux64/v0.0.22/cfops-mysql-plugin && \
chmod 755 cfops-mysql-plugin && \
wget https://pivotal-cfops.s3.amazonaws.com/redis-plugin-release/linux64/v0.0.14/cfops-redis-plugin && \
chmod 755 cfops-redis-plugin && \
wget https://pivotal-cfops.s3.amazonaws.com/rabbit-plugin-release/linux64/v0.0.5/cfops-rabbitmq-plugin && \
chmod 755 cfops-rabbitmq-plugin && \
wget https://pivotal-cfops.s3.amazonaws.com/nfs-plugin-release/linux64/v0.0.4/cfops-nfs-plugin && \
chmod 755 cfops-nfs-plugin && cd / && \
gem install cf-uaac && \
cd /tmp && \
wget https://github.com/spf13/hugo/releases/download/v0.16/hugo_0.16_linux-64bit.tgz && \
tar xvf ./hugo_0.16_linux-64bit.tgz && \
chmod 755 ./hugo && mv ./hugo /usr/bin && \
rm hugo_0.16_linux-64bit.tgz && rm *.md

RUN localedef -i en_US -f UTF-8 en_US.UTF-8 \
&& useradd -m -s /bin/bash pcfdev \
&& echo 'pcfdev ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers

USER pcfdev

RUN pip install --upgrade --user awscli
--------------------------------------------------------------------------------
/common/docker-images/multi-purpose-ubuntu/README.md:
--------------------------------------------------------------------------------
# Multi-purpose Ubuntu Docker image

This Dockerfile contains the image definition to build a multi-purpose Ubuntu image containing the Bosh CLI, Bosh Init, Git client, CF CLI, Spruce, Spiff and the CFOps CLI.

A complete description of the image and the software versions currently installed in it can be found at its Docker Hub location:
https://hub.docker.com/r/pcfservices/docker-concourse-cf-tools/

In a Concourse task definition, refer to the image as docker:///pcfservices/docker-concourse-cf-tools

In order to create your own image with this file, perform the following steps after saving it on your local system:
- Install the [Docker Toolbox](https://www.docker.com/products/docker-toolbox) on your system
- In the same directory where the Dockerfile is located, run command ```docker build .```
- Check the created image ID by running command ```docker images```
- Tag the image: ```docker tag <image-id> <docker-hub-user>/<image-name>:latest```
- Login into Docker Hub using your Docker credentials: ```docker login```
- Push the new image to Docker Hub: ```docker push <docker-hub-user>/<image-name>:latest```
--------------------------------------------------------------------------------
/common/docker-images/pcf-usage-ubuntu/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04

# Install.
RUN \
apt-get update && \
apt-get -y install build-essential curl ruby ruby-dev libxml2-dev libsqlite3-dev libxslt1-dev libpq-dev libmysqlclient-dev zlib1g-dev wget nfs-common && \
gem install bosh_cli --no-ri --no-rdoc && \
wget -O cfcli.tgz "https://cli.run.pivotal.io/stable?release=linux64-binary&source=github" && \
tar -xvzf cfcli.tgz && \
chmod 755 cf && \
mv cf /usr/bin && \
wget -O jq "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" && \
chmod 755 ./jq && \
mv ./jq /usr/bin && \
apt-get -y install git && \
gem install cf-uaac && \
curl -sL https://deb.nodesource.com/setup_6.x | sudo bash - && \
apt-get -y install nodejs
--------------------------------------------------------------------------------
/common/docker-images/send-email-task/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:14.04

# Install.
RUN \
apt-get update && \
apt-get -y install build-essential curl wget git && \
wget -O jq "https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64" && \
chmod 755 ./jq && \
mv ./jq /usr/bin && \
curl -sL https://deb.nodesource.com/setup_6.x | sudo bash - && \
apt-get -y install nodejs && \
mkdir app && cd app && \
npm install nodemailer
--------------------------------------------------------------------------------
/common/images/artifactory-pipeline1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/artifactory-pipeline1.jpg
--------------------------------------------------------------------------------
/common/images/azure-blobstore-pipeline1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/azure-blobstore-pipeline1.jpg
--------------------------------------------------------------------------------
/common/images/azure-blobstore-portal.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/azure-blobstore-portal.jpg
--------------------------------------------------------------------------------
/common/images/bg-pipeline-01.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bg-pipeline-01.jpg
--------------------------------------------------------------------------------
/common/images/bg-pipeline-01a.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bg-pipeline-01a.jpg
--------------------------------------------------------------------------------
/common/images/bg-pipeline-icon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bg-pipeline-icon.jpg
--------------------------------------------------------------------------------
/common/images/bg-pipeline.psd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bg-pipeline.psd
--------------------------------------------------------------------------------
/common/images/bgapp-screenshot-b.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bgapp-screenshot-b.jpg
--------------------------------------------------------------------------------
/common/images/bgapp-screenshot-g.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/bgapp-screenshot-g.jpg
--------------------------------------------------------------------------------
/common/images/c0-logo-01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/c0-logo-01.png
--------------------------------------------------------------------------------
/common/images/cfops-icon.psd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cfops-icon.psd
--------------------------------------------------------------------------------
/common/images/cfops-pipeline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cfops-pipeline.jpg
--------------------------------------------------------------------------------
/common/images/cilounge_top1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cilounge_top1.jpg
--------------------------------------------------------------------------------
/common/images/cilounge_top2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cilounge_top2.jpg
--------------------------------------------------------------------------------
/common/images/cilounge_top3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cilounge_top3.jpg
--------------------------------------------------------------------------------
/common/images/cilounge_top4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/cilounge_top4.jpg
--------------------------------------------------------------------------------
/common/images/concourse-and-artifactory.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-artifactory.png
--------------------------------------------------------------------------------
/common/images/concourse-and-azureblob.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-azureblob.png
--------------------------------------------------------------------------------
/common/images/concourse-and-bosh-1.0.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-bosh-1.0.jpg
--------------------------------------------------------------------------------
/common/images/concourse-and-bosh-lite.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-bosh-lite.jpg
--------------------------------------------------------------------------------
/common/images/concourse-and-gcs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-gcs.png
--------------------------------------------------------------------------------
/common/images/concourse-and-private-registry.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-private-registry.jpg
--------------------------------------------------------------------------------
/common/images/concourse-and-s3-images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-and-s3-images.png
--------------------------------------------------------------------------------
/common/images/concourse-local-files75.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/concourse-local-files75.jpg
--------------------------------------------------------------------------------
/common/images/docker-and-bosh.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/docker-and-bosh.jpg
--------------------------------------------------------------------------------
/common/images/double_icons.psd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/double_icons.psd
--------------------------------------------------------------------------------
/common/images/email_with_attachment.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/email_with_attachment.png
--------------------------------------------------------------------------------
/common/images/fly_execute_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/fly_execute_01.png
--------------------------------------------------------------------------------
/common/images/gcp-dashboard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/gcp-dashboard.jpg
--------------------------------------------------------------------------------
/common/images/google-cloud-storage-pipeline1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/google-cloud-storage-pipeline1.jpg
--------------------------------------------------------------------------------
/common/images/multi-spaces-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/multi-spaces-icon.png
--------------------------------------------------------------------------------
/common/images/multi-spaces-icon.psd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/multi-spaces-icon.psd
--------------------------------------------------------------------------------
/common/images/multi-spaces-pipeline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/multi-spaces-pipeline.jpg
--------------------------------------------------------------------------------
/common/images/pcf-cfops-backup-cleanup.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/pcf-cfops-backup-cleanup.jpg
--------------------------------------------------------------------------------
/common/images/pcf-cfops-backup-pipeline.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/pcf-cfops-backup-pipeline.jpg
--------------------------------------------------------------------------------
/common/images/remote_workers_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/common/images/remote_workers_icon.png
--------------------------------------------------------------------------------
/common/tasks/dummy-task.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -x

echo "Do something here"
sleep 5
find .
--------------------------------------------------------------------------------
/common/tasks/dummy-task.yml:
--------------------------------------------------------------------------------
---
platform: linux

image_resource:
  type: docker-image
  source:
    repository: concourse/buildroot
    tag: "git"

inputs:
- name: concourse-pipeline-samples

run:
  path: concourse-pipeline-samples/common/tasks/dummy-task.sh
--------------------------------------------------------------------------------
/common/tasks/placeholder-task.yml:
--------------------------------------------------------------------------------
---
platform: linux

image_resource:
  type: docker-image
  source:
    repository: concourse/buildroot
    tag: "git"

run:
  path: sh
  args:
  - -exc
  - |
    echo "Placeholder task: do something here"
--------------------------------------------------------------------------------
/concourse-deployment-patterns/concourse-remote-worker/README.md:
--------------------------------------------------------------------------------
![Concourse Deployment Patterns](https://raw.githubusercontent.com/pivotalservices/concourse-pipeline-samples/master/common/images/remote_workers_icon.png)

# How to create a remote Concourse worker

1. Obtain the following information:
   1. Download the remote-worker-vsphere.yml in this folder
   1. From the Concourse deployment:
      1. A private key for a public key in (tsa -> authorized_keys)
      1. A public key for (tsa -> host_key -> public_key)
      1. TSA host (your web address for Concourse: external_url)
   1. vSphere details:
      1. vcenter_ip        # e.g. vsphere.company.com
      1. vcenter_user      # administrator@domain
      1. vcenter_password  # password
      1. vcenter_dc        # Datacenter1
      1. vcenter_vms       # concourse_worker_folder
      1. vcenter_templates # concourse_worker_template
      1. vcenter_ds        # datastore1
      1. vcenter_disks     # concourse_worker_disks
      1. vcenter_cluster   # Cluster1
      1. vcenter_rp        # RP1
      1. internal_ip       # 192.168.1.11
      1. network_name      # vwire-1
      1. internal_dns      # 198.168.20.20
      1. internal_gw       # 192.168.1.1
      1. internal_cidr     # 192.168.1.1/24
   1. Worker details:
      1. external_worker_tags
   1. Release versions:
      1. Get the latest from bosh.io
1. From an Ops Manager VM or other jumpbox on the remote network, run bosh create-env with the variables gathered (see the example command below)

Save creds.yml and state.json to a secure place.

# Example
```
bosh create-env \
  remote-worker-vsphere.yml \
  --state=state.json \
  --vars-store=creds.yml \
  -v hashed_password='use mkpasswd -s -m sha-512' \
  -v internal_cidr='192.168.10.0/26' \
  -v internal_gw=192.168.10.1 \
  -v internal_ip=192.168.10.60 \
  -v network_name=vwire-03 \
  -v vcenter_dc=Datacenter \
  -v vcenter_ds=a-xio \
  -v vcenter_ip=10.193.156.11 \
  -v vcenter_user=administrator@vsphere.local \
  -v vcenter_password='password' \
  -v vcenter_templates=concourse-worker-templates \
  -v vcenter_vms=concourse-vms \
  -v vcenter_disks=concourse-worker-disks \
  -v vcenter_cluster=Cluster-A \
  -v vcenter_rp=A-RP01 \
  -v internal_dns='[10.193.134.2]' \
  -v ntp_servers='[10.193.134.2]' \
  -v tsa_host=xx \
  -v tsa_host_public_key=xx \
  -v external_worker_tags='[c0lrp01]' \
  -v external_worker_private_key=xx \
  -v postgres_version=28 \
  -v postgres_sha1=c1fcec62cb9d2e95e3b191e3c91d238e2b9d23fa \
  -v concourse_version=3.13.0 \
  -v concourse_sha1=fb3bedc9f9bf2304449b90c86f6d624a6819d363 \
  -v garden_runc_version=1.13.1 \
  -v garden_runc_sha1=54cbb89cae1be0708aa056185671665d7f4b2a4f
```
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/README.md:
--------------------------------------------------------------------------------
![Pipeline Hacks](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/icons/pipeline-hacks.png)


# Concourse Pipeline Pro Tips

Hints and samples for getting poorly documented or otherwise obscure features to work in Concourse pipelines.

- [Running tasks with images on S3 for disconnected environments](docker-images-from-s3)

- [Make Concourse retrieve an older version of a resource](check-resource)

- [Test Worker Connectivity](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/tasks/concourse/will-worker-connect)

- [Run a task container with a user other than `root`](task-run-user)

- [Configure Concourse with an HTTP/HTTPS proxy](http-proxy-config)

- [Running local tasks while developing your CI pipeline in Concourse](fly-execute)

- [Running tasks and resources without a Docker registry](docker-images-from-repo)

- [Preview Concourse pipelines in ATOM](concourse-atom-plugin)

- [Tag everything in a pipeline](tagger)
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/check-resource/README.md:
--------------------------------------------------------------------------------
![Check-Resource](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/icons/concourse-check-resource.png)

# Make Concourse retrieve an older version of a resource

### The problem

Once Concourse has retrieved the latest version of a newly configured resource, it will no longer retrieve previous versions of that resource automatically, even when an older version is explicitly referenced by the pipeline.

To force Concourse to retrieve an older version of a resource, use the fly CLI command [`check-resource`](https://concourse-ci.org/fly-check-resource.html).

For example, in the sample pipeline further below, when you try to run the job `unit-test` for the first time, it will get stuck waiting for Concourse to retrieve version `2.5.0` (an older version) of the `fly-release` resource, while it shows the following message:
```
waiting for a suitable set of input versions
fly-release - pinned version {"tag":"v2.5.0"} is not available
```

### The solution

For that job to run, execute the following `fly` command to force Concourse to retrieve version `2.5.0` of the `fly-release` resource (replace `<target>` and `<pipeline-name>` with your own values):
```
fly -t <target> check-resource --resource <pipeline-name>/fly-release --from tag:v2.5.0
```

### Sample pipeline

```
---
resources:
- name: fly-release
  type: github-release
  source:
    user: concourse
    repository: concourse
jobs:
- name: unit-test
  plan:
  - do:
    - get: fly-release
      version: { tag: 'v2.5.0' }
      params: { globs: ["fly_linux_amd64"] }
    - task: test-release
      config:
        platform: linux
        image_resource:
          type: docker-image
          source: { repository: alpine }
        inputs:
        - name: fly-release
        run:
          path: sh
          args:
          - -exc
          - |
            cat ./fly-release/version
```

The complete definition file for the sample pipeline above is available for download [here](pipeline.yml).


#### [Back to Concourse Pipeline Hacks](..)
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/check-resource/pipeline.yml:
--------------------------------------------------------------------------------
---
resources:
- name: fly-release
  type: github-release
  source:
    user: concourse
    repository: concourse
jobs:
- name: unit-test
  plan:
  - do:
    - get: fly-release
      version: { tag: 'v2.5.0' }
      params: { globs: ["fly_linux_amd64"] }
    - task: test-release
      config:
        platform: linux
        image_resource:
          type: docker-image
          source: { repository: alpine }
        inputs:
        - name: fly-release
        run:
          path: sh
          args:
          - -exc
          - |
            cat ./fly-release/version
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/concourse-atom-plugin/README.md:
--------------------------------------------------------------------------------

# Preview Concourse pipelines in ATOM

If you use the ATOM editor for coding Concourse pipelines, there is a great tool available to preview pipelines directly from the editor's user interface: the [concourse-vis plugin](https://atom.io/packages/concourse-vis) by danhigham.

The plugin allows you to visualize an entire CI pipeline, to pan and zoom into specific sections of it, and to click any resource on the pipeline graph and jump straight to the corresponding section of the source code (which in my opinion is one of the best features of the plugin, as it makes navigating a pipeline's source code a lot easier).

![ATOM plugin](https://github.com/lsilvapvt/misc-support-files/raw/master/docs/images/atom-plugin01.gif)
*Preview Concourse CI pipelines in the ATOM editor*


### Installing the concourse-vis plugin

1. In the ATOM user interface, go to the editor's "Preferences" ("Settings") page

2. Click on Install

3. Search for "concourse" and you should see the "concourse-vis" plugin listed in the results

4. Click on the "Install" button for that plugin and voilà, you are done.

![ATOM plugin](https://github.com/lsilvapvt/misc-support-files/raw/master/docs/images/atom-plugin02.gif)

### Using the concourse-vis plugin

1. In the ATOM user interface, while editing a Concourse pipeline YML file, activate the pipeline preview window using keys "Control + Alt + P"

2. From the pipeline preview window:
   - pan and zoom in/out throughout the pipeline elements with your mouse
   - click on elements/resources of the pipeline graph to jump right into the corresponding source code block


---

#### [Back to Concourse Pipeline Hacks](..)
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-repo/inspect-docker-image.yml:
--------------------------------------------------------------------------------
---
resources:
- name: image-to-inspect
  type: docker-image
  source:
    repository: pivotalcf/pivnet-resource # UPDATE THIS with the targeted image repo name
    tag: "latest-final" # UPDATE THIS with the desired tag of the image

jobs:
- name: Inspect-Image
  plan:
  - get: image-to-inspect
    params:
      rootfs: true
  - task: prepare-image-to-export
    config:
      platform: linux
      inputs:
      - name: image-to-inspect
      outputs:
      - name: export-directory
      image_resource:
        type: docker-image
        source:
          repository: pivotalservices/docker-concourse-cf-tools
      run:
        path: sh
        args:
        - -exc
        - |
          cd export-directory
          cp ../image-to-inspect/metadata.json .
          mkdir rootfs
          tar -xvf ../image-to-inspect/rootfs.tar -C ./rootfs/ --exclude="dev/*"
          cd rootfs
          # add .keep file to all empty directories
          find . -type d -empty -not -path "./.git/*" -exec touch {}/.keep \;
          cd ../..
          # WHAT TO DO FROM HERE
          # Option 1)
          # hijack into the container and then issue the
          # sequence of commands below to seed the rootfs repo
          # with the files prepared above from directory "export-directory":
          #   git clone <rootfs-repo-url>
          #   cd <rootfs-repo-directory>
          #   mv ../export-directory/metadata.json .
          #   mv ../export-directory/rootfs .
          #   git add .
          #   git config --global user.email "<email>"
          #   git config --global user.name "<name>"
          #   git commit -m "Add rootfs files"
          #   git push origin master
          # Option 2)
          # Create another task in this job that will consume the
          # "export-directory" output and automatically push it to
          # a repository (e.g. git)
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-repo/resource-curl-with-image-from-git.yml:
--------------------------------------------------------------------------------
---
resource_types:
- name: curl-file-resource
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-curl-resource.git

resources:
- name: rootfs-repo
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-curl-resource.git
- name: apache-lucene-5
  type: curl-file-resource
  source:
    url: http://www-us.apache.org/dist/lucene/java/5.5.4/lucene-5.5.4.zip
    filename: lucene-5.5.4.zip

jobs:
- name: run-and-get-Apache-Lucene
  plan:
  - get: rootfs-repo
  - get: apache-lucene-5
  - task: run-with-rootfs-curl-repository
    image: rootfs-repo
    config:
      platform: linux
      inputs:
      - name: apache-lucene-5
      run:
        path: sh
        args:
        - -exc
        - |
          find .
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-repo/resource-pivnet-with-image-from-git.yml:
--------------------------------------------------------------------------------
---
resource_types:
- name: pivnet
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-pivnet-resource.git

resources:
- name: opsman-pivnet
  type: pivnet
  source:
    api_token: YOUR-PIVNET-API-TOKEN-GOES-HERE
    product_slug: ops-manager
    product_version: 1.9.*
    sort_by: semver
  check_every: 5m

- name: buildroot-git-image-repo
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-buildroot-git.git

jobs:
- name: run-and-get-files
  plan:
  - get: buildroot-git-image-repo
  - get: opsman-pivnet
    params:
      globs:
      - '*AWS.pdf'
  - task: run-and-get-file
    image: buildroot-git-image-repo
    config:
      platform: linux
      inputs:
      - name: opsman-pivnet
      run:
        path: sh
        args:
        - -exc
        - |
          find .
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-repo/resource-with-image-from-git.yml:
--------------------------------------------------------------------------------
---
resource_types:
- name: curl-file-resource
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-curl-resource.git

resources:
- name: buildroot-git-image-repo
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-buildroot-git.git

- name: apache-lucene-5
  type: curl-file-resource
  source:
    url: http://www-us.apache.org/dist/lucene/java/5.5.4/lucene-5.5.4.zip
    filename: lucene-5.5.4.zip

jobs:
- name: run-and-get-files
  plan:
  - get: buildroot-git-image-repo
  - get: apache-lucene-5
  - task: run-and-get-file
    image: buildroot-git-image-repo
    config:
      platform: linux
      inputs:
      - name: apache-lucene-5
      run:
        path: sh
        args:
        - -exc
        - |
          find .
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-repo/task-with-image-from-git.yml:
--------------------------------------------------------------------------------
---
resources:
- name: alpine-image-repo
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-alpine

- name: buildroot-git-image-repo
  type: git
  source:
    uri: https://github.com/lsilvapvt/rootfs-buildroot-git.git

jobs:
- name: Run-with-images-from-git
  plan:
  - get: alpine-image-repo
  - get: buildroot-git-image-repo
  - task: run-with-alpine-image-from-git
    image: alpine-image-repo
    config:
      platform: linux
      run:
        path: sh
        args:
        - -exc
        - |
          uname -mrs
          cat /etc/*-release
  - task: run-with-buildroot-image-from-git
    image: buildroot-git-image-repo
    config:
      platform: linux
      run:
        path: sh
        args:
        - -exc
        - |
          uname -mrs
          cat /etc/*-release
          git --version
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-s3/hello-s3-with-resource.yml:
--------------------------------------------------------------------------------
---
resources:
- name: my-image
  type: s3
  source:
    access_key_id: ((s3-access-key-id))
    bucket: ((s3-bucket))
    endpoint: ((s3-endpoint))
    secret_access_key: ((s3-secret-access-key))
    regexp: ubuntu/ubuntu-(.*).tgz

jobs:
- name: hello-ubuntu-s3-with-resource
  plan:
  - get: my-image
    params:
      unpack: true
  - task: run-with-image-from-s3-using-resource-definition
    image: my-image
    config:
      platform: linux
      run:
        path: sh
        args:
        - -c
        - echo "I can run with image from resource definition"
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-s3/hello-s3.yml:
--------------------------------------------------------------------------------
---
jobs:
- name: hello-ubuntu-s3
  plan:
  - task: run-with-image-from-s3
    config:
      platform: linux
      image_resource:
        type: s3
        source:
          access_key_id: ((s3-access-key-id))
          bucket: ((s3-bucket))
          endpoint: ((s3-endpoint))
          secret_access_key: ((s3-secret-access-key))
          regexp: ubuntu/ubuntu-(.*).tgz
          # region_name:
        params:
          unpack: true
      run:
        path: sh
        args:
        - -c
        - echo "I can run with the image from S3"
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/docker-images-from-s3/package-docker-images.yml:
--------------------------------------------------------------------------------
---
resources:
- name: ubuntu-image-to-package
  type: docker-image
  source:
    repository: ubuntu # UPDATE THIS with the targeted image repo name
    tag: 17.04 # UPDATE THIS with the desired tag of the image

- name: my-ubuntu-image-s3
  type: s3
  source:
    access_key_id: ((s3-access-key-id))
    bucket: ((s3-bucket))
    endpoint: ((s3-endpoint))
    secret_access_key: ((s3-secret-access-key))
    regexp: ubuntu/ubuntu-(.*).tgz

jobs:
- name: Download-And-Package-Ubuntu-Image
  plan:
  - get: ubuntu-image-to-package
    params:
      rootfs: true
  - task: prepare-image-to-export
    config:
      platform: linux
      inputs:
      - name: ubuntu-image-to-package
      outputs:
      - name: ubuntu
      image_resource:
        type: docker-image
        source:
          repository: ubuntu
      run:
        path: sh
        args:
        - -exc
        - |
          echo "Exporting ubuntu image"
          mkdir export-directory && cd export-directory
          cp ../ubuntu-image-to-package/metadata.json .
          mkdir rootfs
          tar -xvf ../ubuntu-image-to-package/rootfs.tar -C ./rootfs/ --exclude="dev/*"
          cd rootfs
          cd ../..
          echo "Packaging ubuntu image"
          tmp_version="17.04"
          tar -czf "ubuntu/ubuntu-${tmp_version}.tgz" -C export-directory .
          ls -la ubuntu
  - put: my-ubuntu-image-s3
    params:
      file: "ubuntu/ubuntu-*.tgz"
--------------------------------------------------------------------------------
/concourse-pipeline-hacks/http-proxy-config/README.md:
--------------------------------------------------------------------------------
![Proxy](https://github.com/lsilvapvt/misc-support-files/raw/master/docs/images/http-proxy.png)

# How to configure Concourse with an HTTP/HTTPS proxy

### The problem

Concourse workers require an HTTP/HTTPS proxy in order to access external artifacts from the internet.


### The solution

Deploy the Concourse workers with the appropriate proxy configuration for the `groundcrew` job (replace `<proxy-host>:<proxy-port>` with your proxy's address):

```
- name: worker
  ...
  jobs:
  - name: groundcrew
    release: concourse
    properties:
      http_proxy_url: <proxy-host>:<proxy-port>
      https_proxy_url: <proxy-host>:<proxy-port>
      no_proxy:
      - localhost
      - 127.0.0.1
      - mydomain.com
      - 10.190
```

Documentation on the accepted groundcrew job parameters:
https://bosh.io/jobs/groundcrew?source=github.com/concourse/concourse#p=http_proxy_url

To verify that the proxy configuration was applied after redeploying Concourse with the updates above, intercept the container of a pipeline task and check its environment variables (`env` command). They should contain the corresponding `http_proxy`, `https_proxy` and `no_proxy` variables along with their configured values from the Concourse deployment manifest.
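
For example, a quick check with the `fly` CLI could look like the sketch below (the target alias and the pipeline/job names are placeholders for your own values):

```
# open a shell inside the container of a recent build of the job
fly -t <target> intercept -j <pipeline-name>/<job-name>

# then, inside the container, list the proxy-related environment variables
env | grep -i proxy
```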
63 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/docker-registry-release/README.md: -------------------------------------------------------------------------------- 1 | ![Docker Registry with Bosh](https://raw.githubusercontent.com/pivotalservices/concourse-pipeline-samples/master/common/images/docker-and-bosh.jpg) 2 | 3 | # Deploying a Private Docker Registry using Bosh 4 | 5 | A Bosh release for Docker Registry is available at https://github.com/cloudfoundry-community/docker-registry-boshrelease and can be used to deploy a private Docker registry with Bosh. 6 | 7 | The sample provided requires a Bosh 2.0 deployment (Cloud Config-based). 8 | 9 | The sample manifest will deploy a Docker Registry with the following VM/jobs topology: 10 | 11 | ``` 12 | [PROXY_VM] 13 | | | 14 | [REGISTRY_VM1] [REGISTRY_VM2] 15 | | | 16 | [NFS_SERVER] 17 | ``` 18 | 19 | The Proxy VM load balances requests for Docker images between the two registry nodes, which in turn store and retrieve images on the shared NFS server node. 20 | 21 | The proxy's IP address is the one to be used by the tools and clients that interact with the registry. For example, a Concourse pipeline would refer to the private registry's images with address ```<proxy-ip>:5000/<image-name>```. If a DNAT or LB rule is set up at the network edge for the proxy, then the corresponding IP address and port number should be used instead. 22 | 23 | ## Deploying the Docker Registry 24 | 25 | The provided sample requires a Cloud-Config-based Bosh 2.0 Director. A sample cloud config file for a docker registry deployment is provided [here](https://github.com/pivotalservices/concourse-pipeline-samples/blob/master/private-docker-registry/docker-registry-release/cloud-config.yml). 26 | Once the Cloud Config is set for the Bosh Director, update a copy of [this sample deployment manifest](https://github.com/pivotalservices/concourse-pipeline-samples/blob/master/private-docker-registry/docker-registry-release/docker-registry.yml) with the required information tagged in it and run `bosh deploy`.
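
For reference, a minimal sketch of those steps with the Bosh 2.0 CLI, assuming a hypothetical environment alias `my-env` and the two files above saved locally:

```
# set the cloud config on the Director
bosh -e my-env update-cloud-config cloud-config.yml

# upload the stemcell and release referenced by the manifest, e.g.:
# bosh -e my-env upload-stemcell <stemcell-url>
# bosh -e my-env upload-release <release-url>

# deploy with the edited manifest (the deployment name is docker-registry)
bosh -e my-env -d docker-registry deploy docker-registry.yml
```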
27 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/docker-registry-release/cloud-config.yml: -------------------------------------------------------------------------------- 1 | azs: 2 | - name: z1 3 | cloud_properties: 4 | datacenters: 5 | - clusters: 6 | - EHC_DE08_PCF_Pod_1: # REPLACE_THIS_WITH_YOUR_CLUSTER_ID 7 | resource_pool: 8 | name: 9 | - name: z2 10 | cloud_properties: 11 | datacenters: 12 | - clusters: 13 | - EHC_DE08_PCF_Pod_1: # REPLACE_THIS_WITH_YOUR_CLUSTER_ID 14 | resource_pool: 15 | name: 16 | - name: z3 17 | cloud_properties: 18 | datacenters: 19 | - clusters: 20 | - EHC_DE08_PCF_Pod_1: # REPLACE_THIS_WITH_YOUR_CLUSTER_ID 21 | resource_pool: 22 | name: 23 | vm_types: 24 | - name: small 25 | cloud_properties: 26 | cpu: 1 27 | disk: 3000 28 | ram: 2048 29 | - name: medium 30 | cloud_properties: 31 | cpu: 2 32 | disk: 10240 33 | ram: 4096 34 | - name: large.memory 35 | cloud_properties: 36 | cpu: 4 37 | disk: 10240 38 | ram: 65536 39 | - name: large.cpu 40 | cloud_properties: 41 | cpu: 4 42 | disk: 40960 43 | ram: 4096 44 | disk_types: 45 | - name: small 46 | disk_size: 10000 47 | cloud_properties: {} 48 | - name: medium 49 | disk_size: 30000 50 | cloud_properties: {} 51 | - name: large 52 | disk_size: 50000 53 | cloud_properties: {} 54 | - name: xlarge 55 | disk_size: 100000 56 | cloud_properties: {} 57 | networks: 58 | - name: dockerregistry 59 | subnets: 60 | - az: z1 61 | cloud_properties: 62 | name: vxw-dvs-66-virtualwire-12-sid-5011-LS_PCF_01_Infra 63 | dns: 64 | - 65 | # e.g. 66 | # - 10.216.2.10 67 | # - 10.192.2.11 68 | gateway: 192.168.10.1 # 69 | range: 192.168.10.0/26 # 70 | reserved: 71 | - 72 | # e.g. 73 | # - 192.168.10.1-192.168.10.30 74 | # - 192.168.10.50-192.168.10.63 75 | static: 76 | - 77 | # e.g. 78 | # - 192.168.10.31 79 | # - 192.168.10.32 80 | # - 192.168.10.33 81 | # - 192.168.10.34 82 | type: manual 83 | compilation: 84 | workers: 4 85 | network: dockerregistry 86 | reuse_compilation_vms: true 87 | vm_type: medium 88 | az: z1 89 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/docker-registry-release/docker-registry.yml: -------------------------------------------------------------------------------- 1 | name: docker-registry 2 | director_uuid: 3 | releases: 4 | - name: docker-registry 5 | url: 6 | # https://github.com/cloudfoundry-community/docker-registry-boshrelease ... 7 | sha1: 8 | version: 9 | # e.g. "3" 10 | stemcells: 11 | - alias: trusty 12 | os: ubuntu-trusty 13 | url: 14 | sha1: 15 | # e.g. 4b8055aee6f80a4de13d0c972df3f62427b159d3 16 | version: 17 | # e.g. "3262.5" 18 | instance_groups: 19 | - name: nfs-server 20 | persistent_disk_type: xlarge 21 | instances: 1 22 | vm_type: medium 23 | stemcell: trusty 24 | azs: [z1] 25 | networks: [{name: , static_ips: []}] 26 | jobs: 27 | - name: debian_nfs_server 28 | release: docker-registry 29 | properties: 30 | nfs_server: 31 | allow_from_entries: 32 | - 33 | - 34 | # e.g. 35 | # - 192.168.10.34 36 | # - 192.168.10.35 37 | - name: registry 38 | instances: 2 39 | vm_type: medium 40 | stemcell: trusty 41 | azs: [z1] 42 | networks: [{name: , static_ips: []}] 43 | # e.g. 
[192.168.10.34, 192.168.10.35] 44 | jobs: 45 | - name: registry 46 | release: docker-registry 47 | properties: 48 | docker: 49 | registry: 50 | cookie: docker 51 | root: /var/vcap/nfs/shared 52 | - name: nfs_mounter 53 | release: docker-registry 54 | properties: 55 | nfs_server: 56 | address: 57 | share_path: /var/vcap/nfs 58 | - name: proxy 59 | instances: 1 60 | vm_type: medium 61 | stemcell: trusty 62 | azs: [z1] 63 | networks: [{name: , static_ips: []}] 64 | jobs: 65 | - name: proxy 66 | release: docker-registry 67 | properties: 68 | docker: 69 | proxy: 70 | backend: 71 | hosts: 72 | - 73 | - 74 | # e.g. 75 | # - 192.168.10.34 76 | # - 192.168.10.35 77 | ssl: 78 | cert: | 79 | -----BEGIN CERTIFICATE----- 80 | 81 | -----END CERTIFICATE----- 82 | key: | 83 | -----BEGIN RSA PRIVATE KEY----- 84 | 85 | -----END RSA PRIVATE KEY----- 86 | update: 87 | canaries: 1 88 | max_in_flight: 3 89 | canary_watch_time: 1000-60000 90 | update_watch_time: 1000-60000 91 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/inline-pipeline.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: main 3 | jobs: 4 | - check-os-version 5 | - issue-ls-command 6 | 7 | resources: 8 | - name: concourse-pipeline-samples 9 | type: git 10 | source: 11 | branch: master 12 | uri: https://github.com/pivotalservices/concourse-pipeline-samples.git 13 | 14 | jobs: 15 | - name: check-os-version 16 | serial: true 17 | public: true 18 | plan: 19 | - get: concourse-pipeline-samples 20 | trigger: true 21 | - task: check-image-os-version 22 | config: 23 | platform: linux 24 | image_resource: 25 | type: docker-image 26 | source: 27 | repository: :5000/ubuntu 28 | tag: "latest" 29 | insecure_registries: [ ":5000" ] 30 | inputs: 31 | - name: concourse-pipeline-samples 32 | run: 33 | path: cat 34 | args: 35 | - "/etc/lsb-release" 36 | 37 | - name: issue-ls-command 38 | serial: true 39 | public: true 40 | plan: 41 | - get: concourse-pipeline-samples 42 | trigger: true 43 | passed: 44 | - check-os-version 45 | - task: issue-ls-command 46 | config: 47 | platform: linux 48 | image_resource: 49 | type: docker-image 50 | source: 51 | repository: :5000/ubuntu 52 | tag: "latest" 53 | insecure_registries: [ ":5000" ] 54 | inputs: 55 | - name: concourse-pipeline-samples 56 | run: 57 | path: ls 58 | args: 59 | - "-la" 60 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/pipeline.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: main 3 | jobs: 4 | - issue-find-command 5 | - issue-ls-command 6 | - check-os-version 7 | 8 | resources: 9 | - name: concourse-pipeline-samples 10 | type: git 11 | source: 12 | branch: master 13 | uri: https://github.com/pivotalservices/concourse-pipeline-samples.git 14 | 15 | jobs: 16 | - name: issue-find-command 17 | serial: true 18 | public: true 19 | plan: 20 | - get: concourse-pipeline-samples 21 | trigger: true 22 | - task: issue-find-command 23 | file: concourse-pipeline-samples/private-docker-registry/tasks/find-command.yml 24 | - name: check-os-version 25 | serial: true 26 | public: true 27 | plan: 28 | - get: concourse-pipeline-samples 29 | trigger: true 30 | passed: 31 | - issue-find-command 32 | - task: check-os-version 33 | file: concourse-pipeline-samples/private-docker-registry/tasks/os-version.yml 34 | - name: issue-ls-command 35 | 
serial: true 36 | public: true 37 | plan: 38 | - get: concourse-pipeline-samples 39 | trigger: true 40 | passed: 41 | - check-os-version 42 | - task: issue-ls-command 43 | file: concourse-pipeline-samples/private-docker-registry/tasks/ls-command.yml 44 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/find-command.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | 4 | echo "Running find command" 5 | #find . 6 | #sleep 2 7 | cat /etc/lsb-release 8 | apt-get -y install curl 9 | curl 192.168.99.100:5000/v2/ubuntu/manifests/latest 10 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/find-command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: concourse-pipeline-samples 12 | 13 | run: 14 | path: concourse-pipeline-samples/private-docker-registry/tasks/find-command.sh 15 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/ls-command.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | 4 | echo "Running ls command" 5 | ls -la . 6 | sleep 2 7 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/ls-command.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: 192.168.99.100:5000/docker-concourse-cf-tools 8 | tag: "latest" 9 | insecure_registries: [ "192.168.99.100:5000" ] 10 | 11 | 12 | inputs: 13 | - name: concourse-pipeline-samples 14 | 15 | run: 16 | path: concourse-pipeline-samples/private-docker-registry/tasks/ls-command.sh 17 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/os-version.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -x 3 | 4 | echo "Checking OS version of container" 5 | 6 | cat /etc/lsb-release 7 | 8 | sleep 2 9 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/private-docker-registry/tasks/os-version.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: 192.168.99.100:5000/ubuntu 8 | tag: "latest" 9 | insecure_registries: [ "192.168.99.100:5000" ] 10 | 11 | inputs: 12 | - name: concourse-pipeline-samples 13 | 14 | run: 15 | path: concourse-pipeline-samples/private-docker-registry/tasks/os-version.sh 16 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/tagger/README.md: -------------------------------------------------------------------------------- 1 | # Concourse Pipeline Tagger 2 | 3 | Tags every item in a plan with a given tag name 4 | ``` 5 | cat pipeline.yml | ./tagger.py remote-worker 6 | ``` 7 | 8 | 
-------------------------------------------------------------------------------- /concourse-pipeline-hacks/tagger/tagger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import yaml 4 | import sys 5 | 6 | def process_job(job): # tag every step in the job's plan 7 | for item in job['plan']: 8 | process_item(item) 9 | 10 | def process_item(item): # recurse into aggregate blocks; tag everything else 11 | if "aggregate" in item: 12 | for sub_item in item["aggregate"]: 13 | process_item(sub_item) 14 | else: 15 | try: 16 | item["tags"] = [sys.argv[1]] 17 | except Exception: 18 | print("Couldn't tag item", item, file=sys.stderr) 19 | 20 | if __name__ == "__main__": 21 | pipeline = yaml.safe_load(sys.stdin.read()) 22 | for job in pipeline['jobs']: 23 | process_job(job) 24 | print(yaml.dump(pipeline)) 25 | 26 | 27 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/task-run-user/README.md: -------------------------------------------------------------------------------- 1 | ![Task-run-user](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/icons/concourse-root.png) 2 | 3 | # Run a task container with a user other than `root` 4 | 5 | ### The problem 6 | By default, Concourse runs pipeline tasks with user `root`. 7 | This will not work if your task's image or container must run as another user (e.g. `postgres`). 8 | 9 | ### The solution 10 | 11 | To accommodate that requirement, Concourse provides a [`user` parameter](http://concourse-ci.org/running-tasks.html#task-run-user) that lets you explicitly set the user a task container runs as. 12 | 13 | 14 | #### Sample pipeline 15 | 16 | The pipeline sample below declares the `user` attribute (`postgres`) as part of the task's `run` definition. 17 | 18 | 19 | ``` 20 | --- 21 | jobs: 22 | - name: run-postgres-task 23 | plan: 24 | - do: 25 | - task: task-with-user-postgres 26 | config: 27 | platform: linux 28 | image_resource: 29 | type: docker-image 30 | source: 31 | repository: postgres 32 | run: 33 | user: postgres # <==== 34 | path: sh 35 | args: 36 | - -exc 37 | - | 38 | whoami 39 | ``` 40 | 41 | The definition file for the sample above is available for download [here](pipeline.yml). 42 | 43 | 44 | #### [Back to Concourse Pipeline Hacks](..) 45 | -------------------------------------------------------------------------------- /concourse-pipeline-hacks/task-run-user/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jobs: 3 | - name: run-postgres-task 4 | plan: 5 | - do: 6 | - task: task-with-user-postgres 7 | config: 8 | platform: linux 9 | image_resource: 10 | type: docker-image 11 | source: 12 | repository: postgres 13 | run: 14 | user: postgres 15 | path: sh 16 | args: 17 | - -exc 18 | - | 19 | whoami 20 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/README.md: -------------------------------------------------------------------------------- 1 | ![Concourse Pipeline Patterns](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/pipeline-patterns-02.png) 2 | 3 | ## Concourse Pipeline Patterns 4 | This article provides examples of common patterns for platform operations and application development pipelines. 5 | 6 | 1. [Gated Pipelines](gated-pipelines) 7 | 8 | 1. [Time triggered pipelines](time-triggered-pipelines) 9 | 10 | 1. [Parameterized pipeline tasks](parameterized-pipeline-tasks) 11 | 12 | 1. 
[Credentials Management with Vault](vault-integration) 13 | 14 | 1. [Authenticate Concourse team members with PCF UAA](uaa-authentication) 15 | 16 | 1. [Insert build metadata into user notifications in Concourse](https://github.com/pivotalservices/concourse-pipeline-samples/blob/master/pipelines/notifications/email-with-attachments/build-metadata.md) 17 | 18 | 19 | 33 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/01-simple/README.md: -------------------------------------------------------------------------------- 1 | # A simple gated CI pipeline 2 | 3 | In Concourse, a job must be triggered manually by default, as long as none of 4 | its resources specifies the "[trigger: true](http://concourse-ci.org/get-step.html#trigger)" parameter. 5 | 6 | Therefore, in order to create a "gated" step in a pipeline, a simple job that requires a manual trigger needs to be inserted into it. 7 | 8 | ``` 9 | - name: Run-automatically 10 | plan: 11 | - get: my-resource 12 | trigger: true 13 | - task: do-your-task-here 14 | ... 15 | 16 | - name: Manually-trigger-me # <----- INJECT manual job in pipeline 17 | plan: 18 | - get: my-resource 19 | trigger: false # <----- REQUIRES manual trigger 20 | passed: 21 | - Run-automatically # <----- Adds it to the chain of jobs in the pipeline 22 | - task: do-your-manual-task-here 23 | ... 24 | 25 | - name: Do-more-stuff-after-manual-trigger 26 | plan: 27 | - get: my-resource 28 | passed: 29 | - Manually-trigger-me 30 | trigger: true 31 | - task: do-other-tasks-here 32 | ... 33 | ``` 34 | 35 | ### Sample pipeline 36 | A complete pipeline definition file for the sample above is available [here](gated-pipeline-01-simple.yml). 37 | 38 | It defines a manual job between two other jobs that are automatically triggered upon version changes of a common resource (a github repository). 39 | 40 | ![Simple gated pipeline screenshot](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/simple-gated-pipeline.gif) 41 | 42 | 43 | ### How to test the pipeline 44 | To create the sample pipeline in your concourse server, download file [gated-pipeline-01-simple.yml](gated-pipeline-01-simple.yml) and issue the following fly command: 45 | `fly -t <target> set-pipeline -p simple-gate -c gated-pipeline-01-simple.yml` 46 | 47 | You will notice that, once the pipeline is unpaused in Concourse, it will automatically execute its first job (`Run-automatically`). Then, you will have to click on the second job (`Manually-trigger-me`) and click its `+` icon to manually run it. Only then will the second and third jobs be executed with the corresponding resource version processed by the first job. 48 | 49 | This example illustrates the typical pattern of building and unit testing code in the first half of the CI/CD pipeline and then deploying it to a more tightly controlled environment (the second half of the pipeline) only upon a manual pipeline job trigger by an authorized user.
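
If you prefer the command line over the UI's `+` icon, the same manual trigger can be issued with `fly`, assuming the pipeline was set with the name `simple-gate` as above:

```
fly -t <target> trigger-job -j simple-gate/Manually-trigger-me
```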
50 | 51 | ### See also 52 | 53 | - [The _Ship-it!_ gated pipeline example](../02-shipit) 54 | 55 | - [A more sophisticated gated pipeline](../03-shipit-enhanced) 56 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/01-simple/gated-pipeline-01-simple.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: my-resource 4 | type: git 5 | source: 6 | branch: master 7 | uri: https://github.com/pivotalservices/concourse-pipeline-samples.git 8 | 9 | jobs: 10 | - name: Run-automatically 11 | plan: 12 | - get: my-resource 13 | trigger: true 14 | - task: do-your-task-here 15 | config: 16 | platform: linux 17 | image_resource: 18 | type: docker-image 19 | source: 20 | repository: ubuntu 21 | run: 22 | path: sh 23 | args: 24 | - -exc 25 | - | 26 | echo "This job is automatically triggered upon any version change in the resource." 27 | 28 | 29 | - name: Manually-trigger-me 30 | plan: 31 | - get: my-resource 32 | passed: 33 | - Run-automatically 34 | trigger: false 35 | - task: do-your-manual-task-here 36 | config: 37 | platform: linux 38 | image_resource: 39 | type: docker-image 40 | source: 41 | repository: ubuntu 42 | run: 43 | path: sh 44 | args: 45 | - -exc 46 | - | 47 | echo "Output of your manually triggered task." 48 | 49 | - name: Do-more-stuff-after-manual-trigger 50 | plan: 51 | - get: my-resource 52 | passed: 53 | - Manually-trigger-me 54 | trigger: true 55 | - task: do-other-tasks-here 56 | config: 57 | platform: linux 58 | image_resource: 59 | type: docker-image 60 | source: 61 | repository: ubuntu 62 | run: 63 | path: sh 64 | args: 65 | - -exc 66 | - | 67 | echo "Output of your other tasks." 68 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/02-shipit/README.md: -------------------------------------------------------------------------------- 1 | # Ship-it! A gated CI pipeline with email notifications 2 | 3 | The pattern of gated CI pipelines applies to cases when software release updates are required to be manually approved and triggered by a release manager or platform administrator before they get deployed to a protected environment (e.g. production). 4 | 5 | This CI pipeline example illustrates the implementation of that pattern with a couple of additional enhancements on top of the previous [simple gated pipeline sample ](../01-simple): 6 | - email notification to release manager about a release ready to ship/deploy 7 | - monitoring of actual release deliveries of a software package in GitHub (Concourse FLY cli) 8 | 9 | ### Sample pipeline 10 | The pipeline definition file for the sample above is available [here](gated-pipeline-02-shipit.yml). 11 | 12 | #### How to test it 13 | To create the sample pipeline in your concourse server: 14 | 15 | 1. download file [gated-pipeline-02-shipit.yml](gated-pipeline-02-shipit.yml) 16 | 17 | 1. edit the file and update the entries below: 18 | - YOUR-SENDER-EMAIL-GOES-HERE: replace it with your sender email address 19 | - [YOUR-EMAIL-GOES-HERE]: replace it with your destination email addresses separated by comma and *keep the brackets* 20 | 21 | 1. 
issue the following fly command: 22 | `fly -t <target> set-pipeline -p ship-it -c gated-pipeline-02-shipit.yml` 23 | 24 | 25 | ![ShipIt gated pipeline screenshot](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/shipit-gated-pipeline.png) 26 | 27 | 28 | Once the pipeline is unpaused in Concourse, it will: 29 | 30 | 1. Automatically execute its first two jobs (`Build-It` and `Test-It`) for every new release of the monitored repository 31 | 32 | 1. Notify the release managers via e-mail to review and take action in order to proceed with the deployment of the newly verified release 33 | 34 | 1. Proceed with the deployment execution only after the release manager manually triggers it (e.g. click on the `Ship-It!` job and then click on its `+` icon) 35 | 36 | 37 | ### See also 38 | 39 | - [A more sophisticated gated pipeline](../03-shipit-enhanced) 40 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/03-shipit-enhanced/README.md: -------------------------------------------------------------------------------- 1 | # A gated CI pipeline with a dynamically updated gated job name 2 | 3 | This pipeline is an enhancement to the previous [sample of a gated pipeline with notifications](../02-shipit) and contains a dynamically updated gated job name, along with an additional email notification after it completes successfully. 4 | 5 | 6 | ![Gated pipeline enhanced](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/shipit-gated-pipeline-enhanced.gif) 7 | 8 | 9 | The case for the dynamically updated gated job name is that, without it, a release manager would have to dig into the pipeline UI to figure out which version of the validated resource will be deployed once he/she manually triggers the job. 10 | By updating the gated job name with the validated version number, it is clear to the release manager which version is expected to be deployed once the gated job is manually triggered. 11 | 12 | 13 | ### Sample pipeline 14 | The pipeline definition file for the sample above is available [here](gated-pipeline-03-shipit-enhanced.yml). 15 | 16 | #### How to test it 17 | To create the sample pipeline in your concourse server: 18 | 19 | 1. download file [gated-pipeline-03-shipit-enhanced.yml](gated-pipeline-03-shipit-enhanced.yml) 20 | 21 | 22 | 1. edit `gated-pipeline-03-shipit-enhanced.yml` and update the entries below: 23 | - YOUR-SENDER-EMAIL-GOES-HERE: replace it with your sender email address 24 | - [YOUR-EMAIL-GOES-HERE]: replace it with your destination email addresses separated by commas and *keep the brackets* 25 | 26 | 27 | 1. download file [params.yml](params.yml) 28 | 29 | 1. edit `params.yml` and replace the variables with the appropriate values. 30 | The concourse credentials are required for the auto-update of the "Ship-version-XXX" gated job label. 31 | The github token is needed to avoid the github API limit error for the targeted repository. 32 | 33 | 1. issue the following fly command: 34 | `fly -t <target> set-pipeline -p shipt-it-enhanced -c gated-pipeline-03-shipit-enhanced.yml -l params.yml` 35 | 36 | 37 | Once the pipeline is unpaused in Concourse, it will: 38 | 39 | 1. Automatically execute the first two jobs (`Build-It` and `Test-It`) for every new release of the monitored repository 40 | 41 | 1. Update the `Ship-version-XX` gated job name with the version number of the new verified release (e.g. `Ship-version-2.7.0`). 
This provides a more descriptive and objective name for the gated job, so release managers will know the exact version to be deployed just by glancing at the pipeline UI. 42 | 43 | 1. Notify the release managers via e-mail to review and take action in order to proceed with the deployment of the newly verified release 44 | 45 | 1. Proceed with the deployment execution only after the release manager manually triggers the gated job (e.g. click on the `Ship-version-XXX` job and then click on its `+` icon) 46 | 47 | 48 | ### See also 49 | 50 | - [A gated pipeline controlled by GitHub Pull Requests](../04-github-pull-request) 51 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/03-shipit-enhanced/params.yml: -------------------------------------------------------------------------------- 1 | concourse-url: YOUR-CONCOURSE-SERVER-URL # e.g. http://192.168.100.4:8080/ 2 | concourse-team: TARGETED-CONCOURSE-TEAM # e.g. main 3 | concourse-username: CONCOURSE-USER-NAME # e.g. admin 4 | concourse-password: CONCOURSE-PASSWORD 5 | concourse-pipeline-name: NAME-OF-YOUR-PIPELINE # e.g. shipt-it-enhanced 6 | github-access-token: YOUR-GIT-HUB-ACCESS-TOKEN 7 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/04-github-pull-request/params.yml: -------------------------------------------------------------------------------- 1 | github-deployment-control-repo: https://github.com/lsilvapvt/misc-support-files.git 2 | github-environment-control-file-path: environments/sandbox/environment.json 3 | github-username: YOUR-GIT-HUB-USER-NAME 4 | github-password: YOUR-GIT-HUB-PASSWORD 5 | github-access-token: YOUR-GIT-HUB-ACCESS-TOKEN 6 | email-address-sender: YOUR-EMAIL-GOES-HERE 7 | email-address-recipient: YOUR-EMAIL-GOES-HERE 8 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/gated-pipelines/README.md: -------------------------------------------------------------------------------- 1 | ![Pipeline image](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/icons/concourse-gate-pipelines.png) 2 | 3 | # Gated CI pipelines 4 | 5 | Gated pipelines give administrators and release managers control over *when* a given software release is deployed to a tightly protected environment (e.g. production). 6 | 7 | Jobs that perform certain tasks (e.g. deployment) against the downstream environment beyond the "gate" step are executed only upon either an approval from an external Change Control system or an explicit manual trigger of that step. 8 | 9 | Here are a few samples of this pattern: 10 | 11 | 1. [A simple gated pipeline](01-simple) 12 | 13 | 1. [Ship-it! A gated pipeline with notifications](02-shipit) 14 | 15 | 1. [A more sophisticated gated pipeline](03-shipit-enhanced) 16 | 17 | 1. 
[A gated pipeline controlled by GitHub Pull Requests](04-github-pull-request) 18 | 19 | 20 | 21 | ![Gated pipeline with GitHub PR](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/gated-pipeline-with-github-pr.jpg) 22 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/parameterized-pipeline-tasks/package-tutorials.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: tutorial-release 4 | type: git 5 | source: 6 | uri: https://github.com/docker/labs.git 7 | - name: java-dockerfile 8 | type: git 9 | source: 10 | uri: https://github.com/dockerfile/java.git 11 | - name: go-dockerfile 12 | type: git 13 | source: 14 | uri: https://github.com/dockerfile/go 15 | - name: common-tasks 16 | type: git 17 | source: 18 | uri: https://github.com/pivotalservices/concourse-pipeline-samples.git 19 | 20 | jobs: 21 | - name: Check-Consistency-of-Tutorial-files 22 | plan: 23 | - get: tutorial-release 24 | trigger: true 25 | - get: common-tasks 26 | - task: check-consistency-of-tutorial-files 27 | file: common-tasks/common/tasks/placeholder-task.yml 28 | 29 | - name: Package-Java-Tutorial 30 | plan: 31 | - get: tutorial-release 32 | trigger: true 33 | passed: 34 | - Check-Consistency-of-Tutorial-files 35 | - get: java-dockerfile 36 | - get: common-tasks 37 | - task: package-java-docker-tutorial 38 | file: common-tasks/concourse-pipeline-patterns/parameterized-pipeline-tasks/tasks/package-with-dockerfile.yml 39 | input_mapping: 40 | dockerfile: java-dockerfile 41 | params: 42 | PACKAGE_NAME: java-docker-tutorial.tgz 43 | 44 | - name: Package-Go-Tutorial 45 | plan: 46 | - get: tutorial-release 47 | trigger: true 48 | passed: 49 | - Check-Consistency-of-Tutorial-files 50 | - get: go-dockerfile 51 | - get: common-tasks 52 | - task: package-go-docker-tutorial 53 | file: common-tasks/concourse-pipeline-patterns/parameterized-pipeline-tasks/tasks/package-with-dockerfile.yml 54 | input_mapping: 55 | dockerfile: go-dockerfile 56 | params: 57 | PACKAGE_NAME: go-docker-tutorial.tgz 58 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/parameterized-pipeline-tasks/tasks/package-with-dockerfile.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: concourse/buildroot 8 | tag: "curl-tar" 9 | inputs: 10 | - name: tutorial-release 11 | - name: dockerfile 12 | outputs: 13 | - name: output-directory 14 | run: 15 | path: sh 16 | args: 17 | - -exc 18 | - | 19 | echo "Add specific Dockerfile to tutorials directory" 20 | cp ./dockerfile/Dockerfile ./tutorial-release 21 | echo "Package Tutorials directory containing specific Dockerfile" 22 | tar -cvf ./output-directory/$PACKAGE_NAME ./tutorial-release 23 | ls -la ./output-directory 24 | echo "After this step, save the packaged file $PACKAGE_NAME to an artifactory repository" 25 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/01-single-time-trigger/README.md: -------------------------------------------------------------------------------- 1 | # A Concourse CI pipeline with a single time trigger 2 | 3 | This page provides an example of a pipeline that is triggered by a [time resource](https://github.com/concourse/time-resource) on a pre-determined 
interval. 4 | 5 | The [time resource](https://github.com/concourse/time-resource) produces a new trigger (or a new version in Concourse resource lingo) for the time interval that was declared in its definition in the pipeline configuration file. 6 | 7 | - For example, a trigger for a time range: 8 | ``` 9 | resources: 10 | - name: trigger-daily-between-1am-and-2am 11 | type: time 12 | source: 13 | start: 1:00 AM 14 | stop: 2:00 AM 15 | location: America/Phoenix 16 | ``` 17 | or, a trigger for a time interval: 18 | ``` 19 | resources: 20 | - name: trigger-every-3-minutes 21 | type: time 22 | source: 23 | interval: 3m 24 | ``` 25 | 26 | ### Sample pipeline 27 | The pipeline below provides a sample of multiple jobs that are automatically triggered by a single interval time resource. Download its configuration file [here](scheduled-pipeline-01.yml). 28 | 29 | ![Time-triggered pipeline screenshot](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/time-trigger-01.png) 30 | 31 | 32 | ### How to test the pipeline 33 | To create the sample pipeline in your concourse server, download file [scheduled-pipeline-01.yml](scheduled-pipeline-01.yml) and issue the following fly command: 34 | `fly -t <target> set-pipeline -p simple-timer -c scheduled-pipeline-01.yml` 35 | 36 | Then unpause the pipeline in Concourse; it should be triggered automatically within 3 minutes. 37 | 38 | 39 | ### See also 40 | 41 | - [A CI pipeline with multiple time trigger resources](../02-multiple-time-triggers) 42 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/01-single-time-trigger/scheduled-pipeline-01.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: trigger-every-3-minutes 4 | type: time 5 | source: 6 | interval: 3m 7 | 8 | jobs: 9 | - name: Run-Forrest-Run 10 | plan: 11 | - get: trigger-every-3-minutes 12 | trigger: true 13 | - task: do-your-task-here 14 | config: 15 | platform: linux 16 | image_resource: 17 | type: docker-image 18 | source: 19 | repository: ubuntu 20 | run: 21 | path: sh 22 | args: 23 | - -exc 24 | - | 25 | echo "This job is automatically triggered every 3 minutes." 26 | sleep 10 # so you can see the running job effect in the UI 27 | 28 | - name: Run-Bubba-Run 29 | plan: 30 | - get: trigger-every-3-minutes 31 | trigger: true 32 | passed: 33 | - Run-Forrest-Run 34 | - task: do-another-task-here 35 | config: 36 | platform: linux 37 | image_resource: 38 | type: docker-image 39 | source: 40 | repository: ubuntu 41 | run: 42 | path: sh 43 | args: 44 | - -exc 45 | - | 46 | echo "Output of second job's task." 47 | sleep 10 # so you can see the running job effect in the UI 48 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/02-multiple-time-triggers/README.md: -------------------------------------------------------------------------------- 1 | # A CI pipeline with multiple time trigger resources 2 | 3 | As an enhancement to the previous [sample with a single time trigger](../01-single-time-trigger), this pipeline example implements _two_ [time resource triggers](https://github.com/concourse/time-resource) and the ability to manually kick it off outside of the time resources' schedules. 
4 | 5 | This is a typical pattern for system backup pipelines, where administrators 6 | require the automated backup to run a few times a day, along with the ability to have a one-off run when necessary. 7 | 8 | ### Sample pipeline 9 | Download the sample pipeline configuration file [here](scheduled-pipeline-02.yml). 10 | 11 | ![Pipeline with multiple time triggers screenshot](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/time-trigger-02.png) 12 | 13 | The sample uses a third timer resource labelled as `manualtrigger` to control the manual triggering of all steps of the pipeline in the appropriate sequence. 14 | 15 | 16 | ### How to test the pipeline 17 | 18 | To create the sample pipeline in your concourse server: 19 | 20 | 1. download file [scheduled-pipeline-02.yml](scheduled-pipeline-02.yml) 21 | 22 | 1. issue the following fly command: 23 | `fly -t <target> set-pipeline -p multiple-timers -c scheduled-pipeline-02.yml` 24 | 25 | Then unpause the `multiple-timers` pipeline in Concourse; it will be triggered either automatically every 4 and every 10 minutes, or manually by running the `Manual-trigger` job. 26 | 27 | 28 | Note: 29 | 30 | - The `manualtrigger` resource is necessary in order to propagate the manual execution to all steps in this pipeline. Without it, if one tries to run the first individual job of the pipeline, no other job would be executed due to the lack of a common manual trigger. 31 | 32 | 33 | ##### Back to [Time triggered pipelines](..) 34 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/02-multiple-time-triggers/params-for-s3.yml: -------------------------------------------------------------------------------- 1 | s3-bucket: 2 | s3-access-key-id: 3 | s3-secret-access-key: 4 | s3-region_name: 5 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/02-multiple-time-triggers/scheduled-pipeline-02-with-s3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: trigger-every-4-minutes 4 | type: time 5 | source: 6 | interval: 4m 7 | - name: trigger-every-10-minutes 8 | type: time 9 | source: 10 | interval: 10m 11 | - name: manualtrigger 12 | type: semver 13 | source: 14 | initial_version: 0.1.0 15 | driver: s3 16 | bucket: {{s3-bucket}} 17 | key: version 18 | access_key_id: {{s3-access-key-id}} 19 | secret_access_key: {{s3-secret-access-key}} 20 | region_name: {{s3-region_name}} 21 | 22 | jobs: 23 | - name: Manual-trigger 24 | plan: 25 | - put: manualtrigger 26 | params: 27 | bump: minor 28 | key: versions 29 | 30 | - name: Run-Forrest-Run 31 | plan: 32 | - get: trigger-every-4-minutes 33 | trigger: true 34 | - get: trigger-every-10-minutes 35 | trigger: true 36 | - get: manualtrigger 37 | trigger: true 38 | passed: 39 | - Manual-trigger 40 | - task: do-your-task-here 41 | config: 42 | platform: linux 43 | image_resource: 44 | type: docker-image 45 | source: 46 | repository: ubuntu 47 | run: 48 | path: sh 49 | args: 50 | - -exc 51 | - | 52 | echo "This job is triggered every 4 minutes, every 10 minutes, or manually."
53 | sleep 10 # so you can see the running job effect in the UI 54 | 55 | - name: Run-Bubba-Run 56 | plan: 57 | - get: trigger-every-4-minutes 58 | trigger: true 59 | passed: 60 | - Run-Forrest-Run 61 | - get: trigger-every-10-minutes 62 | trigger: true 63 | passed: 64 | - Run-Forrest-Run 65 | - get: manualtrigger 66 | trigger: true 67 | passed: 68 | - Run-Forrest-Run 69 | - task: do-another-task-here 70 | config: 71 | platform: linux 72 | image_resource: 73 | type: docker-image 74 | source: 75 | repository: ubuntu 76 | run: 77 | path: sh 78 | args: 79 | - -exc 80 | - | 81 | echo "Output of second job's task." 82 | sleep 10 # so you can see the running job effect in the UI 83 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/02-multiple-time-triggers/scheduled-pipeline-02.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: trigger-every-4-minutes 4 | type: time 5 | source: 6 | interval: 4m 7 | - name: trigger-every-10-minutes 8 | type: time 9 | source: 10 | interval: 10m 11 | - name: manualtrigger 12 | type: time # technique to trigger and propagate manual execution through pipeline 13 | source: 14 | interval: 1m 15 | 16 | jobs: 17 | - name: Manual-trigger 18 | plan: 19 | - get: manualtrigger 20 | trigger: false 21 | 22 | - name: Run-Forrest-Run 23 | plan: 24 | - get: trigger-every-4-minutes 25 | trigger: true 26 | - get: trigger-every-10-minutes 27 | trigger: true 28 | - get: manualtrigger 29 | trigger: true 30 | passed: 31 | - Manual-trigger 32 | - task: do-your-task-here 33 | config: 34 | platform: linux 35 | image_resource: 36 | type: docker-image 37 | source: 38 | repository: ubuntu 39 | run: 40 | path: sh 41 | args: 42 | - -exc 43 | - | 44 | echo "This job is triggered every 4 minutes, every 10 minutes, or manually." 45 | sleep 10 # so you can see the running job effect in the UI 46 | 47 | - name: Run-Bubba-Run 48 | plan: 49 | - get: trigger-every-4-minutes 50 | trigger: true 51 | passed: 52 | - Run-Forrest-Run 53 | - get: trigger-every-10-minutes 54 | trigger: true 55 | passed: 56 | - Run-Forrest-Run 57 | - get: manualtrigger 58 | trigger: true 59 | passed: 60 | - Run-Forrest-Run 61 | - task: do-another-task-here 62 | config: 63 | platform: linux 64 | image_resource: 65 | type: docker-image 66 | source: 67 | repository: ubuntu 68 | run: 69 | path: sh 70 | args: 71 | - -exc 72 | - | 73 | echo "Output of second job's task." 74 | sleep 10 # so you can see the running job effect in the UI 75 | -------------------------------------------------------------------------------- /concourse-pipeline-patterns/time-triggered-pipelines/README.md: -------------------------------------------------------------------------------- 1 | ![Pipeline image](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/icons/concourse-timers.png) 2 | 3 | # Time triggered CI pipelines 4 | 5 | This page provides samples of pipelines triggered by [time/scheduler resources](https://github.com/concourse/time-resource) in Concourse CI. 6 | 7 | 1. [A pipeline with a single time trigger](01-single-time-trigger) 8 | 9 | 1. 
[A pipeline with multiple trigger resources](02-multiple-time-triggers) 10 | 11 | 12 | ![Pipeline with multiple time triggers screenshot](https://raw.githubusercontent.com/lsilvapvt/misc-support-files/master/docs/images/time-trigger-02.png) 13 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/images/Blue-Green-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/pipelines/appdev/blue-green-app-deployment/bgd-app/images/Blue-Green-icon.png -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/images/Blue-station.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/pipelines/appdev/blue-green-app-deployment/bgd-app/images/Blue-station.png -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/images/Green-station.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amazted/concourse-pipeline-samples/f613287565cf50b8417efeff51b0d36a2e019ecb/pipelines/appdev/blue-green-app-deployment/bgd-app/images/Green-station.png -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/index.js: -------------------------------------------------------------------------------- 1 | var express = require('express'), 2 | http = require('http'), 3 | path = require('path'), 4 | bodyParser = require('body-parser'), 5 | methodOverride = require('method-override'), 6 | logger = require('morgan'), 7 | fs = require('fs'), 8 | url = require('url'); 9 | 10 | var app = express(); 11 | 12 | var NumberBlackBox = require('./src/NumberBlackBox.js'); 13 | var app_port_number = process.env.PORT || 3000; 14 | 15 | app.set('port', app_port_number); 16 | app.use(bodyParser.json()); 17 | app.use(logger('dev')); 18 | app.use(methodOverride()); 19 | 20 | app.get('/images/*', function(req, res) { // serve image files 21 | var request = url.parse(req.url, true); 22 | var action = request.pathname; 23 | var img = fs.readFileSync('.'+request.pathname); 24 | res.writeHead(200, {'Content-Type': 'image/gif' }); 25 | res.end(img, 'binary'); 26 | }); 27 | 28 | app.all('*', function(req, res) { // serve all other requests 29 | var vcap_app=process.env.VCAP_APPLICATION || '{ "application_name":"","application_version":"","application_uris":""}'; 30 | var app_obj = JSON.parse(vcap_app) 31 | var icon_name = (app_obj.application_name.indexOf("blue")>= 0)?"Blue-station.png":"Green-station.png"; 32 | res.writeHead(200, {"Content-Type": "text/html; charset=UTF-8"}); 33 | res.write(""); 34 | res.write("
<html><head><title>Blue-Green deployments</title></head><body><img src='/images/"+icon_name+"'/><h2>Blue-Green deployments</h2>"); 35 | res.write("<br/>"); 36 | res.write("<table border='1'>"); 37 | res.write("<tr><td>Application name: "+ app_obj.application_name+"</td></tr>"); 38 | res.write("<tr><td>Application version: "+ app_obj.application_version+"</td></tr>"); 39 | res.write("<tr><td>Application URIs: "+ app_obj.application_uris+"</td></tr>"); 40 | res.write("<tr><td>VCAP_APPLICATION: <pre>"+ JSON.stringify(app_obj,null,'\t')+"</pre></td></tr>"); 41 | res.write("<tr><td>Current time: "+new Date().toString()+"</td></tr></table></body></html>
"); 42 | res.write(""); 43 | res.end("\n"); 44 | 45 | }); 46 | 47 | var server = http.createServer(app); 48 | var boot = function () { 49 | server.listen(app.get('port'), function(){ 50 | console.info('Blue-Green-App-Test listening on port ' + app.get('port')); 51 | }); 52 | } 53 | var shutdown = function() { 54 | server.close(); 55 | } 56 | if (require.main === module) { 57 | boot(); 58 | } else { 59 | console.info('Running app as a module') 60 | exports.boot = boot; 61 | exports.shutdown = shutdown; 62 | exports.port = app.get('port'); 63 | } 64 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "bfd-app", 3 | "version": "0.1.0", 4 | "description": "Node.js app for Blue-Green deployment sample pipeline", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "mocha tests --recursive" 8 | }, 9 | "repository": { 10 | "type": "git", 11 | "url": "git+https://github.com/pivotalservices/concourse-pipeline-samples.git" 12 | }, 13 | "keywords": [ 14 | "concourse", 15 | "blue-green" 16 | ], 17 | "author": "Luciano Silva", 18 | "license": "ISC", 19 | "bugs": { 20 | "url": "https://github.com/pivotalservices/concourse-pipeline-samples/issues" 21 | }, 22 | "homepage": "https://github.com/pivotalservices/concourse-pipeline-samples#readme", 23 | "dependencies": { 24 | "body-parser": "^1.15.1", 25 | "chai": "^3.5.0", 26 | "express": "^4.13.4", 27 | "http": "0.0.0", 28 | "method-override": "^2.3.6", 29 | "morgan": "^1.7.0", 30 | "path": "^0.12.7" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/src/NumberBlackBox.js: -------------------------------------------------------------------------------- 1 | function NumberBlackBox() { 2 | }; 3 | 4 | NumberBlackBox.prototype.myNumber = Math.floor(Math.random() * 1000); // generates a number between 0 and 1000 5 | 6 | NumberBlackBox.prototype.getNumber = function() { 7 | return this.myNumber; 8 | }; 9 | 10 | NumberBlackBox.prototype.add = function(delta) { 11 | if ( isNaN(delta) ) { return NaN }; 12 | this.myNumber += delta; 13 | return this.myNumber; 14 | }; 15 | 16 | NumberBlackBox.prototype.subtract = function(delta) { 17 | if ( isNaN(delta) ) { return NaN }; 18 | this.myNumber -= delta; 19 | return this.myNumber; 20 | }; 21 | 22 | module.exports = NumberBlackBox; 23 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/bgd-app/tests/NumberBlackBox-tests.js: -------------------------------------------------------------------------------- 1 | var chai = require('chai'); 2 | var expect = chai.expect 3 | , should = chai.should(); 4 | var NumberBlackBox = require(__dirname+'/../src/NumberBlackBox.js'); 5 | 6 | describe('NumberBlackBox Unit tests', function() { 7 | 8 | var numberBlackBox = new NumberBlackBox(); 9 | 10 | // GETNUMBER 11 | it('getNumber() should return a number', function() { 12 | expect(numberBlackBox.getNumber()).to.not.be.NaN; 13 | }); 14 | 15 | // ADD 16 | it('add() should return resulting number', function() { 17 | expect(numberBlackBox.add(1)).to.equal(numberBlackBox.getNumber()); 18 | }); 19 | 20 | it('add() should return NaN if a NaN value is passed in as argument', function() { 21 | expect(numberBlackBox.add('hello')).to.be.NaN; 22 | }); 23 | 24 | // subtract 25 | 
it('subtract() should return resulting number', function() { 26 | expect(numberBlackBox.subtract(1)).to.equal(numberBlackBox.getNumber()); 27 | }); 28 | 29 | it('subtract() should return NaN if a NaN value is passed in as argument', function() { 30 | expect(numberBlackBox.subtract('hello')).to.be.NaN; 31 | }); 32 | 33 | }); 34 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/credentials.yml.sample: -------------------------------------------------------------------------------- 1 | deploy-username: 2 | deploy-password: 3 | pws-organization: 4 | pws-space: 5 | pws-api: 6 | pws-app-suffix: app-hello 7 | pws-app-domain: cfapps.io 8 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/app-manifest-prep: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xe 4 | 5 | cat ./current-app-info/current-app.txt 6 | 7 | sed "s/APPNAME/$(cat ./current-app-info/next-app.txt)-$PWS_APP_SUFFIX/" ./concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/manifests/manifest.yml > ./app-manifest-output/manifest.yml 8 | 9 | cat ./app-manifest-output/manifest.yml 10 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/app-manifest-prep.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: concourse-pipeline-samples 12 | - name: current-app-info 13 | 14 | outputs: 15 | - name: app-manifest-output 16 | 17 | run: 18 | path: concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/ci/tasks/app-manifest-prep -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/current-app-get-info: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xe 4 | 5 | pwd 6 | env 7 | 8 | cf api $PWS_API --skip-ssl-validation 9 | 10 | cf login -u $PWS_USER -p $PWS_PWD -o "$PWS_ORG" -s "$PWS_SPACE" 11 | 12 | cf apps 13 | 14 | set +e 15 | cf apps | grep "main-$PWS_APP_SUFFIX" | grep green 16 | if [ $? 
-eq 0 ] 17 | then 18 | echo "green" > ./current-app-info/current-app.txt 19 | echo "blue" > ./current-app-info/next-app.txt 20 | else 21 | echo "blue" > ./current-app-info/current-app.txt 22 | echo "green" > ./current-app-info/next-app.txt 23 | fi 24 | set -xe 25 | 26 | echo "Current main app routes to app instance $(cat ./current-app-info/current-app.txt)" 27 | echo "New version of app to be deployed to instance $(cat ./current-app-info/next-app.txt)" 28 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/current-app-get-info.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: concourse-pipeline-samples 12 | 13 | outputs: 14 | - name: current-app-info 15 | 16 | run: 17 | path: concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/ci/tasks/current-app-get-info 18 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/load-tests: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "Installing artillery for load tests: https://artillery.io/docs/getting-started/" 6 | 7 | npm install -g artillery@"1.6.0-24" 8 | 9 | export NEXT_APP_COLOR=$(cat ./current-app-info/next-app.txt) 10 | export NEXT_APP_URL=http://$NEXT_APP_COLOR-$PWS_APP_SUFFIX.$PWS_APP_DOMAIN/ 11 | 12 | echo "Running artillery load tests..." 13 | 14 | artillery quick --duration 10 --rate 10 $NEXT_APP_URL 15 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/load-tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: djudorange/node-gulp-mocha 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: concourse-pipeline-samples 12 | - name: current-app-info 13 | 14 | run: 15 | path: concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/ci/tasks/load-tests 16 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/run-unit-tests: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -xe 4 | 5 | cd concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/bgd-app 6 | npm install 7 | mocha tests --recursive 8 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/unit-tests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: djudorange/node-gulp-mocha 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: concourse-pipeline-samples 12 | 13 | run: 14 | path: concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/ci/tasks/run-unit-tests 15 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/update-routes: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | 3 | set -xe 4 | 5 | pwd 6 | env 7 | 8 | cf api $PWS_API --skip-ssl-validation 9 | 10 | cf login -u $PWS_USER -p $PWS_PWD -o "$PWS_ORG" -s "$PWS_SPACE" 11 | 12 | cf apps 13 | 14 | cf routes 15 | 16 | export PWS_DOMAIN_NAME=$PWS_APP_DOMAIN 17 | export MAIN_ROUTE_HOSTNAME=main-$PWS_APP_SUFFIX 18 | 19 | export NEXT_APP_COLOR=$(cat ./current-app-info/next-app.txt) 20 | export NEXT_APP_HOSTNAME=$NEXT_APP_COLOR-$PWS_APP_SUFFIX 21 | 22 | export CURRENT_APP_COLOR=$(cat ./current-app-info/current-app.txt) 23 | export CURRENT_APP_HOSTNAME=$CURRENT_APP_COLOR-$PWS_APP_SUFFIX 24 | 25 | echo "Mapping main app route to point to $NEXT_APP_HOSTNAME instance" 26 | cf map-route $NEXT_APP_HOSTNAME $PWS_DOMAIN_NAME --hostname $MAIN_ROUTE_HOSTNAME 27 | 28 | cf routes 29 | 30 | echo "Removing previous main app route that pointed to $CURRENT_APP_HOSTNAME instance" 31 | 32 | set +e 33 | cf unmap-route $CURRENT_APP_HOSTNAME $PWS_DOMAIN_NAME --hostname $MAIN_ROUTE_HOSTNAME 34 | set -e 35 | 36 | echo "Routes updated" 37 | 38 | cf routes 39 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/ci/tasks/update-routes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | 11 | inputs: 12 | - name: concourse-pipeline-samples 13 | - name: current-app-info 14 | 15 | run: 16 | path: concourse-pipeline-samples/pipelines/appdev/blue-green-app-deployment/ci/tasks/update-routes 17 | -------------------------------------------------------------------------------- /pipelines/appdev/blue-green-app-deployment/manifests/manifest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | applications: 3 | - name: APPNAME 4 | command: node index.js 5 | buildpack: https://github.com/cloudfoundry/buildpack-nodejs.git 6 | instances: 1 7 | -------------------------------------------------------------------------------- /pipelines/azure/azure-blobstore-integration/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: 3 | - name: azure-blob 4 | type: docker-image 5 | source: 6 | repository: cfcloudops/azure-blobstore-concourse-resource 7 | 8 | resources: 9 | - name: azure-blobstore 10 | type: azure-blob 11 | source: 12 | storage_account_name: REPLACE-WITH-YOUR-BLOBSTORE-ACCOUNT-NAME 13 | storage_access_key: REPLACE-WITH-YOUR-BLOBSTORE-ACCESS-KEY 14 | container: REPLACE-WITH-YOUR-BLOBSTORE-CONTAINER-NAME 15 | regexp: REPLACE-WITH-YOUR-FILES-NAME-AND-VERSION-REGEX # e.g. myapp-release-([0-9\.]+).tar.gz 16 | environment: AzureCloud 17 | 18 | jobs: 19 | - name: 1-build-and-save-release-to-blobstore 20 | plan: 21 | - task: create-artifact 22 | config: 23 | platform: linux 24 | image_resource: 25 | type: docker-image 26 | source: 27 | repository: ubuntu 28 | outputs: 29 | - name: build 30 | run: 31 | path: sh 32 | args: 33 | - -exc 34 | - | 35 | # Do your build steps here. Creating temporary file below as a sample: 36 | export CURRENT_TIMESTAMP=$(date +"%Y%m%d%H%S") 37 | echo "Sample build output file, timestamp: $CURRENT_TIMESTAMP" > ./build/myappfile.txt 38 | # Creating sample package file with a file name containing the new version number 39 | tar -cvzf ./myapp-release-$CURRENT_TIMESTAMP.tar.gz --directory=./build . 
40 | mv ./myapp-release-*.tar.gz ./build 41 | find . 42 | - put: azure-blobstore 43 | params: { file: ./build/myapp-release-*.tar.gz } 44 | 45 | - name: 2-trigger-when-new-file-is-added-to-azure-blobstore 46 | plan: 47 | - get: azure-blobstore 48 | trigger: true 49 | passed: 50 | - 1-build-and-save-release-to-blobstore 51 | - task: use-new-file 52 | config: 53 | platform: linux 54 | image_resource: 55 | type: docker-image 56 | source: 57 | repository: ubuntu 58 | inputs: 59 | - name: azure-blobstore 60 | run: 61 | path: sh 62 | args: 63 | - -exc 64 | - | 65 | cd ./azure-blobstore 66 | ls -la 67 | echo "Version of release file retrieved: $(cat ./version). Extracting release file..." 68 | tar -xvf ./myapp-release-*.tar.gz 69 | ls -la 70 | cat ./myappfile.txt 71 | -------------------------------------------------------------------------------- /pipelines/deprecated/pcf-cfops-backup/ci/pipelines/credentials.yml.sample: -------------------------------------------------------------------------------- 1 | git-project-url: https://github.com/pivotalservices/concourse-pipeline-samples.git 2 | 3 | # use ops-manager's host domain name below, NOT its ip address 4 | ops-manager-hostname: myopsmanager.domain.com 5 | 6 | # Set this for environments where the Ops Manager hostname is not resolvable from the Concourse subnet, so that Concourse workers/containers can still ssh into the Ops Manager VM 7 | # Otherwise leave it commented out 8 | # ops-manager-private-ip: 192.168.10.11 9 | 10 | ops-manager-ui-user: my-ops-manager-ui-user 11 | ops-manager-ui-password: my-ops-manager-ui-password 12 | ops-manager-ssh-user: my-ops-manager-ssh-user 13 | ops-manager-ssh-password: my-ops-manager-ssh-password 14 | 15 | file-repo-ip: 255.255.255.255 16 | file-repo-user: my-file-repo-userid 17 | file-repo-password: my-file-repo-password 18 | file-repo-path: my-file-repo-path 19 | number-of-days-to-keep-backup-files: 10 20 | 21 | s3-bucket: CHANGEME 22 | s3-access-key: CHANGEME 23 | s3-secret-access-key: CHANGEME 24 | s3-endpoint: CHANGEME # Should include the protocol, e.g. https://s3.amazonaws.com or https://s3.us-west-2.amazonaws.com 25 | s3-signature-version: s3v2 # defaults to v2 26 | -------------------------------------------------------------------------------- /pipelines/deprecated/pcf-cfops-backup/ci/tasks/cfops_backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: pcf-backup-scripts 12 | 13 | outputs: 14 | - name: backupdir 15 | 16 | run: 17 | path: pcf-backup-scripts/pcf-cfops-backup/scripts/cfops_backup.sh 18 | -------------------------------------------------------------------------------- /pipelines/deprecated/pcf-cfops-backup/ci/tasks/cfops_s3backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/awscli-ubuntu 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: pcf-backup-scripts 12 | 13 | outputs: 14 | - name: backupdir 15 | 16 | run: 17 | path: pcf-backup-scripts/pcf-cfops-backup/scripts/cfops_s3backup.sh 18 | -------------------------------------------------------------------------------- /pipelines/deprecated/pcf-cfops-backup/ci/tasks/cleanup-backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux
3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/docker-concourse-cf-tools 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: pcf-backup-scripts 12 | 13 | run: 14 | path: pcf-backup-scripts/pcf-cfops-backup/scripts/cleanup-old-files-in-repo.sh 15 | -------------------------------------------------------------------------------- /pipelines/deprecated/pcf-cfops-backup/scripts/cleanup-old-files-in-repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -xe 3 | 4 | # This script performs a clean up of PCF backups stored in a file repository 5 | 6 | echo "FILE_REPO_IP: $FILE_REPO_IP" 7 | echo "FILE_REPO_USER_ID: $FILE_REPO_USER_ID" 8 | echo "FILE_REPO_PASSWORD: $FILE_REPO_PASSWORD" 9 | echo "FILE_REPO_PATH: $FILE_REPO_PATH" 10 | echo "NUMBER_OF_DAYS_TO_KEEP_FILES: $NUMBER_OF_DAYS_TO_KEEP_FILES" 11 | 12 | # ssh into file repository and remove backup directories older than the configured number of days (NUMBER_OF_DAYS_TO_KEEP_FILES) 13 | sshpass -p "$FILE_REPO_PASSWORD" ssh -o 'StrictHostKeyChecking=no' $FILE_REPO_USER_ID@$FILE_REPO_IP "find $FILE_REPO_PATH/* -type d -ctime +$NUMBER_OF_DAYS_TO_KEEP_FILES | xargs rm -rf" 14 | -------------------------------------------------------------------------------- /pipelines/docker/pks-kubectl-image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudfoundry/cflinuxfs2 2 | 3 | COPY \ 4 | pks \ 5 | kubectl \ 6 | jq \ 7 | /usr/local/bin/ 8 | -------------------------------------------------------------------------------- /pipelines/docker/pks-kubectl-image/README.md: -------------------------------------------------------------------------------- 1 | ![Concourse and a Private Docker Registry](https://raw.githubusercontent.com/pivotalservices/concourse-pipeline-samples/master/common/images/concourse-and-private-registry.jpg) 2 | 3 | # Pipeline for creating or updating a Docker image in Docker Hub 4 | 5 | Sample pipeline that creates a Docker image that contains PCF PKS CLIs: `pks` and `kubectl`. 6 | 7 | It downloads the `pks` and `kubectl` CLIs from the Pivotal Network and adds them to a Docker image: [`pivotalservices/pks-kubectl`](https://hub.docker.com/r/pivotalservices/pks-kubectl/). 8 | 9 | The new image is pushed to Docker Hub tagged both as `latest` and with the version of the PKS tile it was built from. 10 | -------------------------------------------------------------------------------- /pipelines/docker/pks-kubectl-image/pks_kubectl_image_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set the path for the secrets below to be created in vault or credhub 4 | export concourse_root_secrets_path="/concourse" 5 | export concourse_team_name="team-name" 6 | export concourse_pipeline_name="pks-kubectl-image" 7 | 8 | # VAULT or CREDHUB - targeted secrets management system 9 | export targeted_system="VAULT" 10 | # This script assumes that: 11 | # 1) the credhub or vault CLI is installed 12 | # 2) you set up your vault or credhub target and login commands prior to invoking it 13 | # e.g. for VAULT 14 | # export VAULT_ADDR=https://myvaultdomain:8200 15 | # export VAULT_SKIP_VERIFY=true 16 | # export VAULT_TOKEN=vault-token 17 | # 18 | # e.g.
for CREDHUB 19 | # credhub login -s credhub-server-uri -u username -p password --skip-tls-validation 20 | 21 | ## 22 | ## TEAM level secrets (shared by all pipelines in that team) 23 | ## 24 | export team_secrets=( 25 | "dockerhub-username"::"dockerhub-username" 26 | "dockerhub-password"::"dockerhub-password" 27 | "pivnet_token"::"pivotal-network-token" 28 | ) 29 | 30 | ## 31 | ## PIPELINE LEVEL secrets (specific to the pipeline) 32 | ## 33 | export pipeline_secrets=( 34 | # "^1\.0\..*$" 35 | "pks_product_version"::"^.*$" 36 | "pks_product_slug"::"pivotal-container-service" 37 | 38 | ) 39 | 40 | main () { 41 | 42 | # team level secrets 43 | concourse_team_level_secrets_path="${concourse_root_secrets_path}/${concourse_team_name}" 44 | writeCredentials "${concourse_team_level_secrets_path}" "${team_secrets[*]}" 45 | 46 | # pipeline level secrets 47 | concourse_pipeline_level_secrets_path="${concourse_team_level_secrets_path}/${concourse_pipeline_name}" 48 | writeCredentials "${concourse_pipeline_level_secrets_path}" "${pipeline_secrets[*]}" 49 | 50 | } 51 | 52 | writeCredentials () { 53 | secretsPath=${1} 54 | secretsObject=(${2}) 55 | 56 | for i in "${secretsObject[@]}" 57 | do 58 | KEY="${i%%::*}" 59 | VALUE="${i##*::}" 60 | echo "Creating secret for [$KEY]" 61 | if [[ $targeted_system == "VAULT" ]]; then 62 | vault write "${secretsPath}/${KEY}" value="${VALUE}" 63 | else # CREDHUB 64 | credhub set -n "${secretsPath}/${KEY}" -v "${VALUE}" 65 | fi 66 | done 67 | } 68 | 69 | main 70 | -------------------------------------------------------------------------------- /pipelines/google/google-cloud-storage-integration/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: 3 | - name: google-cloud-storage 4 | type: docker-image 5 | source: 6 | repository: frodenas/gcs-resource 7 | 8 | resources: 9 | - name: gcs-bucket 10 | type: google-cloud-storage 11 | source: 12 | bucket: YOUR-GCS-BUCKET-NAME 13 | regexp: YOUR-DIRECTORY-AND-FILE-NAME-REGEXP 14 | # e.g. releases/myapp-release-(.*).tar.gz" 15 | json_key: | 16 | # YOUR-JSON-PRIVATE-KEY-OBJECT-GOES-HERE 17 | { 18 | "type": ..., 19 | "project_id": ..., 20 | "private_key_id": ..., 21 | "private_key": ..., 22 | "client_email": ..., 23 | "client_id": ..., 24 | ... 25 | } 26 | # To create one for your GCS account, see: 27 | # https://cloud.google.com/storage/docs/authentication#generating-a-private-key 28 | 29 | jobs: 30 | - name: 1-build-and-save-release-to-gcs 31 | plan: 32 | - task: create-artifact 33 | config: 34 | platform: linux 35 | image_resource: 36 | type: docker-image 37 | source: 38 | repository: ubuntu 39 | outputs: 40 | - name: build 41 | run: 42 | path: sh 43 | args: 44 | - -exc 45 | - | 46 | # Do your build steps here. Creating temporary file below as a sample: 47 | export CURRENT_TIMESTAMP=$(date +"%Y%m%d%H%S") 48 | echo "Sample build output file, timestamp: $CURRENT_TIMESTAMP" > ./build/myappfile.txt 49 | # Creating sample package file with a file name containing the new version number 50 | tar -cvzf ./myapp-release-$CURRENT_TIMESTAMP.tar.gz --directory=./build . 51 | mv ./myapp-release-*.tar.gz ./build 52 | find . 
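# (the put step below uploads the tarball; the gcs resource derives the artifact version from the capture group in its regexp)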
53 | - put: gcs-bucket 54 | params: { file: ./build/myapp-release-*.tar.gz } 55 | 56 | - name: 2-trigger-when-new-file-is-added-to-gcs 57 | plan: 58 | - get: gcs-bucket 59 | trigger: true 60 | passed: 61 | - 1-build-and-save-release-to-gcs 62 | - task: use-new-file 63 | config: 64 | platform: linux 65 | image_resource: 66 | type: docker-image 67 | source: 68 | repository: ubuntu 69 | inputs: 70 | - name: gcs-bucket 71 | run: 72 | path: sh 73 | args: 74 | - -exc 75 | - | 76 | cd ./gcs-bucket 77 | ls -la 78 | echo "Version of release file retrieved: $(cat ./version). Extracting release file..." 79 | tar -xvf ./myapp-release-*.tar.gz 80 | ls -la 81 | cat ./myappfile.txt 82 | -------------------------------------------------------------------------------- /pipelines/jfrog/artifactory-integration/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: 3 | - name: artifactory 4 | type: docker-image 5 | source: 6 | repository: pivotalservices/artifactory-resource 7 | 8 | resources: 9 | - name: artifactory-repository 10 | type: artifactory 11 | check_every: 1m 12 | source: 13 | endpoint: http://ARTIFACTORY-HOST-NAME-GOES-HERE:8081/artifactory 14 | repository: "/repository-name/sub-folder" 15 | username: YOUR-ARTIFACTORY-USERNAME 16 | password: YOUR-ARTIFACTORY-PASSWORD 17 | 18 | jobs: 19 | - name: 1-build-an-artifact 20 | plan: 21 | - task: create-artifact 22 | config: 23 | platform: linux 24 | image_resource: 25 | type: docker-image 26 | source: 27 | repository: ubuntu 28 | outputs: 29 | - name: build 30 | run: 31 | path: sh 32 | args: 33 | - -exc 34 | - | 35 | echo "This is my file content." > ./build/myapp-$(date +"%Y%m%d%H%S").txt 36 | find . 37 | - put: artifactory-repository 38 | params: { file: ./build/myapp-*.txt } 39 | 40 | - name: 2-trigger-when-new-file-is-added-to-artifactory 41 | plan: 42 | - get: artifactory-repository 43 | trigger: true 44 | passed: 45 | - 1-build-an-artifact 46 | - task: use-new-file 47 | config: 48 | platform: linux 49 | image_resource: 50 | type: docker-image 51 | source: 52 | repository: ubuntu 53 | inputs: 54 | - name: artifactory-repository 55 | run: 56 | path: sh 57 | args: 58 | - -exc 59 | - | 60 | ls -la artifactory-repository 61 | cat ./artifactory-repository/myapp*.txt 62 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/build-metadata.md: -------------------------------------------------------------------------------- 1 | ![build-metadata](https://raw.githubusercontent.com/pivotalservices/concourse-pipeline-samples/master/common/images/email_with_attachment.png) 2 | 3 | # Insert build metadata into user notifications in Concourse 4 | 5 | Metadata about running builds, such as build ID, job or pipeline name, is available as environment variables for get or put operations in Concourse. 6 | 7 | Such information is very useful to provide to pipeline users when notifying them about their builds' success or failure. The list of available variables is listed in the [Concourse documentation](https://concourse-ci.org/implementing-resources.html#resource-metadata). 8 | 9 | See link below for an example of a pipeline definition file that inserts such variables into an email message for pipeline users using the email resource. 10 | 11 | The same can be done for other notification resources such as the ones for Slack or Twitter. 
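For instance, a minimal sketch of the same technique with the community Slack resource (assuming the `cfcommunity/slack-notification-resource` image and a webhook URL of your own; the resource substitutes the build metadata variables referenced in the message text):

```yaml
resource_types:
- name: slack-notification
  type: docker-image
  source:
    repository: cfcommunity/slack-notification-resource

resources:
- name: notify-slack
  type: slack-notification
  source:
    url: YOUR-SLACK-WEBHOOK-URL-GOES-HERE  # assumption: your Slack incoming-webhook URL

# then, in a job's plan (e.g. as an on_failure step):
# - put: notify-slack
#   params:
#     text: |
#       Build $BUILD_NAME of job $BUILD_JOB_NAME (pipeline $BUILD_PIPELINE_NAME) finished.
```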
12 | 13 | [Sample pipeline from github](https://github.com/pivotalservices/concourse-pipeline-samples/blob/master/pipelines/notifications/email-with-attachments/ci/email_with_metadata.yml). 14 | 15 | ### Running the sample on your Concourse server 16 | 17 | Once you have a Concourse server set up, to deploy this sample pipeline: 18 | 19 | 1. Download the sample pipeline YML file and pipeline files [from github](https://github.com/pivotalservices/concourse-pipeline-samples/tree/master/pipelines/notifications/email-with-attachments/ci) to your local machine. 20 | 21 | 2. Replace the email address placeholders with the appropriate email addresses (e.g. search for YOUR-DESTINATION-EMAIL-ADDRESS-GOES-HERE), leaving the square brackets in place where they exist 22 | 23 | 3. Set up the pipeline in Concourse with the fly command: 24 | `fly -t local set-pipeline -p email-with-metadata -c email_with_metadata.yml` 25 | 26 | 27 | 4. Go into the Concourse web interface, unpause the new pipeline and run the send-email-with-metadata job. 28 | 29 | --- 30 | ### Notes and hints 31 | 32 | - Task `prep-email-text` sets the subject and body text of the emails by creating text files in an output directory that will be consumed by the send-email put action later on. Those text files contain references to the build metadata variables (you will find them in double curly brackets in the echo commands), which will in turn be replaced by the email resource as part of its put action. 33 | 34 | - Some resources may have trouble dealing with some environment variables. For example, as of the publishing date of this article, the email resource used in this example failed to render the variable `ATC_EXTERNAL_URL` in the body of the email message. So, if you don't get an email as expected, even though there is no error message in the send-email step of your pipeline, try removing some of those environment variable references from the email message body/subject to check whether one of them is causing that behavior. 35 | 36 | - To simulate a build failure and have the failure email sent out, just uncomment the `exit 1` command line in the `do-your-stuff-here` task of the pipeline. That will force the task to return an error code, halt the pipeline execution and trigger the on_failure step that sends the failure email.
37 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/ci/credentials.yml.sample: -------------------------------------------------------------------------------- 1 | git-project-url: https://github.com/pivotalservices/concourse-pcf-usage-report.git 2 | smtp-host: 3 | smtp-port: 4 | smtp-username: 5 | smtp-password: 6 | email-from: 7 | email-to: 8 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/ci/email_with_metadata.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: 3 | - name: email 4 | type: docker-image 5 | source: 6 | repository: mdomke/concourse-email-resource 7 | 8 | resources: 9 | - name: send-email 10 | type: email 11 | source: 12 | from: YOUR-SENDER-EMAIL-ADDRESS-GOES-HERE 13 | 14 | jobs: 15 | - name: send-email-with-metadata 16 | serial: true 17 | public: true 18 | plan: 19 | - do: 20 | - task: prep-email-text 21 | config: 22 | platform: linux 23 | image_resource: 24 | type: docker-image 25 | source: 26 | repository: ubuntu 27 | tag: "latest" 28 | outputs: 29 | - name: email-text 30 | run: 31 | path: sh 32 | args: 33 | - -exc 34 | - | 35 | env 36 | echo "Successful build for pipeline {{ BUILD_PIPELINE_NAME }}" > ./email-text/email-subject.txt 37 | echo "Build {{ BUILD_NAME }} of job {{ BUILD_JOB_NAME }} for pipeline {{ BUILD_PIPELINE_NAME }} ran successfully." > ./email-text/email-body.txt 38 | echo "Build {{ BUILD_NAME }} of {{ BUILD_PIPELINE_NAME }} pipeline failed" > ./email-text/email-subject-failure.txt 39 | echo "Build {{ BUILD_NAME }} of job {{ BUILD_JOB_NAME }} for pipeline {{ BUILD_PIPELINE_NAME }} failed." > ./email-text/email-body-failure.txt 40 | 41 | - task: do-your-stuff-here 42 | config: 43 | platform: linux 44 | image_resource: 45 | type: docker-image 46 | source: 47 | repository: ubuntu 48 | tag: "latest" 49 | run: 50 | path: sh 51 | args: 52 | - -exc 53 | - | 54 | env 55 | echo "Hello!!" 56 | # exit 1 # uncomment this line and reconfigure your pipeline to generate an error 57 | 58 | - put: send-email 59 | params: 60 | to: [YOUR-DESTINATION-EMAIL-ADDRESS-GOES-HERE] 61 | subject: ./email-text/email-subject.txt 62 | body: ./email-text/email-body.txt 63 | 64 | on_failure: 65 | put: send-email 66 | params: 67 | to: [YOUR-DESTINATION-EMAIL-ADDRESS-GOES-HERE] 68 | subject: ./email-text/email-subject-failure.txt 69 | body: ./email-text/email-body-failure.txt 70 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/ci/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - name: pipeline-scripts 4 | type: git 5 | source: 6 | branch: master 7 | uri: {{git-project-url}} 8 | 9 | jobs: 10 | - name: send-email-with-attachment 11 | serial: true 12 | public: true 13 | plan: 14 | - do: 15 | - get: pipeline-scripts 16 | trigger: false 17 | 18 | - task: prep-email-text 19 | config: 20 | platform: linux 21 | image_resource: 22 | type: docker-image 23 | source: 24 | repository: pivotalservices/concourse-send-email 25 | tag: "latest" 26 | outputs: 27 | - name: email-text 28 | run: 29 | path: sh 30 | args: 31 | - -exc 32 | - | 33 | echo "Hello Concourse user" > ./email-text/email-subject.txt 34 | echo "This email is from my Concourse pipeline. You will find my file attached. Thanks!"
> ./email-text/email-body.txt 35 | 36 | - task: send-email-with-attachment 37 | file: pipeline-scripts/email-with-attachments/ci/tasks/send-email.yml 38 | params: 39 | SMTP_HOST: {{smtp-host}} 40 | SMTP_PORT: {{smtp-port}} 41 | SMTP_USERNAME: {{smtp-username}} 42 | SMTP_PASSWORD: {{smtp-password}} 43 | EMAIL_FROM: {{email-from}} 44 | EMAIL_TO: {{email-to}} 45 | EMAIL_SUBJECT_FILE: ./email-text/email-subject.txt 46 | EMAIL_BODY_FILE: ./email-text/email-body.txt 47 | EMAIL_ATTACHMENTS: '[{ "filename": "my-attachment.txt","path": "./pipeline-scripts/email-with-attachments/my-attachment.txt", "contentType":"text/plain"}]' 48 | 49 | on_failure: 50 | task: email-notification-failure 51 | file: pipeline-scripts/email-with-attachments/ci/tasks/send-email.yml 52 | params: 53 | SMTP_HOST: {{smtp-host}} 54 | SMTP_PORT: {{smtp-port}} 55 | SMTP_USERNAME: {{smtp-username}} 56 | SMTP_PASSWORD: {{smtp-password}} 57 | EMAIL_FROM: {{email-from}} 58 | EMAIL_TO: {{email-to}} 59 | EMAIL_SUBJECT_TEXT: Pipeline failure 60 | EMAIL_BODY_TEXT: This is the report of a pipeline failure. See attachment for more info. 61 | EMAIL_ATTACHMENTS: '[{ "filename": "my-attachment.txt","path": "./pipeline-scripts/email-with-attachments/my-attachment.txt", "contentType":"text/plain"}]' 62 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/ci/scripts/send-email.js: -------------------------------------------------------------------------------- 1 | var fs = require('fs'); 2 | var nodemailer = require("nodemailer"); 3 | 4 | // create reusable transporter object using the default SMTP transport 5 | var transporter = nodemailer.createTransport('smtps://'+process.env.SMTP_USERNAME+':'+process.env.SMTP_PASSWORD+'@'+process.env.SMTP_HOST+':'+process.env.SMTP_PORT); 6 | 7 | var emailSubject=""; 8 | if (process.env.EMAIL_SUBJECT_TEXT) { 9 | emailSubject=process.env.EMAIL_SUBJECT_TEXT; 10 | } else { 11 | var emailSubjectFile=process.env.EMAIL_SUBJECT_FILE; 12 | emailSubject=fs.readFileSync(emailSubjectFile, 'utf8'); 13 | } 14 | 15 | var emailText=""; 16 | if (process.env.EMAIL_BODY_TEXT) { 17 | emailText=process.env.EMAIL_BODY_TEXT; 18 | } else { 19 | var emailTextFile=process.env.EMAIL_BODY_FILE; 20 | emailText=fs.readFileSync(emailTextFile, 'utf8'); 21 | } 22 | 23 | // setup e-mail data with unicode symbols 24 | var mailOptions = { 25 | from: process.env.EMAIL_FROM, 26 | to: process.env.EMAIL_TO, // receiver 27 | subject: emailSubject, // subject 28 | text: emailText, // body 29 | html: '
<p>'+emailText+'</p>
' // html body 30 | }; 31 | if (process.env.EMAIL_ATTACHMENTS) { 32 | mailOptions.attachments=JSON.parse(process.env.EMAIL_ATTACHMENTS); 33 | // attachment object must be json array with the following format: 34 | // '[{ "filename": "email-text-success.txt","path": "./email-text/email-text-success.txt", "contentType":"text/plain"}]' 35 | } 36 | 37 | // send mail with defined transport object 38 | transporter.sendMail(mailOptions, function(error, info){ 39 | if(error){ 40 | return console.log(error); 41 | } 42 | // console.log('Message sent: ' + info.response); 43 | }); 44 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/ci/tasks/send-email.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/concourse-send-email 8 | tag: "latest" 9 | 10 | inputs: 11 | - name: pipeline-scripts 12 | - name: email-text 13 | 14 | run: 15 | path: sh 16 | args: 17 | - -exc 18 | - | 19 | mv /app/node_modules . 20 | node pipeline-scripts/email-with-attachments/ci/scripts/send-email.js 21 | -------------------------------------------------------------------------------- /pipelines/notifications/email-with-attachments/my-attachment.txt: -------------------------------------------------------------------------------- 1 | This is a sample text file to be attached to an email. 2 | Cheers! 3 | -------------------------------------------------------------------------------- /pipelines/pcf/certificates/monitor-expiring-certificates/README.md: -------------------------------------------------------------------------------- 1 | Concourse  Certs 2 | 3 | # Monitor Expiring PCF Certificates 4 | 5 | This sample pipeline checks for expiring certificates of a PCF deployment. 6 | 7 | It gets automatically triggered on a regular basis by a time resource to check for the list of certificates about to expire from the corresponding PCF Ops Manager. 8 | 9 | The pipeline monitors five types of PCF certificates: 10 | - [Configurable Certificates](https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html#rotate-config) 11 | - [Non-Configurable Certificates](https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html#rotate-non-config) 12 | - [CA Certificates](https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html#rotate-ca) 13 | - [Bosh Director Trusted Certificates](https://docs.pivotal.io/pivotalcf/customizing/trusted-certificates.html) 14 | - [Ops Manager Root Certificates](https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html#rotate-root) 15 | 16 | If a certificate is about to expire within the given `EXPIRATION_TIME_FRAME` pipeline parameter, then the pipeline will throw an error and send out a notification (e.g. email) to the configured recipients. 17 | 18 | --- 19 | 20 | ## How to use this pipeline 21 | 22 | 1) Update [`pcf_params.yml`](pcf_params.yml) by following the instructions in the file. 23 | 24 | This parameter file contains information about the PCF foundation's Ops Manager and Director required to obtain information about its certificates. 25 | 26 | 2) If automatic email notification is desired, update the corresponding parameters for resource `send-an-email` (e.g. smtp_host, credentials) in the `pipeline.yml` file. 
27 | 28 | 3) Adjust how often the `time-trigger` resource should trigger the pipeline execution by updating its `interval` parameter in `pipeline.yml` to the desired time interval. 29 | 30 | 4) Adjust parameter `EXPIRATION_TIME_FRAME` in `pipeline.yml` to the desired time frame to check for about-to-expire certificates (e.g. `3m` to check within the next 3 months) 31 | 32 | 5) Create the pipeline in Concourse: 33 | 34 | `fly -t sp set-pipeline -p monitor-certificates -c pipeline.yml -l pcf_params.yml` 35 | 36 | 6) Un-pause and run pipeline `monitor-certificates` 37 | 38 | --- 39 | -------------------------------------------------------------------------------- /pipelines/pcf/certificates/monitor-expiring-certificates/pcf_params.yml: -------------------------------------------------------------------------------- 1 | ######## PCF Foundation-specific parameters 2 | 3 | # Ops Manager information and admin credentials 4 | opsman_domain: ((opsman_domain)) 5 | opsman_admin_username: ((opsman_admin_username)) # Username for Ops Manager admin account 6 | opsman_admin_password: ((opsman_admin_password)) # Password for Ops Manager admin account 7 | # Either opsman_client_id/opsman_client_secret or opsman_admin_username/opsman_admin_password needs to be specified. 8 | # If you are using opsman_admin_username/opsman_admin_password, edit opsman_client_id/opsman_client_secret to be an empty value. 9 | # If you are using opsman_client_id/opsman_client_secret, edit opsman_admin_username/opsman_admin_password to be an empty value. 10 | opsman_client_id: ((opsman_client_id)) 11 | opsman_client_secret: ((opsman_client_secret)) 12 | -------------------------------------------------------------------------------- /pipelines/pcf/certificates/rotate-internal-certificates/README.md: -------------------------------------------------------------------------------- 1 | 22 | -------------------------------------------------------------------------------- /pipelines/pcf/certificates/rotate-internal-certificates/pcf_params.yml: -------------------------------------------------------------------------------- 1 | ######## PCF Foundation-specific parameters 2 | 3 | # Ops Manager information and admin credentials 4 | opsman_domain: ((opsman_domain)) 5 | opsman_admin_username: ((opsman_admin_username)) # Username for Ops Manager admin account 6 | opsman_admin_password: ((opsman_admin_password)) # Password for Ops Manager admin account 7 | # Either opsman_client_id/opsman_client_secret or opsman_admin_username/opsman_admin_password needs to be specified. 8 | # If you are using opsman_admin_username/opsman_admin_password, edit opsman_client_id/opsman_client_secret to be an empty value. 9 | # If you are using opsman_client_id/opsman_client_secret, edit opsman_admin_username/opsman_admin_password to be an empty value.
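# e.g. when authenticating with opsman_admin_username/opsman_admin_password above, set both values below to empty strings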
10 | opsman_client_id: ((opsman_client_id)) 11 | opsman_client_secret: ((opsman_client_secret)) 12 | -------------------------------------------------------------------------------- /pipelines/pcf/certificates/rotate-internal-certificates/pipeline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # PCF Documentation: https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html 4 | # Note: use with caution (check for existing bugs in Ops Manager for certs rotation) or 5 | # just use it as a template for a pipeline that automates the complete set of steps in the docs (and not just the rotate-certs API endpoint call) 6 | # 7 | resources: 8 | - name: pcf-pipelines-utils 9 | type: git 10 | source: 11 | uri: https://github.com/pivotalservices/concourse-pipeline-samples.git 12 | 13 | jobs: 14 | - name: Rotate-Non-Configurable-Certificates 15 | plan: 16 | - get: pcf-pipelines-utils 17 | - task: rotate-non-configurable-certificates 18 | file: pcf-pipelines-utils/tasks/pcf/certificates/regenerate-internal-certificates/task.yml 19 | params: 20 | OPSMAN_DOMAIN_OR_IP_ADDRESS: {{opsman_domain}} 21 | OPSMAN_USERNAME: {{opsman_admin_username}} 22 | OPSMAN_PASSWORD: {{opsman_admin_password}} 23 | OPSMAN_CLIENT_ID: {{opsman_client_id}} 24 | OPSMAN_CLIENT_SECRET: {{opsman_client_secret}} 25 | - task: apply-changes 26 | file: pcf-pipelines-utils/tasks/pcf/apply-changes/task.yml 27 | params: 28 | OPSMAN_CLIENT_ID: {{opsman_client_id}} 29 | OPSMAN_CLIENT_SECRET: {{opsman_client_secret}} 30 | OPSMAN_DOMAIN_OR_IP_ADDRESS: {{opsman_domain}} 31 | OPSMAN_PASSWORD: {{opsman_admin_password}} 32 | OPSMAN_USERNAME: {{opsman_admin_username}} 33 | -------------------------------------------------------------------------------- /pipelines/pcf/install-opsmgr/vsphere/README.md: -------------------------------------------------------------------------------- 1 | # Install Ops Manager pipeline 2 | 3 | **Experimental - Work in progress** 4 | 5 | This pipeline installs Ops Manager and the Director tile. 6 | 7 | 8 | ## How to use this pipeline 9 | 10 | 1) Update [`pcf_params.yml`](pcf_params.yml) by following the instructions in the file. 11 | 12 | This parameter file contains information about the PCF foundation (e.g. Ops Manager and Director) to which the tile will be deployed. 13 | 14 | 2) Update [`global_params.yml`](global_params.yml) by following the instructions in the file. 15 | 16 | This parameter file contains information about global properties that typically apply to any PCF pipeline (e.g. Pivotal Network token). 17 | 18 | This parameter file is kept separate from the others for reuse, since any other PCF tile install or upgrade pipeline will use the same properties. If you already have this type of file created for another PCF tile pipeline, you can reuse it here.
19 | 20 | 3) Create the pipeline in Concourse: 21 | 22 | `fly -t sp set-pipeline -p install-opsmgr -c pipeline.yml -l global_params.yml -l pcf_params.yml` 23 | 24 | 25 | 4) Un-pause and run pipeline `install-opsmgr` 26 | -------------------------------------------------------------------------------- /pipelines/pcf/install-opsmgr/vsphere/global_params.yml: -------------------------------------------------------------------------------- 1 | ######## Generic global parameters 2 | ######## typically apply to all foundations and all tiles 3 | 4 | # Pivotal Network token for tile release download and pcf-pipelines download 5 | pivnet_token: ((pivnet_token)) 6 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-ingress-kubo/README.md: -------------------------------------------------------------------------------- 1 | # Configure ingress-kubo load balancer to PKS cluster 2 | 3 | This sample pipeline implements jobs to configure the [`ingress-kubo-poc`](https://github.com/datianshi/ingress-kubo-poc) services load balancer to an existing PKS cluster. 4 | 5 | ## How to use this pipeline 6 | 7 | 1) If you have CredHub or Vault integrated with Concourse, then update [`pks_configure_ingress_kubo_params.sh`](pks_configure_ingress_kubo_params.sh) with the required PKS credentials and then run the script to create all required secrets in your credentials management software. 8 | Otherwise, update [params.yml](params.yml) with all the required parameters. 9 | 10 | 2) Create the pipeline in Concourse: 11 | 12 | `fly -t sp set-pipeline -p configure-ingress-kubo -c pipeline.yml -l params.yml` 13 | 14 | 3) Un-pause and run pipeline `configure-ingress-kubo` by manually triggering job `install-ingress-kubo` 15 | 16 | 17 | ## For vSphere with NSX-V environments 18 | 19 | A job to configure the NSX-V Load Balancer with rules for the PKS cluster master node is provided under group tab "vSphere NSX-V LB setup". 20 | 21 | For that job to work correctly, appropriately fill out all vSphere- and NSXV-related parameters in the parameters file (or in `pks_configure_ingress_kubo_params.sh` script for Concourse servers integrated with credential management systems). 22 | 23 | After the PKS cluster gets created successfully, run job `configure-vsphere-nxsv-lb-ingress-kubo` to configure the NSX-V load balancer. 24 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-ingress-kubo/params.yml: -------------------------------------------------------------------------------- 1 | # PKS API url: e.g. api.pks.domain.com 2 | pcf_pks_api: ((pcf_pks_api)) 3 | # PKS service domain: e.g.
pks.domain.com 4 | pcf_pks_domain: ((pcf_pks_domain)) 5 | # PKS CLI admin credentials 6 | pks_cli_username: ((pks_cli_username)) 7 | pks_cli_password: ((pks_cli_password)) 8 | # PKS cluster name 9 | pks_cluster_name: ((pks_cluster_name)) 10 | # Hostname of master node of PKS cluster 11 | pks_cluster_master_node_hostname: ((pks_cluster_master_node_hostname)) 12 | 13 | ## optional - for vSphere NSX-V load balancer config only 14 | vcenter_host: ((vcenter_host)) 15 | vcenter_usr: ((vcenter_usr)) 16 | vcenter_pwd: ((vcenter_pwd)) 17 | vcenter_datacenter: ((vcenter_datacenter)) 18 | vcenter_datastore: ((vcenter_datastore)) 19 | nsxv_manager_address: ((nsxv_manager_address)) 20 | nsxv_manager_admin_username: ((nsxv_manager_admin_username)) 21 | nsxv_manager_admin_password: ((nsxv_manager_admin_password)) 22 | nsxv_gen_mgr_transport_zone: ((nsxv_gen_mgr_transport_zone)) 23 | nsxv_gen_edge_name: ((nsxv_gen_edge_name)) 24 | nsxv_gen_edge_cluster: ((nsxv_gen_edge_cluster)) 25 | nsxv_gen_vip_ip: ((nsxv_gen_vip_ip)) 26 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-ingress-kubo/pks_configure_ingress_kubo_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set the path for the secrets below to be created in vault or credhub 4 | export concourse_root_secrets_path="/concourse" 5 | export concourse_team_name="team-name" 6 | export concourse_pipeline_name="configure-ingress-kubo" 7 | 8 | # VAULT or CREDHUB - targeted secrets management system 9 | export targeted_system="VAULT" 10 | # This script assumes that: 11 | # 1) the credhub or vault CLI is installed 12 | # 2) you set up your vault or credhub target and login commands prior to invoking it 13 | # e.g. for VAULT 14 | # export VAULT_ADDR=https://myvaultdomain:8200 15 | # export VAULT_SKIP_VERIFY=true 16 | # export VAULT_TOKEN=vault-token 17 | # 18 | # e.g.
for CREDHUB 19 | # credhub login -s credhub-server-uri -u username -p password --skip-tls-validation 20 | 21 | ## 22 | ## TEAM level secrets (shared by all pipelines in that team) 23 | ## 24 | export team_secrets=( 25 | ) 26 | 27 | ## 28 | ## PIPELINE LEVEL secrets (specific to the pipeline) 29 | ## 30 | export pipeline_secrets=( 31 | "pcf_pks_domain"::"pks.domain.com" 32 | "pcf_pks_api"::"api.pks.domain.com" 33 | # username for PKS CLI username creation 34 | "pks_cli_username"::"pksadmin" 35 | # password for PKS CLI username creation 36 | "pks_cli_password"::"mypassword" 37 | "pks_cluster_name"::"cluster1" 38 | "pks_cluster_master_node_hostname"::"cluster1.pks.domain.com" 39 | 40 | ## optional - for vSphere NSX-V load balancer config only 41 | # vcenter hostname, do not include protocol information 42 | "vcenter_host"::"vcenter.domain.com" 43 | # vcenter credentials and properties 44 | "vcenter_usr"::"myvcenteruser@vsphere.local" 45 | "vcenter_pwd"::"myvcenterpassword" 46 | "vcenter_datacenter"::"Datacenter" 47 | "vcenter_datastore"::"mydatastore" 48 | 49 | "nsxv_manager_address"::"mynsxv.domain.com" 50 | "nsxv_manager_admin_username"::"admin" 51 | "nsxv_manager_admin_password"::"password" 52 | "nsxv_gen_edge_name"::"nsxv_gen_edge_name" 53 | "nsxv_gen_edge_cluster"::"Cluster-A" 54 | "nsxv_gen_mgr_transport_zone"::"nsxv_gen_mgr_transport_zone" 55 | "nsxv_gen_vip_ip"::"10.10.10.10" 56 | ) 57 | 58 | main () { 59 | 60 | # team level secrets 61 | concourse_team_level_secrets_path="${concourse_root_secrets_path}/${concourse_team_name}" 62 | writeCredentials "${concourse_team_level_secrets_path}" "${team_secrets[*]}" 63 | 64 | # pipeline level secrets 65 | concourse_pipeline_level_secrets_path="${concourse_team_level_secrets_path}/${concourse_pipeline_name}" 66 | writeCredentials "${concourse_pipeline_level_secrets_path}" "${pipeline_secrets[*]}" 67 | 68 | } 69 | 70 | writeCredentials () { 71 | secretsPath=${1} 72 | secretsObject=(${2}) 73 | 74 | for i in "${secretsObject[@]}" 75 | do 76 | KEY="${i%%::*}" 77 | VALUE="${i##*::}" 78 | echo "Creating secret for [$KEY]" 79 | if [[ $targeted_system == "VAULT" ]]; then 80 | vault write "${secretsPath}/${KEY}" value="${VALUE}" 81 | else # CREDHUB 82 | credhub set -n "${secretsPath}/${KEY}" -v "${VALUE}" 83 | fi 84 | done 85 | } 86 | 87 | main 88 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-pks-cluster/README.md: -------------------------------------------------------------------------------- 1 | # Create a PKS cluster 2 | 3 | This sample pipeline implements jobs to create and delete a PKS cluster on a PCF+PKS environment using the `pks` CLI. 4 | 5 | ## How to use this pipeline 6 | 7 | 1) If you have CredHub or Vault integrated with Concourse, then update [`pks_create_cluster_params.sh`](pks_create_cluster_params.sh) with the required PKS credentials and then run the script to create all required secrets in your credentials management software. 8 | Otherwise, update [params.yml](params.yml) with all the required parameters. 9 | 10 | 2) Create the pipeline in Concourse: 11 | 12 | `fly -t sp set-pipeline -p create-pks-cluster -c pipeline.yml -l params.yml` 13 | 14 | 3) Un-pause and run pipeline `create-pks-cluster` by manually triggering job `create-pks-cluster` 15 | 16 | 17 | ## For vSphere with NSX-V environments 18 | 19 | A job to configure the NSX-V Load Balancer with rules for the PKS cluster master node is provided under group tab "vSphere NSX-V LB setup".
20 | 21 | For that job to work correctly, appropriately fill out all vSphere- and NSXV-related parameters in the parameters file (or in `pks_create_cluster_params.sh` script for Concourse servers integrated with credential management systems). 22 | 23 | After the PKS cluster gets created successfully, run job `configure-lb-pks-cluster-master-node` to configure the NSX-V load balancer. 24 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-pks-cluster/params.yml: -------------------------------------------------------------------------------- 1 | pcf_pks_api: ((pcf_pks_api)) 2 | pks_cli_username: ((pks_cli_username)) 3 | pks_cli_password: ((pks_cli_password)) 4 | pks_plan_name: ((pks_plan_name)) 5 | pks_cluster_name: ((pks_cluster_name)) 6 | pks_cluster_master_node_hostname: ((pks_cluster_master_node_hostname)) 7 | pks_cluster_number_of_workers: ((pks_cluster_number_of_workers)) 8 | 9 | ## optional - for vSphere NSX-V load balancer config only 10 | vcenter_host: ((vcenter_host)) 11 | vcenter_usr: ((vcenter_usr)) 12 | vcenter_pwd: ((vcenter_pwd)) 13 | vcenter_datacenter: ((vcenter_datacenter)) 14 | nsxv_manager_address: ((nsxv_manager_address)) 15 | nsxv_manager_admin_username: ((nsxv_manager_admin_username)) 16 | nsxv_manager_admin_password: ((nsxv_manager_admin_password)) 17 | nsxv_gen_mgr_transport_zone: ((nsxv_gen_mgr_transport_zone)) 18 | nsxv_gen_edge_cluster: ((nsxv_gen_edge_cluster)) 19 | vcenter_datastore: ((vcenter_datastore)) 20 | nsxv_gen_edge_name: ((nsxv_gen_edge_name)) 21 | nsxv_gen_vip_ip: ((nsxv_gen_vip_ip)) 22 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/configure-pks-cluster/pks_create_cluster_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set the path for the secrets below to be created in vault or credhub 4 | export concourse_root_secrets_path="/concourse" 5 | export concourse_team_name="team-name" 6 | export concourse_pipeline_name="create-pks-cluster" 7 | 8 | # VAULT or CREDHUB - targeted secrets management system 9 | export targeted_system="VAULT" 10 | # This script assumes that: 11 | # 1) the credhub or vault CLI is installed 12 | # 2) you set up your vault or credhub target and login commands prior to invoking it 13 | # e.g. for VAULT 14 | # export VAULT_ADDR=https://myvaultdomain:8200 15 | # export VAULT_SKIP_VERIFY=true 16 | # export VAULT_TOKEN=vault-token 17 | # 18 | # e.g.
for CREDHUB 19 | # credhub login -s credhub-server-uri -u username -p password --skip-tls-validation 20 | 21 | ## 22 | ## TEAM level secrets (shared by all pipelines in that team) 23 | ## 24 | export team_secrets=( 25 | ) 26 | 27 | ## 28 | ## PIPELINE LEVEL secrets (specific to the pipeline) 29 | ## 30 | export pipeline_secrets=( 31 | "pcf_pks_domain"::"pks.domain.com" 32 | "pcf_pks_api"::"api.pks.domain.com" 33 | # username for PKS CLI username creation 34 | "pks_cli_username"::"pksadmin" 35 | # password for PKS CLI username creation 36 | "pks_cli_password"::"mypassword" 37 | "pks_plan_name"::"small" 38 | "pks_cluster_name"::"cluster1" 39 | "pks_cluster_master_node_hostname"::"cluster1.pks.domain.com" 40 | "pks_cluster_number_of_workers"::"3" 41 | 42 | ## optional - for vSphere NSX-V load balancer config only 43 | # vcenter hostname, do not include protocol information 44 | "vcenter_host"::"vcenter.domain.com" 45 | # vcenter credentials and properties 46 | "vcenter_usr"::"myvcenteruser@vsphere.local" 47 | "vcenter_pwd"::"myvcenterpassword" 48 | "vcenter_datacenter"::"Datacenter" 49 | "vcenter_datastore"::"mydatastore" 50 | 51 | "nsxv_manager_address"::"mynsxv.domain.com" 52 | "nsxv_manager_admin_username"::"admin" 53 | "nsxv_manager_admin_password"::"password" 54 | "nsxv_gen_edge_name"::"nsxv_gen_edge_name" 55 | "nsxv_gen_edge_cluster"::"Cluster-A" 56 | "nsxv_gen_mgr_transport_zone"::"nsxv_gen_mgr_transport_zone" 57 | 58 | ) 59 | 60 | main () { 61 | 62 | # team level secrets 63 | concourse_team_level_secrets_path="${concourse_root_secrets_path}/${concourse_team_name}" 64 | writeCredentials "${concourse_team_level_secrets_path}" "${team_secrets[*]}" 65 | 66 | # pipeline level secrets 67 | concourse_pipeline_level_secrets_path="${concourse_team_level_secrets_path}/${concourse_pipeline_name}" 68 | writeCredentials "${concourse_pipeline_level_secrets_path}" "${pipeline_secrets[*]}" 69 | 70 | } 71 | 72 | writeCredentials () { 73 | secretsPath=${1} 74 | secretsObject=(${2}) 75 | 76 | for i in "${secretsObject[@]}" 77 | do 78 | KEY="${i%%::*}" 79 | VALUE="${i##*::}" 80 | echo "Creating secret for [$KEY]" 81 | if [[ $targeted_system == "VAULT" ]]; then 82 | vault write "${secretsPath}/${KEY}" value="${VALUE}" 83 | else # CREDHUB 84 | credhub set -n "${secretsPath}/${KEY}" -v "${VALUE}" 85 | fi 86 | done 87 | } 88 | 89 | main 90 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/install-pks/global_params.yml: -------------------------------------------------------------------------------- 1 | ######## Generic global parameters 2 | ######## typically apply to all foundations and all tiles 3 | 4 | # Pivotal Network token for tile release download and pcf-pipelines download 5 | pivnet_token: ((pivnet_token)) 6 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/install-pks/pcf_params.yml: -------------------------------------------------------------------------------- 1 | ######## PCF Foundation-specific parameters 2 | 3 | # The IaaS name for which stemcell to download. This must match the IaaS name 4 | # within the stemcell to download, e.g. "vsphere", "aws", "azure", "google" must be lowercase. 
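# e.g. iaas_type: vsphere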
5 | iaas_type: ((iaas_type)) 6 | 7 | # Ops Manager information and admin credentials 8 | opsman_domain: ((opsman_domain)) 9 | opsman_admin_username: ((opsman_admin_username)) # Username for Ops Manager admin account 10 | opsman_admin_password: ((opsman_admin_password)) # Password for Ops Manager admin account 11 | # Either opsman_client_id/opsman_client_secret or opsman_admin_username/opsman_admin_password needs to be specified. 12 | # If you are using opsman_admin_username/opsman_admin_password, edit opsman_client_id/opsman_client_secret to be an empty value. 13 | # If you are using opsman_client_id/opsman_client_secret, edit opsman_admin_username/opsman_admin_password to be an empty value. 14 | opsman_client_id: ((opsman_client_id)) 15 | opsman_client_secret: ((opsman_client_secret)) 16 | 17 | az_1_name: ((az_1_name)) 18 | az_2_name: ((az_2_name)) 19 | az_3_name: ((az_3_name)) 20 | services_network_name: ((services_network_name)) 21 | dynamic_services_network_name: ((dynamic_services_network_name)) 22 | bosh_vm_folder: ((bosh_vm_folder)) # The name should be the same as the VM Folder in the Ops Manager Director tile, under the vCenter config page. 23 | 24 | # pcf-pipelines legacy parameters - only used when the no_proxy option is used 25 | company_proxy_domain: 26 | opsman_ip_address: 27 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/vsphere/configure-pks-api-lb/README.md: -------------------------------------------------------------------------------- 1 | # Configure NSX-V Load Balancer for PKS API endpoints 2 | 3 | This sample pipeline configures an NSX-V Load Balancer for PKS API and UAA endpoints after the PKS tile has been successfully deployed to vSphere with NSX-V. 4 | 5 | It requires a reserved IP address to be assigned as a VIP/Virtual Server for the UAA and API endpoints (parameter `nsxv_gen_vip_ip`). 6 | 7 | ## How to use this pipeline 8 | 9 | 1) If you have CredHub or Vault integrated with Concourse, then update [`pks_api_nsxv_lb_params.sh`](pks_api_nsxv_lb_params.sh) with the required credentials and then run the script to create all required secrets in your credentials management software. 10 | Otherwise, update [params.yml](params.yml) with all the required parameters.
11 | 12 | 2) Create the pipeline in Concourse: 13 | 14 | `fly -t sp set-pipeline -p pks-api-config-nsxv -c pipeline.yml` 15 | 16 | 3) Un-pause and run pipeline `pks-api-config-nsxv` by manually triggering job `configure-lb-pks-application-profile` 17 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/vsphere/configure-pks-api-lb/params.yml: -------------------------------------------------------------------------------- 1 | pcf_pks_api: ((pcf_pks_api)) 2 | 3 | opsman_domain: ((opsman_domain)) 4 | opsman_admin_username: ((opsman_admin_username)) 5 | opsman_admin_password: ((opsman_admin_password)) 6 | opsman_client_id: ((opsman_client_id)) 7 | opsman_client_secret: ((opsman_client_secret)) 8 | 9 | vcenter_host: ((vcenter_host)) 10 | vcenter_username: ((vcenter_usr)) 11 | vcenter_password: ((vcenter_pwd)) 12 | vcenter_datacenter: ((vcenter_datacenter)) 13 | nsxv_manager_address: ((nsxv_manager_address)) 14 | nsxv_manager_admin_username: ((nsxv_manager_admin_username)) 15 | nsxv_manager_admin_password: ((nsxv_manager_admin_password)) 16 | nsxv_gen_mgr_transport_zone: ((nsxv_gen_mgr_transport_zone)) 17 | nsxv_gen_edge_cluster: ((nsxv_gen_edge_cluster)) 18 | vcenter_datastore: ((vcenter_datastore)) 19 | nsxv_gen_edge_name: ((nsxv_gen_edge_name)) 20 | nsxv_gen_vip_ip: ((nsxv_gen_vip_ip)) 21 | pks_api_cert_cn: ((pks_api_cert_cn)) 22 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/vsphere/nsxt/README.md: -------------------------------------------------------------------------------- 1 | # Configure NSX-T Components for use with PKS 2 | This sample pipeline will set up switches, routers, an IP block, and an IP pool to be used by PKS. 3 | 4 | It requires routable IP addresses from your IaaS provider for the external IP pool, the Tier-0 router IP and the T0 uplink IP. 5 | 6 | ## How to use this pipeline 7 | 8 | 1) If you have CredHub or Vault integrated with Concourse, then update [`nsxt_params.sh`](nsxt_params.sh) with the required credentials and parameters and then run the script to create all required secrets in your credentials management software. 9 | Otherwise, update [params.yml](params.yml) with all the required parameters. 10 | 11 | 2) Create the pipeline in Concourse: 12 | 13 | `fly -t sp set-pipeline -p pks-nsxt-config -c pipeline.yml -l params.yml` 14 | 15 | 3) Un-pause and run pipeline `pks-nsxt-config` by manually triggering job `create-logical-switches` 16 | -------------------------------------------------------------------------------- /pipelines/pcf/pks/vsphere/nsxt/params.yml: -------------------------------------------------------------------------------- 1 | # NSX Manager Params 2 | nsx_manager_address: ((nsx_manager_address)) 3 | nsx_manager_username: ((nsx_manager_username)) 4 | nsx_manager_password: ((nsx_manager_password)) 5 | 6 | #Unique Name for this PCF install 7 | pcf_foundation_name: ((pcf_foundation_name)) 8 | 9 | # Names of NSX Components.
Used to connect switches and routers to already established NSX Components 10 | vlan_transport_zone: ((vlan_transport_zone)) 11 | overlay_transport_zone: ((overlay_transport_zone)) 12 | edge_cluster_name: ((edge_cluster_name)) 13 | 14 | # T0 router IP and mask 15 | t0_router_ip: ((t0_router_ip)) 16 | t0_router_ip_mask: ((t0_router_ip_mask)) 17 | 18 | # Static route where T0 router should send all traffic back to IaaS 19 | t0_next_hop_ip: ((t0_next_hop_ip)) 20 | 21 | #Params for DNAT and SNAT rules created on the T0 router 22 | ops_mgr_dnat_ip: ((ops_mgr_dnat_ip)) 23 | infrastructure_network_snat_ip: ((infrastructure_network_snat_ip)) 24 | 25 | #Params to define a pool of IPs that will be used for dynamically created Organizations 26 | external_nat_ip_pool_cidr: ((external_nat_ip_pool_cidr)) 27 | external_nat_ip_pool_start_ip: ((external_nat_ip_pool_start_ip)) 28 | external_nat_ip_pool_end_ip: ((external_nat_ip_pool_end_ip)) 29 | 30 | vlan_uplink_switch_name: ((vlan_uplink_switch_name)) 31 | infrastructure_switch_name: ((infrastructure_switch_name)) 32 | services_switch_name: ((services_switch_name)) 33 | 34 | t0_router_name: ((t0_router_name)) 35 | infrastructure_router_name: ((infrastructure_router_name)) 36 | services_router_name: ((services_router_name)) 37 | -------------------------------------------------------------------------------- /pipelines/pcf/vsphere/nsxt/README.md: -------------------------------------------------------------------------------- 1 | # Configure NSX-T Components for use with PAS 2 | This sample pipeline will set up switches, routers, an IP block, and an IP pool to be used by PAS. 3 | 4 | It requires routable IP addresses from your IaaS provider for the external IP pool, the Tier-0 router IP and the T0 uplink IP. 5 | 6 | ## How to use this pipeline 7 | 8 | 1) If you have CredHub or Vault integrated with Concourse, then update [`nsxt_params.sh`](nsxt_params.sh) with the required credentials and parameters and then run the script to create all required secrets in your credentials management software. 9 | Otherwise, update [params.yml](params.yml) with all the required parameters. 10 | 11 | 2) Create the pipeline in Concourse: 12 | 13 | `fly -t sp set-pipeline -p pcf-nsxt-config -c pipeline.yml -l params.yml` 14 | 15 | 3) Un-pause and run pipeline `pcf-nsxt-config` by manually triggering job `create-logical-switches` 16 | -------------------------------------------------------------------------------- /pipelines/pcf/vsphere/nsxt/params.yml: -------------------------------------------------------------------------------- 1 | # NSX Manager Params 2 | nsx_manager_address: ((nsx_manager_address)) 3 | nsx_manager_username: ((nsx_manager_username)) 4 | nsx_manager_password: ((nsx_manager_password)) 5 | 6 | #Unique Name for this PCF install 7 | pcf_foundation_name: ((pcf_foundation_name)) 8 | 9 | # Names of NSX Components.
Used to connect switches and routers to already established NSX Components 10 | vlan_transport_zone: ((vlan_transport_zone)) 11 | overlay_transport_zone: ((overlay_transport_zone)) 12 | edge_cluster_name: ((edge_cluster_name)) 13 | 14 | # T0 router IP and mask 15 | t0_router_ip: ((t0_router_ip)) 16 | t0_router_ip_mask: ((t0_router_ip_mask)) 17 | 18 | # Static route where T0 router should send all traffic back to IaaS 19 | t0_next_hop_ip: ((t0_next_hop_ip)) 20 | 21 | #Params for DNAT and SNAT rules created on the T0 router 22 | ops_mgr_dnat_ip: ((ops_mgr_dnat_ip)) 23 | infrastructure_network_snat_ip: ((infrastructure_network_snat_ip)) 24 | 25 | #Params to define a pool of IPs that will be used for dynamically created Organizations 26 | external_nat_ip_pool_cidr: ((external_nat_ip_pool_cidr)) 27 | external_nat_ip_pool_start_ip: ((external_nat_ip_pool_start_ip)) 28 | external_nat_ip_pool_end_ip: ((external_nat_ip_pool_end_ip)) 29 | 30 | vlan_uplink_switch_name: ((vlan_uplink_switch_name)) 31 | infrastructure_switch_name: ((infrastructure_switch_name)) 32 | deployment_switch_name: ((deployment_switch_name)) 33 | services_switch_name: ((services_switch_name)) 34 | dynamic_services_switch_name: ((dynamic_services_switch_name)) 35 | 36 | t0_router_name: ((t0_router_name)) 37 | infrastructure_router_name: ((infrastructure_router_name)) 38 | deployment_router_name: ((deployment_router_name)) 39 | services_router_name: ((services_router_name)) 40 | dynamic_services_router_name: ((dynamic_services_router_name)) 41 | -------------------------------------------------------------------------------- /pipelines/pcf/vsphere/nsxv/README.md: -------------------------------------------------------------------------------- 1 | # Configure NSX-V Components for use with PAS 2 | This sample pipeline will set up switches and an ESG ready to be used with PAS. 3 | 4 | ## How to use this pipeline 5 | 6 | 1) If you have CredHub or Vault integrated with Concourse, then update [`nsxv_vault_params.sh`](nsxv_vault_params.sh) with the required credentials and parameters and then run the script to create all required secrets in your credentials management software. 7 | Otherwise, update [params.yml](params.yml) with all the required parameters. 8 | 9 | 2) Create the pipeline in Concourse: 10 | 11 | `fly -t sp set-pipeline -p pcf-nsxv-config -c pipeline.yml -l params.yml` 12 | 13 | 3) Un-pause and run pipeline `pcf-nsxv-config` by manually triggering job `create-edge` 14 | -------------------------------------------------------------------------------- /pipelines/pcf/vsphere/nsxv/nsxv_vault_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set the path for the secrets below to be created in vault or credhub 4 | concourse_secrets_path="/concourse/team-name/pipeline-name" 5 | 6 | # VAULT or CREDHUB - targeted secrets management system 7 | targeted_system="VAULT" 8 | 9 | # This script assumes that: 10 | # 1) the credhub or vault CLI is installed 11 | # 2) you set up your vault or credhub target and login commands prior to invoking it 12 | # e.g. for VAULT 13 | # export VAULT_ADDR=https://myvaultdomain:8200 14 | # export VAULT_SKIP_VERIFY=true 15 | # export VAULT_TOKEN=vault-token 16 | # 17 | # e.g.
for CREDHUB 18 | # credhub login -s credhub-server-uri -u username -p password --skip-tls-validation 19 | 20 | ## UPDATE the secret entries below with the corresponding values for your PCF PKS environment 21 | 22 | secrets=( 23 | 24 | ## for vSPHERE deployments - ignore or remove entries if not applicable 25 | # vcenter credentials and properties 26 | "vcenter_usr"::"myvcenteruser@vsphere.local" 27 | "vcenter_pwd"::"myvcenterpassword" 28 | # vcenter hostname, do not include protocol information 29 | "vcenter_host"::"vcenter.domain.com" 30 | "vcenter_datacenter"::"Datacenter" 31 | "vcenter_datastore"::"mydatastore" 32 | 33 | "nsx_owner_name"::"owner_org_name" 34 | "nsxv_manager_address"::"mynsxv.domain.com" 35 | "nsxv_manager_admin_username"::"admin" 36 | "nsxv_manager_admin_password"::"password" 37 | "nsxv_gen_edge_name"::"nsxv_gen_edge_name" 38 | "nsxv_gen_edge_cluster"::"A-RPNSX" 39 | "nsxv_gen_mgr_transport_zone"::"tz-01" 40 | 41 | "num_logical_switches"::"5" 42 | "esg_default_uplink_ip_1"::"10.10.10.10" 43 | "esg_default_uplink_secondary_ips"::"10.10.10.11,10.10.10.12,10.10.10.13,10.10.10.14,10.10.10.15,10.10.10.16,10.10.10.17" 44 | "esg_default_uplink_pg_1"::"default_uplink_port_group_id" 45 | "esg_go_router_uplink_ip_1"::"10.10.10.13" 46 | "esg_snat_uplink_ip_1"::"10.10.10.12" 47 | "esg_opsmgr_uplink_ip_1"::"10.10.10.11" 48 | "nsx_edge_gen_nsx_manager_transport_zone_clusters"::"Cluster-A,Cluster-B,Cluster-C" 49 | # Certificate CN - has to match the CN in ssl_cert, including asterisks when applicable 50 | "ssl_cert_cn"::"my.domain.com" 51 | 52 | ) 53 | 54 | for i in "${secrets[@]}" 55 | do 56 | KEY="${i%%::*}" 57 | VALUE="${i##*::}" 58 | echo "Creating secret for [$KEY]" 59 | if [[ $targeted_system == "VAULT" ]]; then 60 | vault write "${concourse_secrets_path}/${KEY}" value="${VALUE}" 61 | else # CREDHUB 62 | credhub set -n "${concourse_secrets_path}/${KEY}" -v "${VALUE}" 63 | fi 64 | done 65 | 66 | cat << EOF > ssl_cert.crt 67 | -----BEGIN ----- 68 | ... 69 | -----END ----- 70 | EOF 71 | 72 | cat << EOF > ssl_private_key.pem 73 | -----BEGIN ----- 74 | ... 
75 | -----END ----- 76 | EOF 77 | 78 | certs=( 79 | # Optional PEM-encoded certificates to add to BOSH director 80 | "ssl_cert"::"ssl_cert.crt" 81 | "ssl_private_key"::"ssl_private_key.pem" 82 | ) 83 | 84 | for i in "${certs[@]}" 85 | do 86 | KEY="${i%%::*}" 87 | CERT_FILE="${i##*::}" 88 | echo "Creating certificate secret for [$KEY]" 89 | if [[ $targeted_system == "VAULT" ]]; then 90 | cat "$CERT_FILE" | vault write "${concourse_secrets_path}/${KEY}" value=- 91 | # else # CREDHUB - TBD 92 | # credhub set -n "${concourse_secrets_path}/${KEY}" -v "$(cat "$CERT_FILE")" 93 | fi 94 | done 95 | -------------------------------------------------------------------------------- /pipelines/pcf/vsphere/nsxv/params.yml: -------------------------------------------------------------------------------- 1 | vcenter_usr: ((vcenter_usr)) 2 | vcenter_pwd: ((vcenter_pwd)) 3 | vcenter_host: ((vcenter_host)) 4 | vcenter_data_center: ((vcenter_datacenter)) 5 | owner_name: ((nsx_owner_name)) 6 | nsx_edge_gen_nsx_manager_admin_user: ((nsxv_manager_admin_username)) 7 | nsx_edge_gen_nsx_manager_admin_passwd: ((nsxv_manager_admin_password)) 8 | nsx_edge_gen_nsx_manager_address: ((nsxv_manager_address)) 9 | nsx_edge_gen_egde_cluster: ((nsxv_gen_edge_cluster)) 10 | nsx_edge_gen_egde_datastore: ((vcenter_datastore)) 11 | nsx_edge_gen_name: ((nsxv_gen_edge_name)) 12 | nsx_edge_gen_nsx_manager_transport_zone: ((nsxv_gen_mgr_transport_zone)) 13 | num_logical_switches: ((num_logical_switches)) 14 | esg_default_uplink_ip_1: ((esg_default_uplink_ip_1)) 15 | esg_default_uplink_secondary_ips: ((esg_default_uplink_secondary_ips)) 16 | esg_default_uplink_pg_1: ((esg_default_uplink_pg_1)) 17 | esg_go_router_uplink_ip_1: ((esg_go_router_uplink_ip_1)) 18 | esg_snat_uplink_ip_1: ((esg_snat_uplink_ip_1)) 19 | esg_opsmgr_uplink_ip_1: ((esg_opsmgr_uplink_ip_1)) 20 | nsx_edge_gen_nsx_manager_transport_zone_clusters: ((nsx_edge_gen_nsx_manager_transport_zone_clusters)) 21 | ssl_cert_cn: ((ssl_cert_cn)) 22 | ssl_cert: ((ssl_cert)) 23 | ssl_private_key: ((ssl_private_key)) 24 | # The rest of the params are ignored for now 25 | router_instances: 26 | nsx_edge_gen_nsx_manager_distributed_portgroup: 27 | nsx_edge_gen_enable_dlr: 28 | nsx_edge_gen_bosh_nsx_enabled: 29 | mysql_proxy_instances: 30 | isozone_switch_name_1: 31 | isozone_switch_name_2: 32 | isozone_switch_cidr_1: 33 | iso_certs_ou_1: 34 | iso_certs_name_1: 35 | iso_certs_domains_1: 36 | iso_certs_country_1: 37 | esg_tcp_router_uplink_ip_1: 38 | esg_tcp_router_isozone_1_uplink_ip_1: 39 | esg_tcp_router_isozone_1_inst_1: 40 | esg_size: 41 | esg_ospf_password_1: 42 | esg_go_router_ssl_term_1: 43 | esg_go_router_isozone_1_uplink_ip_1: 44 | esg_go_router_isozone_1_ssl_term_1: 45 | esg_go_router_isozone_1_inst_1: 46 | esg_gateway_1: 47 | esg_diego_brain_uplink_ip_1: 48 | esg_cli_username_1: 49 | esg_cli_password_1: 50 | ert_certs_ou: 51 | ert_certs_name_1: 52 | ert_certs_country: 53 | diego_brain_instances: 54 | apps_domain: 55 | tile_rabbit_proxy_instances: 56 | tile_mysql_proxy_instances: 57 | tile_iso_ssl_private_key_1: 58 | tile_iso_ssl_cert_1: 59 | tcp_router_instances: 60 | system_domain: 61 | -------------------------------------------------------------------------------- /pipelines/vmware/install-harbor/global_params.yml: -------------------------------------------------------------------------------- 1 | ######## Generic global parameters 2 | ######## typically apply to all foundations and all tiles 3 | 4 | # Pivotal Network token for tile release download and pcf-pipelines download 5 |
pivnet_token: ((pivnet_token)) 6 | -------------------------------------------------------------------------------- /pipelines/vmware/install-harbor/pcf_params.yml: -------------------------------------------------------------------------------- 1 | ######## PCF Foundation-specific parameters 2 | 3 | # The IaaS name for the stemcell to download. This must match the IaaS name 4 | # within the stemcell, e.g. "vsphere", "aws", "azure", "google" (must be lowercase). 5 | iaas_type: ((iaas_type)) 6 | 7 | # Ops Manager information and admin credentials 8 | opsman_domain: ((opsman_domain)) 9 | opsman_admin_username: ((opsman_admin_username)) # Username for Ops Manager admin account 10 | opsman_admin_password: ((opsman_admin_password)) # Password for Ops Manager admin account 11 | # Either opsman_client_id/opsman_client_secret or opsman_admin_username/opsman_admin_password needs to be specified. 12 | # If you are using opsman_admin_username/opsman_admin_password, edit opsman_client_id/opsman_client_secret to be an empty value. 13 | # If you are using opsman_client_id/opsman_client_secret, edit opsman_admin_username/opsman_admin_password to be an empty value. 14 | opsman_client_id: ((opsman_client_id)) 15 | opsman_client_secret: ((opsman_client_secret)) 16 | 17 | az_1_name: ((az_1_name)) 18 | az_2_name: ((az_2_name)) 19 | az_3_name: ((az_3_name)) 20 | services_network_name: ((services_network_name)) 21 | dynamic_services_network_name: ((dynamic_services_network_name)) 22 | 23 | # pcf-pipelines legacy parameters - only used when the no_proxy option is used 24 | company_proxy_domain: 25 | opsman_ip_address: 26 | -------------------------------------------------------------------------------- /pipelines/vmware/install-harbor/vsphere/configure-harbor-api-lb/README.md: -------------------------------------------------------------------------------- 1 | # Configure NSX-V Load Balancer for Harbor API endpoints 2 | 3 | This sample pipeline configures an NSX-V Load Balancer for the Harbor API endpoints after the Harbor tile has been successfully deployed on top of an Ops Manager environment. 4 | 5 | It requires a reserved IP address to be assigned as a VIP/Virtual Server (parameter `nsxv_gen_vip_ip`). 6 | 7 | ## How to use this pipeline 8 | 9 | 1) If you have CredHub or Vault integrated with Concourse, then update [`harbor_api_nsxv_lb_params.sh`](harbor_api_nsxv_lb_params.sh) with the required credentials and then run the script to create all required secrets in your credentials management software. 10 | Otherwise, update [params.yml](params.yml) with all the required parameters.
11 | 12 | 2) Create the pipeline in Concourse: 13 | 14 | `fly -t sp set-pipeline -p harbor-api-config-nsxv -c pipeline.yml` 15 | 16 | 3) Unpause and run pipeline `harbor-api-config-nsxv` by manually triggering job `configure-lb-api-application-profile` 17 | -------------------------------------------------------------------------------- /pipelines/vmware/install-harbor/vsphere/configure-harbor-api-lb/harbor_api_nsxv_lb_params.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # set the path for the secrets below to be created in vault or credhub 4 | export concourse_root_secrets_path="/concourse" 5 | export concourse_team_name="team-name" 6 | export concourse_pipeline_name="harbor-api-config-nsxv" 7 | 8 | # VAULT or CREDHUB - targeted secrets management system 9 | export targeted_system="VAULT" 10 | # This script assumes that: 11 | # 1) the credhub or vault CLI is installed 12 | # 2) you set up your vault or credhub target and login commands prior to invoking it 13 | # e.g. for VAULT 14 | # export VAULT_ADDR=https://myvaultdomain:8200 15 | # export VAULT_SKIP_VERIFY=true 16 | # export VAULT_TOKEN=vault-token 17 | # 18 | # e.g. for CREDHUB 19 | # credhub login -s credhub-server-uri -u username -p password --skip-tls-validation 20 | 21 | ## 22 | ## TEAM level secrets (shared by all pipelines in that team) 23 | ## 24 | export team_secrets=( 25 | 26 | # vcenter hostname, do not include protocol information 27 | "vcenter_host"::"vcenter.domain.com" 28 | # vcenter credentials and properties 29 | "vcenter_usr"::"myvcenteruser@vsphere.local" 30 | "vcenter_pwd"::"myvcenterpassword" 31 | "vcenter_datacenter"::"Datacenter" 32 | "vcenter_datastore"::"mydatastore" 33 | 34 | "nsxv_manager_address"::"mynsxv.domain.com" 35 | "nsxv_manager_admin_username"::"admin" 36 | "nsxv_manager_admin_password"::"password" 37 | "nsxv_gen_edge_name"::"nsxv_gen_edge_name" 38 | "nsxv_gen_edge_cluster"::"Cluster-A" 39 | "nsxv_gen_mgr_transport_zone"::"nsxv_gen_mgr_transport_zone" 40 | "nsxv_gen_vip_ip"::"nsxv_gen_vip_ip" 41 | 42 | ) 43 | 44 | ## 45 | ## PIPELINE LEVEL secrets (specific to the pipeline) 46 | ## 47 | export pipeline_secrets=( 48 | 49 | "api_domain"::"harbor.domain.com" 50 | "api_cert_cn"::"*.domain.com" # match the existing certificate domain in vcenter 51 | "api_ips"::"192.168.24.100" 52 | 53 | ) 54 | 55 | main () { 56 | 57 | # team level secrets 58 | concourse_team_level_secrets_path="${concourse_root_secrets_path}/${concourse_team_name}" 59 | writeCredentials "${concourse_team_level_secrets_path}" "${team_secrets[*]}" 60 | 61 | # pipeline level secrets 62 | concourse_pipeline_level_secrets_path="${concourse_team_level_secrets_path}/${concourse_pipeline_name}" 63 | writeCredentials "${concourse_pipeline_level_secrets_path}" "${pipeline_secrets[*]}" 64 | 65 | } 66 | 67 | writeCredentials () { 68 | secretsPath=${1} 69 | secretsObject=(${2}) 70 | 71 | for i in "${secretsObject[@]}" 72 | do 73 | KEY="${i%%::*}" 74 | VALUE="${i##*::}" 75 | echo "Creating secret for [$KEY]" 76 | if [[ $targeted_system == "VAULT" ]]; then 77 | vault write "${secretsPath}/${KEY}" value="${VALUE}" 78 | else # CREDHUB 79 | credhub set -n "${secretsPath}/${KEY}" -v "${VALUE}" 80 | fi 81 | done 82 | } 83 | 84 | main 85 | -------------------------------------------------------------------------------- /pipelines/vmware/install-harbor/vsphere/configure-harbor-api-lb/params.yml: -------------------------------------------------------------------------------- 1 | api_domain: ((api_domain)) 2 |
api_cert_cn: ((api_cert_cn)) 3 | api_ips: ((api_ips)) 4 | 5 | vcenter_host: ((vcenter_host)) 6 | vcenter_usr: ((vcenter_usr)) 7 | vcenter_pwd: ((vcenter_pwd)) 8 | vcenter_datacenter: ((vcenter_datacenter)) 9 | nsxv_manager_address: ((nsxv_manager_address)) 10 | nsxv_manager_admin_username: ((nsxv_manager_admin_username)) 11 | nsxv_manager_admin_password: ((nsxv_manager_admin_password)) 12 | nsxv_gen_mgr_transport_zone: ((nsxv_gen_mgr_transport_zone)) 13 | nsxv_gen_edge_cluster: ((nsxv_gen_edge_cluster)) 14 | vcenter_datastore: ((vcenter_datastore)) 15 | nsxv_gen_edge_name: ((nsxv_gen_edge_name)) 16 | nsxv_gen_vip_ip: ((nsxv_gen_vip_ip)) 17 | -------------------------------------------------------------------------------- /tasks/concourse/will-worker-connect/README.md: -------------------------------------------------------------------------------- 1 | # Concourse Connectivity 2 | 3 | ## Test Worker Connectivity 4 | 5 | This sample task can be used to test connectivity from Concourse workers to a routable network address. 6 | 7 | Under the covers, the task simply issues a `curl` command targeted at the provided `CONNECT_TO_URL` parameter, with extra command options from `EXTRA_COMMAND_PARAMETERS`. 8 | 9 | 10 | #### What to use this task for 11 | 12 | To quickly test the connectivity from Concourse workers (or even from a specific set of tagged workers when using the `tag` option) to a defined domain name or IP address. 13 | 14 | 15 | #### How to use this task 16 | 17 | This task can be used as part of a pipeline (see the sketch at the end of this README), but the easiest way to use it is with the `fly execute` command. 18 | 19 | 1. Clone this repository and then cd into the parent folder of this README. 20 | e.g. `cd concourse-pipeline-samples/tasks/concourse/will-worker-connect` 21 | 22 | 2. Using the `fly` CLI, log in to Concourse 23 | 24 | e.g. `fly -t mytarget login` 25 | 26 | 3. Set the task parameters as environment variables 27 | 28 | `export CONNECT_TO_URL="http://google.com"` 29 | `export EXTRA_COMMAND_PARAMETERS="-k --connect-timeout 10"` 30 | 31 | 4. Run the `fly execute` command for the task 32 | 33 | e.g. to test connectivity from a default worker: 34 | `fly -t mytarget execute -c ./task.yml` 35 | 36 | e.g. to test connectivity from a tagged worker: 37 | `fly -t mytarget execute -c ./task.yml --tag my-worker-tag` 38 | 39 | The task will be executed and the connection test result from the `curl` command will be displayed in the command's output.
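For pipeline use, a job along these lines should work. This is a minimal sketch: the `concourse-pipeline-samples` resource name and repository URI are assumptions, so adjust them to wherever you keep this task.

```yaml
resources:
- name: concourse-pipeline-samples   # git resource pointing at this repository (assumed URI)
  type: git
  source:
    uri: https://github.com/pivotalservices/concourse-pipeline-samples.git

jobs:
- name: test-worker-connectivity
  plan:
  - get: concourse-pipeline-samples
  - task: will-worker-connect
    file: concourse-pipeline-samples/tasks/concourse/will-worker-connect/task.yml
    params:
      CONNECT_TO_URL: http://google.com
      EXTRA_COMMAND_PARAMETERS: "-k --connect-timeout 10"
```

Add `tags: [my-worker-tag]` to the task step to run the test from a tagged worker.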
40 | -------------------------------------------------------------------------------- /tasks/concourse/will-worker-connect/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: concourse/buildroot 8 | tag: curl 9 | 10 | run: 11 | path: bash 12 | args: 13 | - -ec 14 | - | 15 | set -e 16 | echo "Trying to curl $CONNECT_TO_URL with parameters [$EXTRA_COMMAND_PARAMETERS]" 17 | set -x 18 | curl "$CONNECT_TO_URL" $EXTRA_COMMAND_PARAMETERS 19 | 20 | params: 21 | CONNECT_TO_URL: 22 | EXTRA_COMMAND_PARAMETERS: 23 | -------------------------------------------------------------------------------- /tasks/generate-yaml-file.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: pivotalservices/bosh2-docker 7 | 8 | params: 9 | FILENAME: 10 | YAML: 11 | 12 | outputs: 13 | - name: yaml_output 14 | 15 | run: 16 | path: bash 17 | args: 18 | - "-c" 19 | - | 20 | # Nasty sed command is needed to replace physical newlines, which bosh 21 | # will interpolate as spaces, with \n 22 | bosh int <(echo "${YAML}" | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g') > yaml_output/${FILENAME} 23 | -------------------------------------------------------------------------------- /tasks/pcf/apply-changes-single-product/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | echo "Applying changes to ${PRODUCT_NAME} on Ops Manager @ ${OPSMAN_DOMAIN_OR_IP_ADDRESS}" 6 | 7 | om-linux version 8 | 9 | # Current version of om-linux available on the image resource does not have the om version with selective deploy.
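# ("Selective deploy" refers to the `apply-changes --product-name` flag used at the end of this script.)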
10 | # This is a hack to pull down the latest version 11 | curl -sSL -o om-linux $(curl -s https://api.github.com/repos/pivotal-cf/om/releases/latest | jq -r -c ".assets[] | .browser_download_url" | grep linux) && chmod +x om-linux 12 | ./om-linux version 13 | 14 | ./om-linux \ 15 | --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \ 16 | --skip-ssl-validation \ 17 | --client-id "${OPSMAN_CLIENT_ID}" \ 18 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 19 | --username "${OPSMAN_USERNAME}" \ 20 | --password "${OPSMAN_PASSWORD}" \ 21 | apply-changes \ 22 | --product-name "${PRODUCT_NAME}" \ 23 | --ignore-warnings 24 | -------------------------------------------------------------------------------- /tasks/pcf/apply-changes-single-product/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pcfnorm/rootfs 8 | tag: release-candidate 9 | 10 | inputs: 11 | - name: pcf-pipelines-utils 12 | 13 | params: 14 | OPSMAN_CLIENT_ID: 15 | OPSMAN_CLIENT_SECRET: 16 | OPSMAN_USERNAME: 17 | OPSMAN_PASSWORD: 18 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 19 | PRODUCT_NAME: 20 | 21 | run: 22 | path: pcf-pipelines-utils/tasks/pcf/apply-changes-single-product/task.sh 23 | -------------------------------------------------------------------------------- /tasks/pcf/apply-changes/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | echo "Applying changes on Ops Manager @ ${OPSMAN_DOMAIN_OR_IP_ADDRESS}" 6 | 7 | om-linux \ 8 | --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \ 9 | --skip-ssl-validation \ 10 | --client-id "${OPSMAN_CLIENT_ID}" \ 11 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 12 | --username "${OPSMAN_USERNAME}" \ 13 | --password "${OPSMAN_PASSWORD}" \ 14 | apply-changes \ 15 | --ignore-warnings 16 | -------------------------------------------------------------------------------- /tasks/pcf/apply-changes/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: {repository: czero/rootfs} 7 | 8 | inputs: 9 | - name: pcf-pipelines-utils 10 | 11 | params: 12 | OPSMAN_CLIENT_ID: 13 | OPSMAN_CLIENT_SECRET: 14 | OPSMAN_USERNAME: 15 | OPSMAN_PASSWORD: 16 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 17 | 18 | run: 19 | path: pcf-pipelines-utils/tasks/pcf/apply-changes/task.sh 20 | -------------------------------------------------------------------------------- /tasks/pcf/certificates/check-expiring-certificates/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: czero/rootfs 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | outputs: 13 | - name: expiring_certs 14 | 15 | run: 16 | path: pcf-pipelines-utils/tasks/pcf/certificates/check-expiring-certificates/task.sh 17 | 18 | params: 19 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 20 | OPSMAN_USERNAME: 21 | OPSMAN_PASSWORD: 22 | OPSMAN_CLIENT_ID: 23 | OPSMAN_CLIENT_SECRET: 24 | EXPIRATION_TIME_FRAME: 25 | -------------------------------------------------------------------------------- /tasks/pcf/certificates/regenerate-internal-certificates/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # This task will invoke the regenerate non-configurable certificates
API endpoint from Ops Manager 5 | # PCF Documentation: https://docs.pivotal.io/pivotalcf/security/pcf-infrastructure/api-cert-rotation.html 6 | 7 | main() { 8 | 9 | echo "Rotating non-configurable certificates for ${OPSMAN_DOMAIN_OR_IP_ADDRESS}..." 10 | om-linux \ 11 | --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \ 12 | --skip-ssl-validation \ 13 | --client-id "${OPSMAN_CLIENT_ID}" \ 14 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 15 | --username "${OPSMAN_USERNAME}" \ 16 | --password "${OPSMAN_PASSWORD}" \ 17 | curl \ 18 | --path /api/v0/certificate_authorities/active/regenerate \ 19 | --request POST 20 | 21 | } 22 | 23 | main 24 | -------------------------------------------------------------------------------- /tasks/pcf/certificates/regenerate-internal-certificates/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: czero/rootfs 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/certificates/regenerate-internal-certificates/task.sh 14 | 15 | params: 16 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 17 | OPSMAN_USERNAME: 18 | OPSMAN_PASSWORD: 19 | OPSMAN_CLIENT_ID: 20 | OPSMAN_CLIENT_SECRET: 21 | -------------------------------------------------------------------------------- /tasks/pcf/configure-tile/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: czero/rootfs 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/configure-tile/task.sh 14 | 15 | params: 16 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 17 | OPSMAN_USERNAME: 18 | OPSMAN_PASSWORD: 19 | OPSMAN_CLIENT_ID: 20 | OPSMAN_CLIENT_SECRET: 21 | TILE_PROPERTIES: 22 | TILE_NETWORK: 23 | TILE_RESOURCES: 24 | TILE_PRODUCT_NAME: 25 | -------------------------------------------------------------------------------- /tasks/pcf/delete-tile/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | main() { 5 | 6 | # find tile version installed 7 | echo "Retrieving currently deployed version of ${TILE_PRODUCT_NAME}" 8 | product_version=$(om-linux \ 9 | --target https://$OPSMAN_DOMAIN_OR_IP_ADDRESS \ 10 | --client-id "${OPSMAN_CLIENT_ID}" \ 11 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 12 | --username "$OPSMAN_USERNAME" \ 13 | --password "$OPSMAN_PASSWORD" \ 14 | --skip-ssl-validation -tr \ 15 | deployed-products | grep ${TILE_PRODUCT_NAME} | cut -d "|" -f 3 | tr -d " ") 16 | 17 | echo "Deleting product [${TILE_PRODUCT_NAME}], version [${product_version}], from ${OPSMAN_DOMAIN_OR_IP_ADDRESS}" 18 | 19 | om-linux \ 20 | --target https://$OPSMAN_DOMAIN_OR_IP_ADDRESS \ 21 | --client-id "${OPSMAN_CLIENT_ID}" \ 22 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 23 | --username "$OPSMAN_USERNAME" \ 24 | --password "$OPSMAN_PASSWORD" \ 25 | --skip-ssl-validation -tr \ 26 | delete-product \ 27 | --product-name "$TILE_PRODUCT_NAME" \ 28 | --product-version "$product_version" 29 | 30 | } 31 | 32 | main 33 | -------------------------------------------------------------------------------- /tasks/pcf/delete-tile/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: czero/rootfs 8 | 9 | inputs:
10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/delete-tile/task.sh 14 | 15 | params: 16 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 17 | OPSMAN_USERNAME: 18 | OPSMAN_PASSWORD: 19 | OPSMAN_CLIENT_ID: 20 | OPSMAN_CLIENT_SECRET: 21 | TILE_PRODUCT_NAME: 22 | -------------------------------------------------------------------------------- /tasks/pcf/disable-errands/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | if [[ -z "$ERRANDS_TO_DISABLE" ]] || [[ "$ERRANDS_TO_DISABLE" == "none" ]]; then 6 | echo Nothing to do. 7 | exit 0 8 | fi 9 | 10 | enabled_errands=$( 11 | om-linux \ 12 | --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \ 13 | --skip-ssl-validation \ 14 | --client-id "${OPSMAN_CLIENT_ID}" \ 15 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 16 | --username "$OPSMAN_USERNAME" \ 17 | --password "$OPSMAN_PASSWORD" \ 18 | errands \ 19 | --product-name "$PRODUCT_NAME" | 20 | tail -n+4 | head -n-1 | grep -v false | cut -d'|' -f2 | tr -d ' ' 21 | ) 22 | 23 | if [[ "$ERRANDS_TO_DISABLE" == "all" ]]; then 24 | errands_to_disable="${enabled_errands[@]}" 25 | else 26 | errands_to_disable=$(echo "$ERRANDS_TO_DISABLE" | tr ',' '\n') 27 | fi 28 | 29 | will_disable=$( 30 | echo $enabled_errands | 31 | jq \ 32 | --arg to_disable "${errands_to_disable[@]}" \ 33 | --raw-input \ 34 | --raw-output \ 35 | 'split(" ") 36 | | reduce .[] as $errand ([]; 37 | if $to_disable | contains($errand) then 38 | . + [$errand] 39 | else 40 | . 41 | end) 42 | | join("\n")' 43 | ) 44 | 45 | if [ -z "$will_disable" ]; then 46 | echo Nothing to do. 47 | exit 0 48 | fi 49 | 50 | while read errand; do 51 | echo -n Disabling $errand... 52 | om-linux \ 53 | --target "https://${OPSMAN_DOMAIN_OR_IP_ADDRESS}" \ 54 | --skip-ssl-validation \ 55 | --client-id "${OPSMAN_CLIENT_ID}" \ 56 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 57 | --username "$OPSMAN_USERNAME" \ 58 | --password "$OPSMAN_PASSWORD" \ 59 | set-errand-state \ 60 | --product-name "$PRODUCT_NAME" \ 61 | --errand-name $errand \ 62 | --post-deploy-state "disabled" 63 | echo done 64 | done < <(echo "$will_disable") 65 | -------------------------------------------------------------------------------- /tasks/pcf/disable-errands/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: {repository: pcfnorm/rootfs} 7 | 8 | inputs: 9 | - name: pcf-pipelines 10 | 11 | params: 12 | PRODUCT_NAME: 13 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 14 | OPSMAN_CLIENT_ID: 15 | OPSMAN_CLIENT_SECRET: 16 | OPSMAN_USERNAME: 17 | OPSMAN_PASSWORD: 18 | ERRANDS_TO_DISABLE: 19 | 20 | run: 21 | path: pcf-pipelines/tasks/pcf/disable-errands/task.sh 22 | -------------------------------------------------------------------------------- /tasks/pcf/pks/configure-pks-cli-user/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | echo "Note - pre-requisite for this task to work:" 5 | echo "- Your PKS API endpoint [$PKS_API_DOMAIN] should be routable and accessible from the Concourse worker(s) network." 
6 | echo "- See PKS tile documentation for configuration details for vSphere [https://docs.pivotal.io/runtimes/pks/1-0/installing-pks-vsphere.html#loadbalancer-pks-api] and GCP [https://docs.pivotal.io/runtimes/pks/1-0/installing-pks-gcp.html#loadbalancer-pks-api]" 7 | 8 | echo "Retrieving PKS tile properties from Ops Manager [https://$OPSMAN_DOMAIN_OR_IP_ADDRESS]..." 9 | # get PKS UAA admin credentails from OpsMgr 10 | PRODUCTS=$(om-linux --target "https://$OPSMAN_DOMAIN_OR_IP_ADDRESS" --client-id "${OPSMAN_CLIENT_ID}" --client-secret "${OPSMAN_CLIENT_SECRET}" --username "$OPSMAN_USERNAME" --password "$OPSMAN_PASSWORD" --skip-ssl-validation curl -p /api/v0/staged/products) 11 | PKS_GUID=$(echo "$PRODUCTS" | jq -r '.[] | .guid' | grep pivotal-container-service) 12 | PKS_VERSION=$(echo "$PRODUCTS" | jq --arg PKS_GUID "$PKS_GUID" -r '.[] | select(.guid==$PKS_GUID) | .product_version') 13 | 14 | PKS_UAA_ADMIN_SECRET_FIELD=".properties.pks_uaa_management_admin_client" # for 1.1+ 15 | if [[ ${PKS_VERSION:0:3} == "1.0" ]]; then 16 | PKS_UAA_ADMIN_SECRET_FIELD=".properties.uaa_admin_secret" # for 1.0.x 17 | fi 18 | 19 | UAA_ADMIN_SECRET=$(om-linux --target "https://$OPSMAN_DOMAIN_OR_IP_ADDRESS" --client-id "${OPSMAN_CLIENT_ID}" --client-secret "${OPSMAN_CLIENT_SECRET}" --username "$OPSMAN_USERNAME" --password "$OPSMAN_PASSWORD" --skip-ssl-validation curl -p /api/v0/deployed/products/$PKS_GUID/credentials/$PKS_UAA_ADMIN_SECRET_FIELD | jq -rc '.credential.value.secret') 20 | 21 | echo "Connecting to PKS UAA server [<$PKS_API_DOMAIN>]..." 22 | # login to PKS UAA 23 | uaac target https://$PKS_API_DOMAIN:8443 --skip-ssl-validation 24 | uaac token client get admin --secret $UAA_ADMIN_SECRET 25 | 26 | echo "Creating PKS CLI administrator user per PK tile documentation https://docs.pivotal.io/runtimes/pks/1-0/manage-users.html#uaa-scopes" 27 | # create pks admin user 28 | uaac user add "$PKS_CLI_USERNAME" --emails "$PKS_CLI_USEREMAIL" -p "$PKS_CLI_PASSWORD" 29 | uaac member add pks.clusters.admin "$PKS_CLI_USERNAME" 30 | uaac member add pks.clusters.manage "$PKS_CLI_USERNAME" 31 | 32 | echo "PKS CLI administrator user [$PKS_CLI_USERNAME] successfully created." 
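# To verify the new user and its group memberships while the uaac session is still active:
#   uaac user get "$PKS_CLI_USERNAME"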
33 | 34 | echo "Next, download the PKS CLI from Pivotal Network and login to the PKS API to create a new K8s cluster [https://docs.pivotal.io/runtimes/pks/1-0/create-cluster.html]" 35 | echo "Example: " 36 | echo " pks login -a $PKS_API_DOMAIN -u $PKS_CLI_USERNAME -p " 37 | -------------------------------------------------------------------------------- /tasks/pcf/pks/configure-pks-cli-user/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: czero/rootfs 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/pks/configure-pks-cli-user/task.sh 14 | 15 | params: 16 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 17 | OPSMAN_USERNAME: 18 | OPSMAN_PASSWORD: 19 | OPSMAN_CLIENT_ID: 20 | OPSMAN_CLIENT_SECRET: 21 | PKS_API_DOMAIN: 22 | PKS_CLI_USERNAME: 23 | PKS_CLI_USEREMAIL: 24 | PKS_CLI_PASSWORD: 25 | -------------------------------------------------------------------------------- /tasks/pcf/pks/create-pks-cluster/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | echo "Login to PKS API [$PCF_PKS_API]" 4 | pks login -a "$PCF_PKS_API" -u "$PKS_CLI_USERNAME" -p "$PKS_CLI_PASSWORD" --skip-ssl-validation # TBD --ca-cert CERT-PATH 5 | 6 | echo "Creating PKS cluster [$PKS_CLUSTER_NAME], master node hostname [$PKS_CLUSTER_MASTER_HOSTNAME], plan [$PKS_SERVICE_PLAN_NAME], number of workers [$PKS_CLUSTER_NUMBER_OF_WORKERS]" 7 | pks create-cluster "$PKS_CLUSTER_NAME" --external-hostname "$PKS_CLUSTER_MASTER_HOSTNAME" --plan "$PKS_SERVICE_PLAN_NAME" --num-nodes "$PKS_CLUSTER_NUMBER_OF_WORKERS" 8 | 9 | echo "Monitoring the creation status for PKS cluster [$PKS_CLUSTER_NAME]:" 10 | in_progress_state="in progress" 11 | succeeded_state="succeeded" 12 | cluster_state="$in_progress_state" 13 | 14 | while [[ "$cluster_state" == "$in_progress_state" ]]; do 15 | cluster_state=$(pks cluster "$PKS_CLUSTER_NAME" --json | jq -rc '.last_action_state') 16 | echo "${cluster_state}..." 
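# wait before polling again; the loop exits once last_action_state is no longer "in progress"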
17 | sleep 10 18 | done 19 | 20 | last_action_description=$(pks cluster "$PKS_CLUSTER_NAME" --json | jq -rc '.last_action_description') 21 | 22 | if [[ "$cluster_state" == "$succeeded_state" ]]; then 23 | echo "Successfully created cluster [$PKS_CLUSTER_NAME], last_action_state=[$cluster_state], last_action_description=[$last_action_description]" 24 | pks cluster "$PKS_CLUSTER_NAME" 25 | echo "Next step: make sure that the external hostname configured for the cluster [$PKS_CLUSTER_MASTER_HOSTNAME] is accessible from a DNS/network standpoint, so it can be managed with 'kubectl'" 26 | else 27 | echo "Error creating cluster [$PKS_CLUSTER_NAME], last_action_state=[$cluster_state], last_action_description=[$last_action_description]" 28 | exit 1 29 | fi 30 | -------------------------------------------------------------------------------- /tasks/pcf/pks/create-pks-cluster/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pks-kubectl 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/pks/create-pks-cluster/task.sh 14 | 15 | params: 16 | PCF_PKS_API: 17 | PKS_CLI_USERNAME: 18 | PKS_CLI_PASSWORD: 19 | PKS_SERVICE_PLAN_NAME: 20 | PKS_CLUSTER_NAME: 21 | PKS_CLUSTER_MASTER_HOSTNAME: 22 | PKS_CLUSTER_NUMBER_OF_WORKERS: 23 | -------------------------------------------------------------------------------- /tasks/pcf/pks/delete-all-pks-clusters/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | 4 | if [[ "$DEBUG" == "true" ]]; then 5 | set -x 6 | fi 7 | 8 | echo "Logging in to PKS API [$PCF_PKS_API]" 9 | pks login -a "$PCF_PKS_API" -u "$PKS_CLI_USERNAME" -p "$PKS_CLI_PASSWORD" --skip-ssl-validation # TBD --ca-cert CERT-PATH 10 | 11 | echo "List all PKS clusters" 12 | pks clusters --json | jq -rc '.[] | .name' > list_of_clusters.txt 13 | cat list_of_clusters.txt 14 | 15 | while read clustername; do 16 | echo "Deleting PKS cluster [$clustername]..." 17 | 18 | pks delete-cluster "$clustername" 19 | 20 | echo "Monitoring the deletion status for PKS cluster [$clustername]" 21 | in_progress_state="in progress" 22 | cluster_state="$in_progress_state" 23 | 24 | while [[ "$cluster_state" == "$in_progress_state" ]]; do 25 | echo "status: [$cluster_state]..." 26 | sleep 5 27 | cluster_state=$(pks clusters --json | jq --arg clustername "$clustername" -rc '.[] | select(.name==$clustername) | .last_action_state') 28 | done 29 | echo "status on exit: [$cluster_state]..." 30 | 31 | # check if the cluster to be deleted still exists after the delete attempt 32 | if [[ $(pks clusters --json | jq -rc '.[].name' | grep "$clustername") ]]; then 33 | last_action_description=$(pks clusters --json | jq --arg clustername "$clustername" -rc '.[] | select(.name==$clustername) | .last_action_description') 34 | echo "Error deleting cluster [$clustername], last_action_state=[$cluster_state], last_action_description=[$last_action_description]" 35 | exit 1 36 | else 37 | echo "Successfully deleted cluster [$clustername]" 38 | echo "Current list of PKS clusters:" 39 | pks clusters --json 40 | fi 41 | 42 | done < list_of_clusters.txt -------------------------------------------------------------------------------- /tasks/pcf/pks/delete-pks-cluster/task.sh: -------------------------------------------------------------------------------- 15 | cluster_state=$(pks cluster "$PKS_CLUSTER_NAME" --json 2>/dev/null | jq -rc '.last_action_state') 16 | echo "status: [$cluster_state]..."
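# wait before polling again; the state comes back empty once the cluster no longer exists, which also ends the loop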
17 | sleep 5 18 | done 19 | 20 | cluster_exists=$(pks clusters --json | jq -rc '.[].name' | grep "$PKS_CLUSTER_NAME" || true) 21 | 22 | if [[ "$cluster_exists" == "" ]]; then 23 | echo "Successfully deleted cluster [$PKS_CLUSTER_NAME]" 24 | echo "Current list of PKS clusters:" 25 | pks clusters --json 26 | else 27 | last_action_description=$(pks cluster "$PKS_CLUSTER_NAME" --json | jq -rc '.last_action_description') 28 | echo "Error deleting cluster [$PKS_CLUSTER_NAME], last_action_state=[$cluster_state], last_action_description=[$last_action_description]" 29 | exit 1 30 | fi 31 | -------------------------------------------------------------------------------- /tasks/pcf/pks/delete-pks-cluster/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pks-kubectl 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/pcf/pks/delete-pks-cluster/task.sh 14 | 15 | params: 16 | PCF_PKS_API: 17 | PKS_CLI_USERNAME: 18 | PKS_CLI_PASSWORD: 19 | PKS_CLUSTER_NAME: 20 | -------------------------------------------------------------------------------- /tasks/pcf/stage-product/task.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2017-Present Pivotal Software, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and
14 | 15 | --- 16 | platform: linux 17 | 18 | image_resource: 19 | type: docker-image 20 | source: {repository: pcfnorm/rootfs} 21 | 22 | inputs: 23 | - name: pcf-pipelines 24 | - name: pivnet-product 25 | 26 | params: 27 | OPSMAN_CLIENT_ID: 28 | OPSMAN_CLIENT_SECRET: 29 | OPSMAN_USERNAME: 30 | OPSMAN_PASSWORD: 31 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 32 | 33 | run: 34 | path: pcf-pipelines/tasks/pcf/stage-product/task.sh 35 | -------------------------------------------------------------------------------- /tasks/pcf/upload-product-and-stemcell/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | if [[ -n "$NO_PROXY" ]]; then 6 | echo "$OM_IP $OPSMAN_DOMAIN_OR_IP_ADDRESS" >> /etc/hosts 7 | fi 8 | 9 | STEMCELL_VERSION=$( 10 | cat ./pivnet-product/metadata.json | 11 | jq --raw-output \ 12 | ' 13 | [ 14 | .Dependencies[] 15 | | select(.Release.Product.Name | contains("Stemcells")) 16 | | .Release.Version 17 | ] 18 | | map(split(".") | map(tonumber)) 19 | | transpose | transpose 20 | | max // empty 21 | | map(tostring) 22 | | join(".") 23 | ' 24 | ) 25 | 26 | if [ -n "$STEMCELL_VERSION" ]; then 27 | diagnostic_report=$( 28 | om-linux \ 29 | --target https://$OPSMAN_DOMAIN_OR_IP_ADDRESS \ 30 | --client-id "${OPSMAN_CLIENT_ID}" \ 31 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 32 | --username "$OPS_MGR_USR" \ 33 | --password "$OPS_MGR_PWD" \ 34 | --skip-ssl-validation \ 35 | curl --silent --path "/api/v0/diagnostic_report" 36 | ) 37 | 38 | stemcell=$( 39 | echo $diagnostic_report | 40 | jq \ 41 | --arg version "$STEMCELL_VERSION" \ 42 | --arg glob "$IAAS" \ 43 | '.stemcells[] | select(contains($version) and contains($glob))' 44 | ) 45 | 46 | if [[ -z "$stemcell" ]]; then 47 | echo "Downloading stemcell $STEMCELL_VERSION" 48 | 49 | product_slug=$( 50 | jq --raw-output \ 51 | ' 52 | if any(.Dependencies[]; select(.Release.Product.Name | contains("Stemcells for PCF (Windows)"))) then 53 | "stemcells-windows-server" 54 | elif any(.Dependencies[]; select(.Release.Product.Name | contains("Stemcells for PCF (Ubuntu Xenial)"))) then 55 | "stemcells-ubuntu-xenial" 56 | else 57 | "stemcells" 58 | end 59 | ' < pivnet-product/metadata.json 60 | ) 61 | 62 | pivnet-cli login --api-token="$PIVNET_API_TOKEN" 63 | pivnet-cli download-product-files -p "$product_slug" -r $STEMCELL_VERSION -g "*${IAAS}*" --accept-eula 64 | 65 | SC_FILE_PATH=`find ./ -name *.tgz` 66 | 67 | if [ ! -f "$SC_FILE_PATH" ]; then 68 | echo "Stemcell file not found!" 69 | exit 1 70 | fi 71 | 72 | om-linux -t https://$OPSMAN_DOMAIN_OR_IP_ADDRESS \ 73 | --client-id "${OPSMAN_CLIENT_ID}" \ 74 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 75 | -u "$OPS_MGR_USR" \ 76 | -p "$OPS_MGR_PWD" \ 77 | -k \ 78 | upload-stemcell \ 79 | -s $SC_FILE_PATH 80 | fi 81 | fi 82 | 83 | # Should the slug contain more than one product, pick only the first. 
84 | FILE_PATH=`find ./pivnet-product -name "*.pivotal" | sort | head -1` 85 | om-linux -t https://$OPSMAN_DOMAIN_OR_IP_ADDRESS \ 86 | --client-id "${OPSMAN_CLIENT_ID}" \ 87 | --client-secret "${OPSMAN_CLIENT_SECRET}" \ 88 | -u "$OPS_MGR_USR" \ 89 | -p "$OPS_MGR_PWD" \ 90 | -k \ 91 | --request-timeout 3600 \ 92 | upload-product \ 93 | -p $FILE_PATH 94 | -------------------------------------------------------------------------------- /tasks/pcf/upload-product-and-stemcell/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | platform: linux 4 | 5 | image_resource: 6 | type: docker-image 7 | source: {repository: pcfnorm/rootfs} 8 | 9 | params: 10 | OPSMAN_DOMAIN_OR_IP_ADDRESS: 11 | OPSMAN_CLIENT_ID: 12 | OPSMAN_CLIENT_SECRET: 13 | OPS_MGR_USR: 14 | OPS_MGR_PWD: 15 | PIVNET_API_TOKEN: 16 | NO_PROXY: 17 | OM_IP: 18 | IAAS: 19 | 20 | inputs: 21 | - name: pivnet-product 22 | - name: pcf-pipelines 23 | 24 | run: 25 | path: pcf-pipelines/tasks/pcf/upload-product-and-stemcell/task.sh 26 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-ip-block/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | pynsxt_local pool create_ip_block \ 18 | -n $IP_BLOCK_NAME \ 19 | -c "$IP_BLOCK_CIDR" \ 20 | -tag "ncp/cluster=$PCF_FOUNDATION_NAME" 21 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-ip-block/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path:
pcf-pipelines-utils/tasks/vsphere/nsxt/create-ip-pool/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | IP_POOL_NAME: 20 | IP_POOL_CIDR: 21 | IP_POOL_START_IP: 22 | IP_POOL_END_IP: 23 | PCF_FOUNDATION_NAME: 24 | EXTERNAL: 25 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-router-port/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | pynsxt_local routing create_router_port \ 18 | -n $LOGICAL_ROUTER_PORT_NAME \ 19 | -rpt $LOGICAL_ROUTER_PORT_TYPE \ 20 | -ls $LOGICAL_SWITCH_NAME \ 21 | -lr $LOGICAL_ROUTER_NAME \ 22 | -ip $LOGICAL_ROUTER_PORT_IP \ 23 | -mask $LOGICAL_ROUTER_PORT_IP_MASK 24 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-router-port/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxt/create-logical-router-port/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | LOGICAL_ROUTER_PORT_NAME: 20 | LOGICAL_ROUTER_PORT_TYPE: 21 | LOGICAL_SWITCH_NAME: 22 | LOGICAL_ROUTER_NAME: 23 | LOGICAL_ROUTER_PORT_IP: 24 | LOGICAL_ROUTER_PORT_IP_MASK: 25 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-router/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | pynsxt_local routing create_router \ 18 | -n $LOGICAL_ROUTER_NAME \ 19 | -t $LOGICAL_ROUTER_TYPE \ 20 | -ec $EDGE_CLUSTER_NAME \ 21 | -t0 $T0_ROUTER_NAME \ 22 | -tag "ncp/cluster=$PCF_FOUNDATION_NAME" 23 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-router/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxt/create-logical-router/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | LOGICAL_ROUTER_NAME: 20 | LOGICAL_ROUTER_TYPE: 21 | EDGE_CLUSTER_NAME: 22 | T0_ROUTER_NAME: 23 | PCF_FOUNDATION_NAME: 24 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-switch/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 
-eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | if [ -n "$LOGICAL_SWITCH_VLAN" ] 18 | then 19 | pynsxt_local switch create \ 20 | -n $LOGICAL_SWITCH_NAME \ 21 | -t $LOGICAL_SWITCH_TRANSPORT_ZONE \ 22 | -vlan $LOGICAL_SWITCH_VLAN 23 | else 24 | pynsxt_local switch create \ 25 | -n $LOGICAL_SWITCH_NAME \ 26 | -t $LOGICAL_SWITCH_TRANSPORT_ZONE 27 | fi 28 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-logical-switch/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxt/create-logical-switch/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | LOGICAL_SWITCH_NAME: 20 | LOGICAL_SWITCH_TRANSPORT_ZONE: 21 | LOGICAL_SWITCH_VLAN: 22 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-nat-rule/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | pynsxt_local routing create_nat_rule \ 18 | -lr $LOGICAL_ROUTER_NAME \ 19 | -a $NAT_TYPE \ 20 | -oip $ORIGINAL_IP \ 21 | -tip $TRANSLATED_IP \ 22 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-nat-rule/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxt/create-nat-rule/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | LOGICAL_ROUTER_NAME: 20 | NAT_TYPE: 21 | ORIGINAL_IP: 22 | TRANSLATED_IP: 23 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-static-route/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | cat << EOF > /opt/pynsxt/nsx.ini 4 | [nsxv] 5 | nsx_manager = https://$NSX_MANAGER_ADDRESS/api/v1 6 | nsx_username = $NSX_MANAGER_USERNAME 7 | nsx_password = $NSX_MANAGER_PASSWORD 8 | 9 | EOF 10 | 11 | pushd /opt/pynsxt 12 | 13 | pynsxt_local() { 14 | python /opt/pynsxt/cli.py "$@" 15 | } 16 | 17 | pynsxt_local routing create_static_route \ 18 | -lr $LOGICAL_ROUTER_NAME \ 19 | -network "$STATIC_ROUTE_NETWORK" \ 20 | -next_hop $NEXT_HOP 21 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxt/create-static-route/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: 
linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxt 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxt/create-static-route/task.sh 14 | 15 | params: 16 | NSX_MANAGER_ADDRESS: 17 | NSX_MANAGER_USERNAME: 18 | NSX_MANAGER_PASSWORD: 19 | LOGICAL_ROUTER_NAME: 20 | STATIC_ROUTE_NETWORK: 21 | NEXT_HOP: 22 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/configure-nsxv-lb-profile/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | main() { 5 | 6 | cat << EOF > nsx.ini 7 | [nsxv] 8 | nsx_manager = $NSX_EDGE_GEN_NSX_MANAGER_ADDRESS 9 | nsx_username = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER 10 | nsx_password = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD 11 | [vcenter] 12 | vcenter = $VCENTER_HOST 13 | vcenter_user = $VCENTER_USR 14 | vcenter_passwd = $VCENTER_PWD 15 | [defaults] 16 | transport_zone = $NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE 17 | datacenter_name = $VCENTER_DATA_CENTER 18 | edge_datastore = $NSX_EDGE_GEN_EDGE_DATASTORE 19 | edge_cluster = $NSX_EDGE_GEN_EDGE_CLUSTER 20 | EOF 21 | 22 | # Create lb app profile if needed 23 | pynsxvg lb add_profile \ 24 | -n $NSX_EDGE_GEN_NAME \ 25 | --profile_name $NSX_EDGE_GEN_PROFILE_NAME \ 26 | --protocol $NSX_EDGE_GEN_PROFILE_PROTOCOL \ 27 | -x $NSX_EDGE_GEN_X_FORWARDED_FOR \ 28 | --ssl_passthrough $NSX_EDGE_GEN_SSL_PASSTHROUGH \ 29 | --pool_side_ssl $NSX_EDGE_GEN_POOL_SIDE_SSL \ 30 | -cert "$NSX_EDGE_GEN_PROFILE_CERT_CN" 31 | 32 | } 33 | 34 | pynsxvg () { 35 | /opt/pynsxv/cli.py "$@" 36 | } 37 | 38 | main 39 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/configure-nsxv-lb-profile/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxv 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxv/configure-nsxv-lb-profile/task.sh 14 | 15 | params: 16 | VCENTER_HOST: 17 | VCENTER_USR: 18 | VCENTER_PWD: 19 | VCENTER_DATA_CENTER: 20 | 21 | NSX_EDGE_GEN_NSX_MANAGER_ADDRESS: 22 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER: 23 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD: 24 | NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE: 25 | 26 | NSX_EDGE_GEN_NAME: 27 | NSX_EDGE_GEN_PROFILE_NAME: 28 | NSX_EDGE_GEN_EDGE_CLUSTER: 29 | NSX_EDGE_GEN_EDGE_DATASTORE: 30 | NSX_EDGE_GEN_PROFILE_PROTOCOL: 31 | NSX_EDGE_GEN_SSL_PASSTHROUGH: 32 | NSX_EDGE_GEN_POOL_SIDE_SSL: 33 | NSX_EDGE_GEN_X_FORWARDED_FOR: 34 | NSX_EDGE_GEN_PROFILE_CERT_CN: 35 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/configure-nsxv-lb-rules-vip/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxv 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | - name: nsxv-pool-data 12 | 13 | run: 14 | path: pcf-pipelines-utils/tasks/vsphere/nsxv/configure-nsxv-lb-rules-vip/task.sh 15 | 16 | params: 17 | VCENTER_HOST: 18 | VCENTER_USR: 19 | VCENTER_PWD: 20 | VCENTER_DATA_CENTER: 21 | 22 | NSX_EDGE_GEN_NSX_MANAGER_ADDRESS: 23 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER: 24 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD: 25 | 
NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE: 26 | 27 | NSX_EDGE_GEN_NAME: 28 | NSX_EDGE_GEN_VIP_NAME: 29 | NSX_EDGE_GEN_VIP_IP: 30 | NSX_EDGE_GEN_VIP_PORT: 31 | NSX_EDGE_GEN_PROFILE_NAME: 32 | NSX_EDGE_GEN_PROFILE_PROTOCOL: 33 | NSX_EDGE_GEN_EDGE_CLUSTER: 34 | NSX_EDGE_GEN_EDGE_DATASTORE: 35 | NSX_EDGE_GEN_ADD_RULE_TO_VIP: 36 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/configure-nsxv-lb-rules/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | # script expects to have access to a directory/filename called `nsxv-pool-data/pool_config.yml` 5 | # containing the following parameters: 6 | # 7 | # application-name: appname 8 | # application-domain: appname.domain.com 9 | # application-port-number: 8443 10 | # pool-ips: 10.10.10.1,10.10.10.2 11 | # pool-name-prefix: pks-clustername 12 | # 13 | main() { 14 | 15 | pool_config_file=nsxv-pool-data/pool_config.yml 16 | 17 | APPLICATION_NAME=$(getYamlPropertyValue "application-name" "$pool_config_file") 18 | APPLICATION_DOMAIN=$(getYamlPropertyValue "application-domain" "$pool_config_file") 19 | APPLICATION_PORT_NUMBER=$(getYamlPropertyValue "application-port-number" "$pool_config_file") 20 | POOL_IPS=$(getYamlPropertyValue "pool-ips" "$pool_config_file") 21 | POOL_NAME_PREFIX=$(getYamlPropertyValue "pool-name-prefix" "$pool_config_file") 22 | 23 | # POOL_IPS example after parsing and comma-splitting: 192.168.28.101 192.168.28.102 192.168.28.103 24 | 25 | # generated params 26 | # Vsphere Settings 27 | POOL_NAME=${POOL_NAME_PREFIX}-${APPLICATION_NAME} 28 | RULE_NAME="route-${APPLICATION_NAME}" 29 | 30 | cat << EOF > nsx.ini 31 | [nsxv] 32 | nsx_manager = $NSX_EDGE_GEN_NSX_MANAGER_ADDRESS 33 | nsx_username = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER 34 | nsx_password = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD 35 | [vcenter] 36 | vcenter = $VCENTER_HOST 37 | vcenter_user = $VCENTER_USR 38 | vcenter_passwd = $VCENTER_PWD 39 | [defaults] 40 | transport_zone = $NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE 41 | datacenter_name = $VCENTER_DATA_CENTER 42 | edge_datastore = $NSX_EDGE_GEN_EDGE_DATASTORE 43 | edge_cluster = $NSX_EDGE_GEN_EDGE_CLUSTER 44 | EOF 45 | 46 | cat << EOF > app_rule 47 | acl host_${APPLICATION_NAME} hdr_dom(host) -i ${APPLICATION_DOMAIN} 48 | use_backend ${POOL_NAME} if host_${APPLICATION_NAME} 49 | EOF 50 | 51 | # create lb pool 52 | pynsxvg lb add_pool -n $NSX_EDGE_GEN_NAME \ 53 | --pool_name ${POOL_NAME} \ 54 | --algorithm round-robin \ 55 | --monitor default_tcp_monitor 56 | # add members to pool 57 | for ip in ${POOL_IPS//,/ } 58 | do 59 | pynsxvg lb add_member \ 60 | -n $NSX_EDGE_GEN_NAME \ 61 | --pool_name $POOL_NAME \ 62 | --member_name node-${ip//./_} \ 63 | --member $ip \ 64 | --port ${APPLICATION_PORT_NUMBER} \ 65 | --monitor_port ${APPLICATION_PORT_NUMBER} \ 66 | --weight 1 67 | done 68 | 69 | pynsxvg lb add_rule \ 70 | -n $NSX_EDGE_GEN_NAME \ 71 | -rn ${RULE_NAME} \ 72 | -rs "$(cat app_rule)" 73 | 74 | pynsxvg lb add_rule_to_vip \ 75 | -n $NSX_EDGE_GEN_NAME \ 76 | --vip_name "$NSX_EDGE_GEN_VIP_NAME" \ 77 | --rule_name ${RULE_NAME} 78 | 79 | } 80 | 81 | pynsxvg () { 82 | /opt/pynsxv/cli.py "$@" 83 | } 84 | 85 | getYamlPropertyValue() { 86 | propertyName="${1}" 87 | yamlFile="${2}" 88 | grep "$propertyName" "$yamlFile" | grep "^[^#;]" | cut -d ":" -f 2 | tr -d " " 89 | } 90 | 91 | main 92 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/configure-nsxv-lb-rules/task.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | platform: linux 3 | 4 | image_resource: 5 | type: docker-image 6 | source: 7 | repository: pivotalservices/pynsxv 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | - name: nsxv-pool-data 12 | 13 | run: 14 | path: pcf-pipelines-utils/tasks/vsphere/nsxv/configure-nsxv-lb-rules/task.sh 15 | 16 | params: 17 | VCENTER_HOST: 18 | VCENTER_USR: 19 | VCENTER_PWD: 20 | VCENTER_DATA_CENTER: 21 | 22 | NSX_EDGE_GEN_NSX_MANAGER_ADDRESS: 23 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER: 24 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD: 25 | NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE: 26 | NSX_EDGE_GEN_NAME: 27 | NSX_EDGE_GEN_VIP_NAME: 28 | NSX_EDGE_GEN_EDGE_CLUSTER: 29 | NSX_EDGE_GEN_EDGE_DATASTORE: 30 | -------------------------------------------------------------------------------- /tasks/vsphere/nsxv/create-edge/task.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | platform: linux 4 | 5 | image_resource: 6 | type: docker-image 7 | source: {repository: pivotalservices/pynsxv} 8 | 9 | inputs: 10 | - name: pcf-pipelines-utils 11 | 12 | run: 13 | path: pcf-pipelines-utils/tasks/vsphere/nsxv/create-edge/task.sh 14 | 15 | params: 16 | VCENTER_HOST: 17 | VCENTER_USR: 18 | VCENTER_PWD: 19 | VCENTER_DATA_CENTER: 20 | OWNER_NAME: 21 | ERT_SSL_CERT_CN: 22 | ERT_SSL_CERT: 23 | ERT_SSL_PRIVATE_KEY: 24 | ESG_SNAT_UPLINK_IP_1: 25 | NSX_EDGE_GEN_NSX_MANAGER_ADDRESS: 26 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER: 27 | NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD: 28 | NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE: 29 | NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE_CLUSTERS: 30 | NSX_EDGE_GEN_NSX_MANAGER_DISTRIBUTED_PORTGROUP: 31 | NSX_EDGE_GEN_EDGE_DATASTORE: 32 | NSX_EDGE_GEN_EDGE_CLUSTER: 33 | NSX_EDGE_GEN_NAME: 34 | NSX_EDGE_GEN_ENABLE_DLR: 35 | NSX_EDGE_GEN_BOSH_NSX_ENABLED: 36 | ESG_SIZE: 37 | ESG_OSPF_PASSWORD_1: 38 | ESG_CLI_USERNAME_1: 39 | ESG_CLI_PASSWORD_1: 40 | ESG_ERT_CERTS_NAME_1: 41 | ESG_ERT_CERTS_CONFIG_SYSTEMDOMAIN_1: 42 | ESG_ERT_CERTS_CONFIG_APPDOMAIN_1: 43 | ESG_ERT_CERTS_CONFIG_OU_1: 44 | ESG_ERT_CERTS_CONFIG_COUNTRY_1: 45 | ESG_ISO_CERTS_NAME_1_1: 46 | ESG_ISO_CERTS_SWITCH_1_1: 47 | ESG_ISO_CERTS_CONFIG_DOMAIN_1_1: 48 | ESG_ISO_CERTS_CONFIG_OU_1_1: 49 | ESG_ISO_CERTS_CONFIG_COUNTRY_1_1: 50 | ESG_OPSMGR_UPLINK_IP_1: 51 | ESG_GO_ROUTER_UPLINK_IP_1: 52 | ESG_DIEGO_BRAIN_UPLINK_IP_1: 53 | ESG_TCP_ROUTER_UPLINK_IP_1: 54 | ESG_GO_ROUTER_SSL_TERM_1: 55 | ESG_GO_ROUTER_INSTANCES_1: 56 | ESG_DIEGO_BRAIN_INSTANCES_1: 57 | ESG_TCP_ROUTER_INSTANCES_1: 58 | ESG_MYSQL_ERT_PROXY_INSTANCES_1: 59 | ESG_MYSQL_TILE_PROXY_INSTANCES_1: 60 | ESG_RABBITMQ_TILE_PROXY_INSTANCES_1: 61 | ESG_DEFAULT_UPLINK_IP_1: 62 | ESG_DEFAULT_UPLINK_PG_1: 63 | ESG_GATEWAY_1: 64 | ISOZONE_SWITCH_NAME_1: 65 | ISOZONE_SWITCH_CIDR_1: 66 | ISOZONE_SSL_CERT_1: 67 | ISOZONE_SSL_PRIVATE_KEY_1: 68 | ESG_GO_ROUTER_ISOZONE_1_UPLINK_IP_1: 69 | ESG_GO_ROUTER_ISOZONE_1_SWITCH_1: 70 | ESG_GO_ROUTER_ISOZONE_1_INST_1: 71 | ESG_TCP_ROUTER_ISOZONE_1_UPLINK_IP_1: 72 | ESG_TCP_ROUTER_ISOZONE_1_SWITCH_1: 73 | ESG_TCP_ROUTER_ISOZONE_1_INST_1: 74 | ESG_GO_ROUTER_ISOZONE_1_SSL_TERM_1: 75 | ISOZONE_SWITCH_NAME_2: 76 | ISOZONE_SWITCH_CIDR_2: 77 | ISOZONE_SSL_CERT_2: 78 | ISOZONE_SSL_PRIVATE_KEY_2: 79 | ESG_GO_ROUTER_ISOZONE_2_UPLINK_IP_1: 80 | ESG_GO_ROUTER_ISOZONE_2_SWITCH_1: 81 | ESG_GO_ROUTER_ISOZONE_2_INST_1: 82 | ESG_TCP_ROUTER_ISOZONE_2_UPLINK_IP_1: 83 | ESG_TCP_ROUTER_ISOZONE_2_SWITCH_1: 84 | ESG_TCP_ROUTER_ISOZONE_2_INST_1: 85 | 
--------------------------------------------------------------------------------
/tasks/vsphere/nsxv/create-edge/task.yml:
--------------------------------------------------------------------------------
---

platform: linux

image_resource:
  type: docker-image
  source: {repository: pivotalservices/pynsxv}

inputs:
- name: pcf-pipelines-utils

run:
  path: pcf-pipelines-utils/tasks/vsphere/nsxv/create-edge/task.sh

params:
  VCENTER_HOST:
  VCENTER_USR:
  VCENTER_PWD:
  VCENTER_DATA_CENTER:
  OWNER_NAME:
  ERT_SSL_CERT_CN:
  ERT_SSL_CERT:
  ERT_SSL_PRIVATE_KEY:
  ESG_SNAT_UPLINK_IP_1:
  NSX_EDGE_GEN_NSX_MANAGER_ADDRESS:
  NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER:
  NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD:
  NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE:
  NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE_CLUSTERS:
  NSX_EDGE_GEN_NSX_MANAGER_DISTRIBUTED_PORTGROUP:
  NSX_EDGE_GEN_EDGE_DATASTORE:
  NSX_EDGE_GEN_EDGE_CLUSTER:
  NSX_EDGE_GEN_NAME:
  NSX_EDGE_GEN_ENABLE_DLR:
  NSX_EDGE_GEN_BOSH_NSX_ENABLED:
  ESG_SIZE:
  ESG_OSPF_PASSWORD_1:
  ESG_CLI_USERNAME_1:
  ESG_CLI_PASSWORD_1:
  ESG_ERT_CERTS_NAME_1:
  ESG_ERT_CERTS_CONFIG_SYSTEMDOMAIN_1:
  ESG_ERT_CERTS_CONFIG_APPDOMAIN_1:
  ESG_ERT_CERTS_CONFIG_OU_1:
  ESG_ERT_CERTS_CONFIG_COUNTRY_1:
  ESG_ISO_CERTS_NAME_1_1:
  ESG_ISO_CERTS_SWITCH_1_1:
  ESG_ISO_CERTS_CONFIG_DOMAIN_1_1:
  ESG_ISO_CERTS_CONFIG_OU_1_1:
  ESG_ISO_CERTS_CONFIG_COUNTRY_1_1:
  ESG_OPSMGR_UPLINK_IP_1:
  ESG_GO_ROUTER_UPLINK_IP_1:
  ESG_DIEGO_BRAIN_UPLINK_IP_1:
  ESG_TCP_ROUTER_UPLINK_IP_1:
  ESG_GO_ROUTER_SSL_TERM_1:
  ESG_GO_ROUTER_INSTANCES_1:
  ESG_DIEGO_BRAIN_INSTANCES_1:
  ESG_TCP_ROUTER_INSTANCES_1:
  ESG_MYSQL_ERT_PROXY_INSTANCES_1:
  ESG_MYSQL_TILE_PROXY_INSTANCES_1:
  ESG_RABBITMQ_TILE_PROXY_INSTANCES_1:
  ESG_DEFAULT_UPLINK_IP_1:
  ESG_DEFAULT_UPLINK_PG_1:
  ESG_GATEWAY_1:
  ISOZONE_SWITCH_NAME_1:
  ISOZONE_SWITCH_CIDR_1:
  ISOZONE_SSL_CERT_1:
  ISOZONE_SSL_PRIVATE_KEY_1:
  ESG_GO_ROUTER_ISOZONE_1_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_1_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_1_INST_1:
  ESG_TCP_ROUTER_ISOZONE_1_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_1_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_1_INST_1:
  ESG_GO_ROUTER_ISOZONE_1_SSL_TERM_1:
  ISOZONE_SWITCH_NAME_2:
  ISOZONE_SWITCH_CIDR_2:
  ISOZONE_SSL_CERT_2:
  ISOZONE_SSL_PRIVATE_KEY_2:
  ESG_GO_ROUTER_ISOZONE_2_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_2_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_2_INST_1:
  ESG_TCP_ROUTER_ISOZONE_2_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_2_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_2_INST_1:
  ESG_GO_ROUTER_ISOZONE_2_SSL_TERM_1:
  ISOZONE_SWITCH_NAME_3:
  ISOZONE_SWITCH_CIDR_3:
  ISOZONE_SSL_CERT_3:
  ISOZONE_SSL_PRIVATE_KEY_3:
  ESG_GO_ROUTER_ISOZONE_3_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_3_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_3_INST_1:
  ESG_TCP_ROUTER_ISOZONE_3_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_3_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_3_INST_1:
  ESG_GO_ROUTER_ISOZONE_3_SSL_TERM_1:
--------------------------------------------------------------------------------
/tasks/vsphere/nsxv/destroy-edge/task.sh:
--------------------------------------------------------------------------------
#!/bin/bash -eu

echo "Destroying edge"

cat << EOF > nsx.ini
[nsxv]
nsx_manager = $NSX_EDGE_GEN_NSX_MANAGER_ADDRESS
nsx_username = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER
nsx_password = $NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD

[vcenter]
vcenter = $VCENTER_HOST
vcenter_user = $VCENTER_USR
vcenter_passwd = $VCENTER_PWD

[defaults]
transport_zone = $NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE
datacenter_name = $VCENTER_DATA_CENTER
edge_datastore = $NSX_EDGE_GEN_EDGE_DATASTORE
edge_cluster = $NSX_EDGE_GEN_EDGE_CLUSTER
EOF

pynsxv_local() {
  /opt/pynsxv/cli.py "$@"
  return $?
}

# helper to derive a /MASK network address from an IP; currently unused by this script
get_cidr() {
  IP=$1
  MASK=$2
  FIRST_THREE=$(echo "$IP" | cut -d. -f 1,2,3)
  echo "$FIRST_THREE.0/$MASK"
}

if [ "$NUM_LOGICAL_SWITCHES" -gt 9 ] || [ "$NUM_LOGICAL_SWITCHES" -lt 1 ]
then
  echo 'NUM_LOGICAL_SWITCHES must be between 1 and 9'
  exit 1
fi

# Delete the edge
pynsxv_local esg delete -n "$NSX_EDGE_GEN_NAME"

# Delete the logical switches created alongside the edge
for labwire_id in $(seq "$NUM_LOGICAL_SWITCHES"); do
  pynsxv_local lswitch -n "labwire-$NSX_EDGE_GEN_NAME-$OWNER_NAME-$labwire_id" delete
done
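As a quick sanity check of the naming scheme, the delete loop targets switches named `labwire-<edge>-<owner>-<n>`. A minimal sketch, assuming the hypothetical values `NSX_EDGE_GEN_NAME=pcf-edge`, `OWNER_NAME=alice`, and `NUM_LOGICAL_SWITCHES=3`:

# hypothetical values, for illustration only
NSX_EDGE_GEN_NAME=pcf-edge OWNER_NAME=alice NUM_LOGICAL_SWITCHES=3
for labwire_id in $(seq "$NUM_LOGICAL_SWITCHES"); do
  echo "labwire-$NSX_EDGE_GEN_NAME-$OWNER_NAME-$labwire_id"
done
# prints:
# labwire-pcf-edge-alice-1
# labwire-pcf-edge-alice-2
# labwire-pcf-edge-alice-3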
--------------------------------------------------------------------------------
/tasks/vsphere/nsxv/destroy-edge/task.yml:
--------------------------------------------------------------------------------
---

platform: linux

image_resource:
  type: docker-image
  source: {repository: pivotalservices/pynsxv}

inputs:
- name: pcf-pipelines-utils

run:
  path: pcf-pipelines-utils/tasks/vsphere/nsxv/destroy-edge/task.sh

params:
  VCENTER_HOST:
  VCENTER_USR:
  VCENTER_PWD:
  VCENTER_DATA_CENTER:
  # OWNER_NAME and NUM_LOGICAL_SWITCHES are referenced by task.sh, which runs
  # with `set -u`, so both must be supplied
  OWNER_NAME:
  NUM_LOGICAL_SWITCHES:
  ERT_SSL_CERT:
  ERT_SSL_PRIVATE_KEY:
  NSX_EDGE_GEN_NSX_MANAGER_ADDRESS:
  NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER:
  NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD:
  NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE:
  NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE_CLUSTERS:
  NSX_EDGE_GEN_NSX_MANAGER_DISTRIBUTED_PORTGROUP:
  NSX_EDGE_GEN_EDGE_DATASTORE:
  NSX_EDGE_GEN_EDGE_CLUSTER:
  NSX_EDGE_GEN_NAME:
  NSX_EDGE_GEN_ENABLE_DLR:
  NSX_EDGE_GEN_BOSH_NSX_ENABLED:
  ESG_SIZE:
  ESG_OSPF_PASSWORD_1:
  ESG_CLI_USERNAME_1:
  ESG_CLI_PASSWORD_1:
  ESG_ERT_CERTS_NAME_1:
  ESG_ERT_CERTS_CONFIG_SYSTEMDOMAIN_1:
  ESG_ERT_CERTS_CONFIG_APPDOMAIN_1:
  ESG_ERT_CERTS_CONFIG_OU_1:
  ESG_ERT_CERTS_CONFIG_COUNTRY_1:
  ESG_ISO_CERTS_NAME_1_1:
  ESG_ISO_CERTS_SWITCH_1_1:
  ESG_ISO_CERTS_CONFIG_DOMAIN_1_1:
  ESG_ISO_CERTS_CONFIG_OU_1_1:
  ESG_ISO_CERTS_CONFIG_COUNTRY_1_1:
  ESG_OPSMGR_UPLINK_IP_1:
  ESG_GO_ROUTER_UPLINK_IP_1:
  ESG_DIEGO_BRAIN_UPLINK_IP_1:
  ESG_TCP_ROUTER_UPLINK_IP_1:
  ESG_GO_ROUTER_SSL_TERM_1:
  ESG_GO_ROUTER_INSTANCES_1:
  ESG_DIEGO_BRAIN_INSTANCES_1:
  ESG_TCP_ROUTER_INSTANCES_1:
  ESG_MYSQL_ERT_PROXY_INSTANCES_1:
  ESG_MYSQL_TILE_PROXY_INSTANCES_1:
  ESG_RABBITMQ_TILE_PROXY_INSTANCES_1:
  ESG_DEFAULT_UPLINK_IP_1:
  ESG_DEFAULT_UPLINK_PG_1:
  ESG_GATEWAY_1:
  ISOZONE_SWITCH_NAME_1:
  ISOZONE_SWITCH_CIDR_1:
  ISOZONE_SSL_CERT_1:
  ISOZONE_SSL_PRIVATE_KEY_1:
  ESG_GO_ROUTER_ISOZONE_1_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_1_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_1_INST_1:
  ESG_TCP_ROUTER_ISOZONE_1_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_1_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_1_INST_1:
  ESG_GO_ROUTER_ISOZONE_1_SSL_TERM_1:
  ISOZONE_SWITCH_NAME_2:
  ISOZONE_SWITCH_CIDR_2:
  ISOZONE_SSL_CERT_2:
  ISOZONE_SSL_PRIVATE_KEY_2:
  ESG_GO_ROUTER_ISOZONE_2_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_2_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_2_INST_1:
  ESG_TCP_ROUTER_ISOZONE_2_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_2_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_2_INST_1:
  ESG_GO_ROUTER_ISOZONE_2_SSL_TERM_1:
  ISOZONE_SWITCH_NAME_3:
  ISOZONE_SWITCH_CIDR_3:
  ISOZONE_SSL_CERT_3:
  ISOZONE_SSL_PRIVATE_KEY_3:
  ESG_GO_ROUTER_ISOZONE_3_UPLINK_IP_1:
  ESG_GO_ROUTER_ISOZONE_3_SWITCH_1:
  ESG_GO_ROUTER_ISOZONE_3_INST_1:
  ESG_TCP_ROUTER_ISOZONE_3_UPLINK_IP_1:
  ESG_TCP_ROUTER_ISOZONE_3_SWITCH_1:
  ESG_TCP_ROUTER_ISOZONE_3_INST_1:
  ESG_GO_ROUTER_ISOZONE_3_SSL_TERM_1:
--------------------------------------------------------------------------------
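For completeness, a minimal sketch of a pipeline job invoking the destroy task follows; the job name, the `((...))` parameter names, and the `pcf-pipelines-utils` resource definition are assumptions for illustration:

jobs:
- name: destroy-nsxv-edge
  plan:
  - get: pcf-pipelines-utils   # assumed git resource for this repo
  - task: destroy-edge
    file: pcf-pipelines-utils/tasks/vsphere/nsxv/destroy-edge/task.yml
    params:
      VCENTER_HOST: ((vcenter_host))
      VCENTER_USR: ((vcenter_usr))
      VCENTER_PWD: ((vcenter_pwd))
      VCENTER_DATA_CENTER: ((vcenter_data_center))
      NSX_EDGE_GEN_NSX_MANAGER_ADDRESS: ((nsx_manager_address))
      NSX_EDGE_GEN_NSX_MANAGER_ADMIN_USER: ((nsx_manager_admin_user))
      NSX_EDGE_GEN_NSX_MANAGER_ADMIN_PASSWD: ((nsx_manager_admin_passwd))
      NSX_EDGE_GEN_NSX_MANAGER_TRANSPORT_ZONE: ((nsx_transport_zone))
      NSX_EDGE_GEN_EDGE_DATASTORE: ((nsx_edge_datastore))
      NSX_EDGE_GEN_EDGE_CLUSTER: ((nsx_edge_cluster))
      NSX_EDGE_GEN_NAME: ((nsx_edge_name))
      OWNER_NAME: ((owner_name))
      NUM_LOGICAL_SWITCHES: 1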