├── .dockerignore
├── .github
└── workflows
│ ├── image-build-and-publish.yml
│ └── manual-build.yml
├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── RELEASE.md
├── VERSION
├── assets
├── app-migration.yml
├── async-dr.yaml
├── autopilot
│ ├── expand-pool.yaml
│ └── rebalance-pool.yaml
├── backup-restore
│ ├── applicationBackup.yml
│ ├── applicationRestore.yml
│ └── backupLocation.yml
├── cassandra
│ ├── cassandra-populate
│ └── cassandra.yaml
├── ceph
│ └── values-override.yaml
├── install-vault.sh
├── jupyter
│ └── jupyter.yaml
├── kubevirt
│ ├── dv-ubuntu.yml
│ ├── migration.yml
│ ├── ocp
│ │ ├── async-dr.yml
│ │ ├── hyperconverged.yml
│ │ ├── ocp-template.yml
│ │ ├── operator.yml
│ │ └── pxbbq-route.yml
│ ├── px-rwx-kubevirt.yml
│ ├── px-virt-sc.yml
│ ├── pxbbq-ns.yml
│ ├── pxbbq-ubuntu.yml
│ └── storageprofile.yml
├── metro-c1-down.yml
├── metro-schedule.yml
├── minio
│ └── minio-deployment.yml
├── mongo
│ └── mongo.yml
├── mysql
│ └── mysql.yml
├── nginx.yml
├── pds-petclinic
│ └── pds-petclinic.tpl
├── petclinic
│ ├── loadtest.md
│ ├── petclinic-with-mysql-cluster.yaml
│ └── petclinic.yml
├── postgres
│ ├── pgadmin.yml
│ ├── postgres-autopilot-rule.yaml
│ ├── postgres-restore.yml
│ ├── postgres-snap.yml
│ └── postgres.yml
├── proxy-nfs
│ ├── nginx-deployment.yml
│ ├── pvc.yml
│ └── sc.yml
├── pure
│ ├── multipath.conf
│ └── pure.json
├── pxbbq
│ └── pxbbq.yml
├── redis
│ └── redis.yml
├── sock-shop
│ ├── loadtest.md
│ └── sock-shop.yaml
├── training
│ ├── Autopilot
│ │ ├── autopilot-expand-pool-rule.yaml
│ │ ├── autopilot-rebalance-pool-rule.yaml
│ │ ├── create-volumes.sh
│ │ ├── delete-volumes.sh
│ │ ├── postgres-autopilot-rule.yaml
│ │ ├── postgres.yaml
│ │ ├── show-replicas.sh
│ │ └── watch-events.sh
│ ├── Basic_Volume_Management
│ │ └── nginx-pod.yaml
│ ├── Cloudsnaps
│ │ ├── create-objectstore.sh
│ │ ├── px-postgres-cloudrestore.yaml
│ │ └── px-postgres-cloudsnap.yaml
│ ├── Database_Deployment_PostgreSQL
│ │ ├── cheatsheet.txt
│ │ ├── px-postgres-app.yaml
│ │ ├── px-postgres-pvc.yaml
│ │ └── px-postgres-sc.yaml
│ ├── Disaster_Recovery
│ │ ├── cheatsheet.txt
│ │ ├── etcd.sh
│ │ ├── migration-schedule-async.yaml
│ │ ├── migration-schedule-sync.yaml
│ │ ├── postgres.yaml
│ │ └── schedule-policy.yaml
│ ├── Group_Snapshots_Cassandra
│ │ ├── px-cassandra-group-cloudsnap.yaml
│ │ ├── px-cassandra-presnap-rule.yaml
│ │ ├── px-cassandra-restore-pvcs.yaml
│ │ └── px-cassandra.yaml
│ ├── PDS
│ │ ├── cheatsheet.txt
│ │ └── pgadmin.yml
│ ├── PX_Migrate
│ │ ├── migration.yaml
│ │ └── redis.yaml
│ ├── Stateful_Application_Backups
│ │ ├── application-backup.yaml
│ │ ├── application-restore.yaml
│ │ ├── backup-location.yaml
│ │ ├── cheatsheet.txt
│ │ ├── minio-deployment.yml
│ │ └── postgres.yaml
│ ├── Volume_Snapshots
│ │ ├── cheatsheet.txt
│ │ ├── postgres-app-restore.yaml
│ │ ├── px-snap-pvc.yaml
│ │ └── px-snap.yaml
│ ├── Wordpress_Lab
│ │ ├── header.php
│ │ ├── mysql-deploy.yaml
│ │ ├── mysql-vol.yaml
│ │ ├── wp-deploy.yaml
│ │ └── wp-vol.yaml
│ ├── cheatsheet.txt
│ └── install-pxc.sh
└── wordpress
│ └── wordpress.yml
├── aws.go
├── azure.go
├── cmdTesting.go
├── defaults.yml
├── docs
├── bucket.tf
├── cloud
│ ├── aws
│ │ └── ocp4.md
│ └── vsphere
│ │ ├── 1_vsphere_deploy.png
│ │ ├── 2_vsphere_ovf_source.png
│ │ ├── 3_vsphere_name_folder.png
│ │ ├── 4_vsphere_resource.png
│ │ ├── 5_vsphere_datastore.png
│ │ ├── 6_vsphere_network.png
│ │ ├── 7_vsphere_template.png
│ │ └── README.md
├── templates
│ ├── async-dr
│ │ └── README.md
│ ├── backup-restore
│ │ └── README.md
│ ├── metro
│ │ └── README.md
│ ├── migration
│ │ └── README.md
│ ├── ocp-kubevirt
│ │ └── readme.MD
│ └── pds-petclinic
│ │ ├── README.md
│ │ ├── pds_access_key.png
│ │ └── pds_project.png
└── training_iam_user.tf
├── gcp.go
├── go.mod
├── go.sum
├── infra
├── aks-master
├── all-common
├── all-master
├── eks-master
├── gke-master
├── k8s-common
├── k8s-master
├── k8s-node
├── ocp4-master
└── rancher-master
├── install.sh
├── px-deploy.go
├── scripts
├── aws-elb
├── backup-restore
├── cat
├── clusterpair
├── clusterpair-metro
├── dude
├── eks-multicloud-target
├── etcd
├── helm-backup
├── helm-backup-apps
├── helm-backup-ocp4-kubevirt
├── install-awscli
├── install-ceph
├── install-px
├── kubevirt
├── kubevirt-apps
├── licenses
├── metro-post
├── metro-pre
├── ocp-kubevirt
├── pds-petclinic
├── petclinic
├── px-fio-example
├── px-wait
├── show-ip
├── sock-shop
└── training
├── templates
├── async-dr.yml
├── backup-restore.yml
├── ceph.yml
├── dude.yml
├── eks-multicloud-target.yml
├── kubevirt.yml
├── metro.yml
├── migration.yml
├── ocp-kubevirt.yml
├── pds-petclinic.yml
├── px-backup.yml
├── px-fio-example.yml
├── px.yml
└── training.yml
├── terraform
├── aws
│ ├── aws-returns.tpl
│ ├── cloud-init.tpl
│ ├── eks
│ │ ├── eks.tf
│ │ └── eks_run_everywhere.tpl
│ ├── main.tf
│ ├── ocp4
│ │ ├── ocp4-install-config.tpl
│ │ └── ocp4.tf
│ ├── rancher
│ │ ├── TODO.txt
│ │ ├── rancher-server.tf
│ │ └── rancher-variables.tf
│ └── variables.tf
├── azure
│ ├── aks
│ │ └── aks.tf
│ ├── cloud-init.tpl
│ ├── main.tf
│ └── variables.tf
├── gcp
│ ├── gcp-returns.tpl
│ ├── gke
│ │ └── gke.tf
│ ├── main.tf
│ ├── startup-script.tpl
│ └── variables.tf
└── vsphere
│ ├── cloud-init.tpl
│ ├── main.tf
│ ├── metadata.tpl
│ └── variables.tf
├── vsphere-build
└── vsphere-build.sh
└── vsphere.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | **/.terraform/
2 | **/.terraform.lock.hcl
3 |
--------------------------------------------------------------------------------
/.github/workflows/manual-build.yml:
--------------------------------------------------------------------------------
1 | name: manual-build
2 |
3 | # This workflow uses actions that are not certified by GitHub.
4 | # They are provided by a third-party and are governed by
5 | # separate terms of service, privacy policy, and support
6 | # documentation.
7 |
8 | on:
9 | workflow_dispatch:
10 |
11 | env:
12 | # Use docker.io for Docker Hub if empty
13 | REGISTRY: ghcr.io
14 | # github.repository as /
15 | IMAGE_NAME: ${{ github.repository }}
16 |
17 |
18 | jobs:
19 | build:
20 |
21 | runs-on: ubuntu-latest
22 | permissions:
23 | contents: read
24 | packages: write
25 | # This is used to complete the identity challenge
26 | # with sigstore/fulcio when running outside of PRs.
27 | id-token: write
28 |
29 | steps:
30 | - name: downcase REPO
31 | run: |
32 | echo "REPO=${GITHUB_REPOSITORY,,}" >>${GITHUB_ENV}
33 | - name: Checkout repository
34 | uses: actions/checkout@v4
35 |
36 | # Install the cosign tool except on PR
37 | # https://github.com/sigstore/cosign-installer
38 | #- name: Install cosign
39 | # if: github.event_name != 'pull_request'
40 | # uses: sigstore/cosign-installer@f3c664df7af409cb4873aa5068053ba9d61a57b6 #v2.6.0
41 | # with:
42 | # cosign-release: 'v1.13.1'
43 |
44 |
45 | # Workaround: https://github.com/docker/build-push-action/issues/461
46 | - name: Setup Docker buildx
47 | uses: docker/setup-buildx-action@v3
48 |
49 | # Login against a Docker registry except on PR
50 | # https://github.com/docker/login-action
51 | - name: Log into registry ${{ env.REGISTRY }}
52 | if: github.event_name != 'pull_request'
53 | uses: docker/login-action@v3
54 | with:
55 | registry: ${{ env.REGISTRY }}
56 | username: ${{ github.actor }}
57 | password: ${{ secrets.GITHUB_TOKEN }}
58 |
59 | # Extract metadata (tags, labels) for Docker
60 | # https://github.com/docker/metadata-action
61 | - name: Extract Docker metadata
62 | id: meta
63 | uses: docker/metadata-action@v5
64 | with:
65 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
66 |
67 | - name: Create tags (version and latest)
68 | id: tag-master
69 | run: |
70 | VERSION=$(cat VERSION)
71 | echo "VERSION=$VERSION" >> $GITHUB_ENV
72 | TAGS=$(echo "ghcr.io/$REPO:$VERSION" , "ghcr.io/$REPO:latest")
73 | echo "TAGS=$TAGS" >> $GITHUB_ENV
74 | echo "Version: $VERSION "
75 | echo "Tags: $TAGS"
76 |
77 | - name: Show versions and tags
78 | run: |
79 | echo "Version: ${{ env.VERSION }}"
80 | echo "Tags: ${{ env.TAGS }}"
81 |
82 | # Build and push Docker image with Buildx (don't push on PR)
83 | # https://github.com/docker/build-push-action
84 | - name: Build and push Docker image
85 | id: build-and-push
86 | uses: docker/build-push-action@v5
87 | with:
88 | context: .
89 | push: ${{ github.event_name != 'pull_request' }}
90 | tags: ${{ env.TAGS }}
91 | platforms: linux/amd64,linux/arm64
92 | labels: ${{ steps.meta.outputs.labels }}
93 | cache-from: type=gha
94 | cache-to: type=gha,mode=max
95 |
96 | # Sign the resulting Docker image digest except on PRs.
97 | # This will only write to the public Rekor transparency log when the Docker
98 | # repository is public to avoid leaking data. If you would like to publish
99 | # transparency data even for private images, pass --force to cosign below.
100 | # https://github.com/sigstore/cosign
101 | #- name: Sign the published Docker image
102 | # if: ${{ github.event_name != 'pull_request' }}
103 | # env:
104 | # COSIGN_EXPERIMENTAL: "true"
105 | # # This step uses the identity token to provision an ephemeral certificate
106 | # # against the sigstore community Fulcio instance.
107 | # run: echo "${{ steps.meta.outputs.tags }}" | xargs -I {} cosign sign {}@${{ steps.build-and-push.outputs.digest }}
108 |
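Note: because the only trigger defined above is workflow_dispatch, this build has to be started by hand, either from the repository's Actions tab or, assuming the GitHub CLI is installed and authenticated, with a sketch like:

    gh workflow run manual-build.yml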
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 | .terraform.*
8 |
9 | # Crash log files
10 | crash.log
11 |
12 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
13 | # .tfvars files are managed as part of configuration and so should be included in
14 | # version control.
15 | #
16 | # example.tfvars
17 | .tfvars*
18 |
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 |
26 | # Include override files you do wish to add to version control using negated pattern
27 | #
28 | # !example_override.tf
29 |
30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
31 | # example: *tfplan*
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # HINT: prepare a docker multi platform build
2 | #docker buildx create --use --platform=linux/arm64,linux/amd64 --name multi-platform-builder
3 | #docker buildx inspect --bootstrap
4 | #docker buildx build --platform=linux/arm64,linux/amd64 --push -t ghcr.io/danpaul81/px-deploy:dev .
5 | FROM --platform=$BUILDPLATFORM golang:1.23-alpine3.21 AS build
6 | RUN mkdir -p /linux/amd64
7 | RUN mkdir -p /linux/arm64
8 | RUN wget -P / https://releases.hashicorp.com/terraform/1.9.8/terraform_1.9.8_linux_amd64.zip
9 | RUN wget -P / https://releases.hashicorp.com/terraform/1.9.8/terraform_1.9.8_linux_arm64.zip
10 | RUN unzip /terraform_1.9.8_linux_amd64.zip -d /linux/amd64
11 | RUN unzip /terraform_1.9.8_linux_arm64.zip -d /linux/arm64
12 | RUN wget -P / https://github.com/vmware/govmomi/releases/download/v0.37.1/govc_Linux_x86_64.tar.gz
13 | RUN wget -P / https://github.com/vmware/govmomi/releases/download/v0.37.1/govc_Linux_arm64.tar.gz
14 | RUN tar -xzf /govc_Linux_x86_64.tar.gz -C /linux/amd64
15 | RUN tar -xzf /govc_Linux_arm64.tar.gz -C /linux/arm64
16 | RUN mkdir -p /root/go/src/px-deploy
17 | COPY go.mod go.sum *.go /root/go/src/px-deploy/
18 | ARG TARGETOS TARGETARCH TARGETPLATFORM
19 | RUN cd /root/go/src/px-deploy; GOOS=$TARGETOS GOARCH=$TARGETARCH go build -o /$TARGETPLATFORM/px-deploy
20 | COPY terraform /px-deploy/terraform
21 |
22 | FROM --platform=$TARGETPLATFORM alpine:3.21
23 | RUN apk add --no-cache openssh-client-default bash rsync
24 | RUN echo ServerAliveInterval 300 >/etc/ssh/ssh_config
25 | RUN echo ServerAliveCountMax 2 >>/etc/ssh/ssh_config
26 | RUN echo TCPKeepAlive yes >>/etc/ssh/ssh_config
27 | ARG TARGETPLATFORM
28 | COPY --from=build /$TARGETPLATFORM/terraform /usr/bin/terraform
29 | COPY --from=build /$TARGETPLATFORM/govc /usr/bin/govc
30 | COPY --from=build /$TARGETPLATFORM/px-deploy /root/go/bin/px-deploy
31 | COPY assets /px-deploy/assets
32 | COPY scripts /px-deploy/scripts
33 | COPY templates /px-deploy/templates
34 | COPY infra /px-deploy/infra
35 | COPY defaults.yml /px-deploy/versions.yml
36 | COPY VERSION /
37 | COPY --from=build /px-deploy/terraform/aws /px-deploy/terraform/aws
38 | COPY --from=build /px-deploy/terraform/azure /px-deploy/terraform/azure
39 | COPY --from=build /px-deploy/terraform/gcp /px-deploy/terraform/gcp
40 | COPY --from=build /px-deploy/terraform/vsphere /px-deploy/terraform/vsphere
41 | RUN terraform -chdir=/px-deploy/terraform/aws/ init
42 | RUN terraform -chdir=/px-deploy/terraform/azure/ init
43 | RUN terraform -chdir=/px-deploy/terraform/gcp/ init
44 | RUN terraform -chdir=/px-deploy/terraform/vsphere/ init
45 |
46 |
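Note: the HINT comment at the top covers the multi-arch build-and-push; for a quick single-platform build into the local Docker image store, a sketch (the px-deploy:local tag is arbitrary) would be:

    docker buildx build --platform=linux/amd64 --load -t px-deploy:local .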
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2021 by Andrew Hill
2 |
3 | Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted.
4 |
5 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
6 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 6.4.4
2 |
--------------------------------------------------------------------------------
/assets/app-migration.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: Migration
3 | metadata:
4 | name: appmigration
5 | namespace: kube-system
6 | spec:
7 | clusterPair: remotecluster
8 | includeResources: true
9 | startApplications: true
10 | namespaces:
11 | - pxbbq
12 | - petclinic
13 |
14 |
15 |
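Note: this Migration assumes a ClusterPair named remotecluster already exists in kube-system (see scripts/clusterpair). A minimal sketch of running and monitoring it, assuming storkctl is installed:

    kubectl apply -f app-migration.yml
    storkctl get migrations -n kube-system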
--------------------------------------------------------------------------------
/assets/async-dr.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: SchedulePolicy
3 | metadata:
4 | name: 2-min
5 | policy:
6 | interval:
7 | intervalMinutes: 2
8 | ---
9 | apiVersion: stork.libopenstorage.org/v1alpha1
10 | kind: MigrationSchedule
11 | metadata:
12 | name: dr-schedule
13 | namespace: kube-system
14 | spec:
15 | template:
16 | spec:
17 | clusterPair: remotecluster
18 | includeResources: true
19 | startApplications: false
20 | namespaces:
21 | - pxbbq
22 | - petclinic
23 | schedulePolicyName: 2-min
24 |
--------------------------------------------------------------------------------
/assets/autopilot/expand-pool.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: pool-expand
5 | spec:
6 | enforcement: required
7 | ##### conditions are the symptoms to evaluate. All conditions are AND'ed
8 | conditions:
9 | for: 5
10 | expressions:
11 | # pool available capacity less than 50%
12 | - key: "100 * ( px_pool_stats_available_bytes/ px_pool_stats_total_bytes)"
13 | operator: Lt
14 | values:
15 | - "50"
16 | # pool total capacity should not exceed 2TB
17 | - key: "px_pool_stats_total_bytes/(1024*1024*1024)"
18 | operator: Lt
19 | values:
20 | - "2000"
21 | ##### action to perform when condition is true
22 | actions:
23 | - name: "openstorage.io.action.storagepool/expand"
24 | params:
25 | # resize pool by scalepercentage of current size
26 | scalepercentage: "50"
27 | # when scaling, resize disk
28 | scaletype: "resize-disk"
29 |
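Note: once this rule is applied, Autopilot reports its activity as Kubernetes events attached to the AutopilotRule object (likely what watch-events.sh under assets/training/Autopilot is for). A sketch for watching them:

    kubectl get events --field-selector involvedObject.kind=AutopilotRule --all-namespaces --sort-by=.lastTimestamp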
--------------------------------------------------------------------------------
/assets/autopilot/rebalance-pool.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: pool-rebalance
5 | spec:
6 | conditions:
7 | for: 5
8 | requiredMatches: 1
9 | expressions:
10 | - keyAlias: PoolProvDeviationPerc
11 | operator: NotInRange
12 | values:
13 | - "-20"
14 | - "20"
15 | - keyAlias: PoolUsageDeviationPerc
16 | operator: NotInRange
17 | values:
18 | - "-20"
19 | - "20"
20 | actions:
21 | - name: "openstorage.io.action.storagepool/rebalance"
22 |
--------------------------------------------------------------------------------
/assets/backup-restore/applicationBackup.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: ApplicationBackup
3 | metadata:
4 | name: pet-backup
5 | namespace: petclinic
6 | spec:
7 | backupLocation: pet-backup-location
8 | namespaces:
9 | - petclinic
10 | reclaimPolicy: Retain
11 | selectors:
12 | preExecRule:
13 | postExecRule:
--------------------------------------------------------------------------------
/assets/backup-restore/applicationRestore.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: ApplicationRestore
3 | metadata:
4 | name: pet-restore
5 | namespace: petclinic
6 | spec:
7 | backupName:
8 | backupLocation: pet-backup-location
--------------------------------------------------------------------------------
/assets/backup-restore/backupLocation.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: BackupLocation
3 | metadata:
4 | name: pet-backup-location
5 | namespace: petclinic
6 | annotations:
7 | stork.libopenstorage.org/skipresource: "false"
8 | location:
9 | type: s3
10 | sync: true
11 | path: "portworx"
12 | s3Config:
13 | region: default
14 | accessKeyID: minio
15 | secretAccessKey: minio123
16 | endpoint: "http://xxxx:30221"
17 | disableSSL: true
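Note: the endpoint value http://xxxx:30221 is a placeholder; 30221 matches the MinIO NodePort from assets/minio/minio-deployment.yml, so xxxx would normally be replaced with a reachable node IP. A rough apply/verify sequence for this folder, assuming the "portworx" bucket already exists in MinIO:

    kubectl apply -f backupLocation.yml
    kubectl apply -f applicationBackup.yml
    kubectl -n petclinic get applicationbackups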
--------------------------------------------------------------------------------
/assets/cassandra/cassandra-populate:
--------------------------------------------------------------------------------
1 | CREATE KEYSPACE classicmodels WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 3 };
2 | CONSISTENCY QUORUM;
3 | use classicmodels;
4 | CREATE TABLE offices (officeCode text PRIMARY KEY, city text, phone text, addressLine1 text, addressLine2 text, state text, country text, postalCode text, territory text);
5 | BEGIN BATCH
6 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('1','San Francisco','+1 650 219 4782','100 Market Street','Suite 300','CA','USA','94080','NA');
7 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('2','Boston','+1 215 837 0825','1550 Court Place','Suite 102','MA','USA','02107','NA');
8 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('3','NYC','+1 212 555 3000','523 East 53rd Street','apt. 5A','NY','USA','10022','NA');
9 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('4','Paris','+33 14 723 4404','43 Rue Jouffroy abbans', NULL ,NULL,'France','75017','EMEA');
10 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('5','Tokyo','+81 33 224 5000','4-1 Kioicho',NULL,'Chiyoda-Ku','Japan','102-8578','Japan');
11 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('6','Sydney','+61 2 9264 2451','5-11 Wentworth Avenue','Floor #2',NULL,'Australia','NSW 2010','APAC');
12 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('7','London','+44 20 7877 2041','25 Old Broad Street','Level 7',NULL,'UK','EC2N 1HN','EMEA');
13 | INSERT into offices(officeCode, city, phone, addressLine1, addressLine2, state, country ,postalCode, territory) values ('8','Mumbai','+91 22 8765434','BKC','Building 2',NULL,'MH','400051','APAC');
14 | APPLY BATCH;
--------------------------------------------------------------------------------
/assets/cassandra/cassandra.yaml:
--------------------------------------------------------------------------------
1 | kind: Namespace
2 | apiVersion: v1
3 | metadata:
4 | name: cassandra
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: px-repl-2
10 | provisioner: pxd.portworx.com
11 | parameters:
12 | #openstorage.io/auth-secret-name: px-user-token
13 | #openstorage.io/auth-secret-namespace: portworx
14 | repl: "2"
15 | io_profile: "db_remote"
16 | ---
17 | apiVersion: v1
18 | kind: Service
19 | metadata:
20 | namespace: cassandra
21 | labels:
22 | app: cassandra
23 | name: cassandra
24 | spec:
25 | clusterIP: None
26 | ports:
27 | - port: 9042
28 | selector:
29 | app: cassandra
30 | ---
31 | apiVersion: apps/v1
32 | kind: StatefulSet
33 | metadata:
34 | name: cassandra
35 | namespace: cassandra
36 | spec:
37 | selector:
38 | matchLabels:
39 | app: cassandra
40 | serviceName: cassandra
41 | replicas: 3
42 | template:
43 | metadata:
44 | labels:
45 | app: cassandra
46 | spec:
47 | schedulerName: stork
48 | containers:
49 | - name: cassandra
50 | image: cassandra:3
51 | ports:
52 | - containerPort: 7000
53 | name: intra-node
54 | - containerPort: 7001
55 | name: tls-intra-node
56 | - containerPort: 7199
57 | name: jmx
58 | - containerPort: 9042
59 | name: cql
60 | env:
61 | - name: CASSANDRA_SEEDS
62 | value: cassandra-0.cassandra.cassandra.svc.cluster.local
63 | - name: MAX_HEAP_SIZE
64 | value: 512M
65 | - name: HEAP_NEWSIZE
66 | value: 512M
67 | - name: CASSANDRA_CLUSTER_NAME
68 | value: "Cassandra"
69 | - name: CASSANDRA_DC
70 | value: "DC1"
71 | - name: CASSANDRA_RACK
72 | value: "Rack1"
73 | - name: CASSANDRA_AUTO_BOOTSTRAP
74 | value: "false"
75 | - name: CASSANDRA_ENDPOINT_SNITCH
76 | value: GossipingPropertyFileSnitch
77 | volumeMounts:
78 | - name: cassandra-data
79 | mountPath: /var/lib/cassandra
80 | volumeClaimTemplates:
81 | - metadata:
82 | name: cassandra-data
83 | labels:
84 | app: cassandra
85 | spec:
86 | storageClassName: px-repl-2
87 | accessModes: [ "ReadWriteOnce" ]
88 | resources:
89 | requests:
90 | storage: 10Gi
91 |
--------------------------------------------------------------------------------
/assets/install-vault.sh:
--------------------------------------------------------------------------------
1 | IP=$(hostname -i)
2 | PORT=8200
3 | VERSION=1.6.3
4 | export VAULT_ADDR=http://$IP:$PORT
5 |
6 | echo "Fetching Vault..."
7 | cd /tmp
8 | curl -sLo vault.zip https://releases.hashicorp.com/vault/${VERSION}/vault_${VERSION}_linux_amd64.zip
9 |
10 | echo "Installing Vault..."
11 | unzip vault.zip >/dev/null
12 | chmod +x vault
13 | mv vault /usr/bin/vault
14 |
15 | # Setup Vault
16 | mkdir -p /tmp/vault-data
17 | mkdir -p /etc/vault.d
18 | cat >/etc/vault.d/config.json </etc/systemd/system/vault.service <&/tmp/vault.txt
52 | for i in $(grep Unseal /tmp/vault.txt | head -3 | cut -f 4 -d " "); do
53 | vault operator unseal $i
54 | done
55 | vault login $(grep Initial /tmp/vault.txt | cut -f 4 -d " ")
56 | vault secrets enable -version=2 -path=secret kv
57 |
58 | kubectl apply -f - </tmp/px-policy.hcl < VAULT_BASE_PATH=portworx to PX (optional)
90 | path "secret/*"
91 | {
92 | capabilities = ["create", "read", "update", "delete", "list"]
93 | }
94 | EOF
95 |
96 | vault policy write portworx /tmp/px-policy.hcl
97 | echo "export VAULT_ADDR=$VAULT_ADDR" >>/root/.bashrc
98 |
99 | echo "Vault configuration complete."
100 |
--------------------------------------------------------------------------------
/assets/kubevirt/dv-ubuntu.yml:
--------------------------------------------------------------------------------
1 | apiVersion: cdi.kubevirt.io/v1beta1
2 | kind: DataVolume
3 | metadata:
4 | name: ubuntu-image
5 | namespace: pxbbq
6 | labels:
7 | px-dr: 'false'
8 | annotations:
9 | cdi.kubevirt.io/storage.bind.immediate.requested: "true"
10 | cdi.kubevirt.io/storage.usePopulator: "false"
11 | spec:
12 | source:
13 | http:
14 | url: "https://cloud-images.ubuntu.com/minimal/releases/noble/release/ubuntu-24.04-minimal-cloudimg-amd64.img"
15 | pvc:
16 | storageClassName: px-rwx-kubevirt
17 | accessModes:
18 | - ReadWriteMany
19 | resources:
20 | requests:
21 | storage: 5Gi
22 |
--------------------------------------------------------------------------------
/assets/kubevirt/migration.yml:
--------------------------------------------------------------------------------
1 | apiVersion: kubevirt.io/v1
2 | kind: VirtualMachineInstanceMigration
3 | metadata:
4 | name: migrate-ubuntu-mongodb
5 | namespace: pxbbq
6 | spec:
7 | vmiName: ubuntu-mongodb
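Note: a sketch of triggering and watching this live migration, assuming the ubuntu-mongodb virtual machine is already running in the pxbbq namespace:

    kubectl apply -f migration.yml
    kubectl -n pxbbq get virtualmachineinstancemigrations -w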
--------------------------------------------------------------------------------
/assets/kubevirt/ocp/async-dr.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: SchedulePolicy
3 | metadata:
4 | name: 5-min
5 | policy:
6 | interval:
7 | intervalMinutes: 5
8 | ---
9 | apiVersion: stork.libopenstorage.org/v1alpha1
10 | kind: MigrationSchedule
11 | metadata:
12 | name: pxbbq
13 | namespace: kube-system
14 | spec:
15 | template:
16 | spec:
17 | clusterPair: remotecluster
18 | includeResources: true
19 | startApplications: false
20 | excludeSelectors:
21 | px-dr: 'false'
22 | namespaces:
23 | - pxbbq
24 | schedulePolicyName: 5-min
25 |
--------------------------------------------------------------------------------
/assets/kubevirt/ocp/hyperconverged.yml:
--------------------------------------------------------------------------------
1 | apiVersion: hco.kubevirt.io/v1beta1
2 | kind: HyperConverged
3 | metadata:
4 | name: kubevirt-hyperconverged
5 | namespace: openshift-cnv
6 | spec:
7 |
--------------------------------------------------------------------------------
/assets/kubevirt/ocp/ocp-template.yml:
--------------------------------------------------------------------------------
1 | kind: Template
2 | apiVersion: template.openshift.io/v1
3 | metadata:
4 | name: ubuntu-minimal
5 | namespace: openshift-cnv
6 | labels:
7 | app.kubernetes.io/name: custom-templates
8 | os.template.kubevirt.io/ubuntu: 'true'
9 | template.kubevirt.io/type: vm
10 | vm.kubevirt.io/template: example
11 | vm.kubevirt.io/template.namespace: openshift-cnv
12 | workload.template.kubevirt.io/server: 'true'
13 | annotations:
14 | description: Ubuntu Minimal VM
15 | iconClass: icon-ubuntu
16 | name.os.template.kubevirt.io/ubuntu: ubuntu-minimal
17 | openshift.io/display-name: Ubuntu VM
18 | openshift.io/provider-display-name: ''
19 | template.kubevirt.io/provider: ''
20 | objects:
21 | - apiVersion: kubevirt.io/v1
22 | kind: VirtualMachine
23 | metadata:
24 | name: '${NAME}'
25 | annotations:
26 | description: Ubuntu Minimal
27 | labels:
28 | app: '${NAME}'
29 | vm.kubevirt.io/template: ubuntu-minimal
30 | os.template.kubevirt.io/ubuntu: 'true'
31 | vm.kubevirt.io/template.namespace: openshift-cnv
32 | spec:
33 | running: false
34 | template:
35 | metadata:
36 | annotations:
37 | vm.kubevirt.io/flavor: small
38 | vm.kubevirt.io/os: ubuntu
39 | vm.kubevirt.io/workload: server
40 | labels:
41 | kubevirt.io/domain: '${NAME}'
42 | kubevirt.io/size: small
43 | spec:
44 | domain:
45 | cpu:
46 | cores: 1
47 | sockets: 1
48 | threads: 1
49 | devices:
50 | disks:
51 | - name: rootdisk
52 | disk:
53 | bus: virtio
54 | bootOrder: 1
55 | - disk:
56 | bus: virtio
57 | name: cloudinitdisk
58 | bootOrder: 2
59 | interfaces:
60 | - masquerade: {}
61 | model: virtio
62 | name: default
63 | networkInterfaceMultiqueue: true
64 | rng: {}
65 | features:
66 | acpi: {}
67 | smm:
68 | enabled: true
69 | firmware:
70 | bootloader:
71 | efi: {}
72 | machine:
73 | type: q35
74 | resources:
75 | requests:
76 | memory: 2Gi
77 | hostname: '${NAME}'
78 | networks:
79 | - name: default
80 | pod: {}
81 | terminationGracePeriodSeconds: 180
82 | volumes:
83 | - name: rootdisk
84 | dataVolume:
85 | name: '${NAME}-rootdisk'
86 | - name: cloudinitdisk
87 | cloudInitNoCloud:
88 | userData: |
89 | #cloud-config
90 | password: portworx
91 | chpasswd:
92 | expire: false
93 | user: kubevirt
94 | runcmd:
95 | - apt update
96 | - apt install -y qemu-guest-agent
97 | dataVolumeTemplates:
98 | - metadata:
99 | name: '${NAME}-rootdisk'
100 | spec:
101 | storage:
102 | resources:
103 | requests:
104 | storage: 5Gi
105 | storageClassName: px-csi-db
106 | preallocation: false
107 | source:
108 | http:
109 | url: >-
110 | https://cloud-images.ubuntu.com/minimal/releases/mantic/release/ubuntu-23.10-minimal-cloudimg-amd64.img
111 | parameters:
112 | - name: NAME
113 | description: Name for the new VM
114 | generate: expression
115 | from: 'ubuntu-[a-z0-9]{3}'
116 |
--------------------------------------------------------------------------------
/assets/kubevirt/ocp/operator.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: openshift-cnv
5 | ---
6 | apiVersion: operators.coreos.com/v1
7 | kind: OperatorGroup
8 | metadata:
9 | name: kubevirt-hyperconverged-group
10 | namespace: openshift-cnv
11 | spec:
12 | targetNamespaces:
13 | - openshift-cnv
14 | ---
15 | apiVersion: operators.coreos.com/v1alpha1
16 | kind: Subscription
17 | metadata:
18 | name: hco-operatorhub
19 | namespace: openshift-cnv
20 | spec:
21 | source: redhat-operators
22 | sourceNamespace: openshift-marketplace
23 | name: kubevirt-hyperconverged
24 | startingCSV: kubevirt-hyperconverged-operator.v4.12.8
25 | channel: "stable"
26 |
--------------------------------------------------------------------------------
/assets/kubevirt/ocp/pxbbq-route.yml:
--------------------------------------------------------------------------------
1 | kind: Route
2 | apiVersion: route.openshift.io/v1
3 | metadata:
4 | name: pxbbq
5 | namespace: pxbbq
6 | labels:
7 | app: pxbbq-web
8 | px-dr: 'false'
9 | spec:
10 | to:
11 | kind: Service
12 | name: pxbbq-svc
13 | weight: 100
14 | port:
15 | targetPort: 8080
16 | wildcardPolicy: None
--------------------------------------------------------------------------------
/assets/kubevirt/px-rwx-kubevirt.yml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: px-rwx-kubevirt
5 | annotations:
6 | storageclass.kubernetes.io/is-default-class: "true"
7 | provisioner: pxd.portworx.com
8 | parameters:
9 | repl: "3"
10 | sharedv4: "true"
11 | sharedv4_mount_options: vers=3.0,nolock
12 | volumeBindingMode: WaitForFirstConsumer
13 | allowVolumeExpansion: true
14 |
--------------------------------------------------------------------------------
/assets/kubevirt/px-virt-sc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: px-virtualization
5 | parameters:
6 | autofstrim: "true"
7 | cow_ondemand: "true"
8 | disable_io_profile_protection: "1"
9 | io_profile: auto
10 | nodiscard: "true"
11 | priority_io: high
12 | repl: "3"
13 | sharedv4: "true"
14 | sharedv4_svc_type: ClusterIP
15 | sharedv4_mount_options: vers=3.0,nolock
16 | provisioner: pxd.portworx.com
17 | reclaimPolicy: Retain
18 | volumeBindingMode: Immediate
19 |
--------------------------------------------------------------------------------
/assets/kubevirt/pxbbq-ns.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: pxbbq
5 | labels:
6 | app: pxbbq
7 | backup: "true"
8 |
--------------------------------------------------------------------------------
/assets/kubevirt/storageprofile.yml:
--------------------------------------------------------------------------------
1 | apiVersion: cdi.kubevirt.io/v1beta1
2 | kind: StorageProfile
3 | metadata:
4 | name: px-rwx-kubevirt
5 | spec:
6 | claimPropertySets:
7 | - accessModes:
8 | - ReadWriteMany
9 | volumeMode: Filesystem
--------------------------------------------------------------------------------
/assets/metro-c1-down.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: ClusterDomainUpdate
3 | metadata:
4 | name: deactivate-cluster1
5 | namespace: kube-system
6 | spec:
7 | clusterdomain: cluster-1
8 | active: false
9 |
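Note: this marks the cluster-1 domain as down for the metro DR demo; re-activating it is the same ClusterDomainUpdate with active: true, or, assuming storkctl is installed, something like:

    storkctl activate clusterdomain cluster-1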
--------------------------------------------------------------------------------
/assets/metro-schedule.yml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: SchedulePolicy
3 | metadata:
4 | name: appschedule
5 | namespace: kube-system
6 | policy:
7 | interval:
8 | intervalMinutes: 2
9 | ---
10 | apiVersion: stork.libopenstorage.org/v1alpha1
11 | kind: MigrationSchedule
12 | metadata:
13 | name: appmigrationschedule
14 | namespace: kube-system
15 | spec:
16 | template:
17 | spec:
18 | clusterPair: remotecluster-2
19 | includeResources: true
20 | startApplications: false
21 | includeVolumes: false
22 | namespaces:
23 | - pxbbq
24 | - petclinic
25 | schedulePolicyName: appschedule
26 |
--------------------------------------------------------------------------------
/assets/minio/minio-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: minio
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: minio-sc
10 | provisioner: pxd.portworx.com
11 | parameters:
12 | repl: "1"
13 | ---
14 | apiVersion: v1
15 | kind: PersistentVolumeClaim
16 | metadata:
17 | name: minio-pv-claim
18 | namespace: minio
19 | spec:
20 | storageClassName: minio-sc
21 | accessModes:
22 | - ReadWriteOnce
23 | resources:
24 | requests:
25 | storage: 10Gi
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: minio-service
31 | namespace: minio
32 | spec:
33 | type: NodePort
34 | ports:
35 | - port: 9000
36 | targetPort: 9000
37 | nodePort: 30221
38 | protocol: TCP
39 | selector:
40 | app: minio
41 | ---
42 | apiVersion: v1
43 | kind: Service
44 | metadata:
45 | name: minio-ui
46 | namespace: minio
47 | spec:
48 | type: NodePort
49 | ports:
50 | - port: 9001
51 | targetPort: 9001
52 | nodePort: 30222
53 | protocol: TCP
54 | selector:
55 | app: minio
56 | ---
57 | apiVersion: apps/v1
58 | kind: Deployment
59 | metadata:
60 | name: minio
61 | namespace: minio
62 | spec:
63 | selector:
64 | matchLabels:
65 | app: minio
66 | template:
67 | metadata:
68 | labels:
69 | app: minio
70 | spec:
71 | volumes:
72 | - name: data
73 | persistentVolumeClaim:
74 | claimName: minio-pv-claim
75 | containers:
76 | - name: minio
77 | volumeMounts:
78 | - name: data
79 | mountPath: "/data"
80 | image: quay.io/minio/minio:RELEASE.2024-11-07T00-52-20Z
81 | command:
82 | - /bin/bash
83 | - -c
84 | args:
85 | - minio server /data --console-address :9001
86 | env:
87 | # MinIO access key and secret key
88 | - name: MINIO_ROOT_USER
89 | value: "minio"
90 | - name: MINIO_ROOT_PASSWORD
91 | value: "minio123"
92 | ports:
93 | - containerPort: 9000
94 | - containerPort: 9001
95 | readinessProbe:
96 | httpGet:
97 | path: /minio/health/ready
98 | port: 9000
99 | initialDelaySeconds: 120
100 | periodSeconds: 20
101 | livenessProbe:
102 | httpGet:
103 | path: /minio/health/live
104 | port: 9000
105 | initialDelaySeconds: 120
106 | periodSeconds: 20
107 |
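Note: the Stork BackupLocation in assets/backup-restore/backupLocation.yml expects its path "portworx" to exist as a bucket in this MinIO instance. A sketch of creating it with the MinIO client, assuming mc is installed locally and <node-ip> is a reachable node:

    mc alias set pxminio http://<node-ip>:30221 minio minio123
    mc mb pxminio/portworx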
--------------------------------------------------------------------------------
/assets/mongo/mongo.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: px-mongo-pvc
5 | spec:
6 | storageClassName: px-csi-db
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 3Gi
12 | ---
13 | apiVersion: apps/v1
14 | kind: Deployment
15 | metadata:
16 | name: mongodb
17 | spec:
18 | selector:
19 | matchLabels:
20 | app: mongodb
21 | strategy:
22 | rollingUpdate:
23 | maxSurge: 1
24 | maxUnavailable: 1
25 | type: RollingUpdate
26 | replicas: 1
27 | template:
28 | metadata:
29 | labels:
30 | app: mongodb
31 | spec:
32 | schedulerName: stork
33 | containers:
34 | - name: mongodb
35 | image: mongo:4
36 | imagePullPolicy: "IfNotPresent"
37 | env:
38 | - name: MONGODB_ROOT_PASSWORD
39 | value: password
40 | - name: MONGODB_USERNAME
41 | value: admin
42 | - name: MONGODB_PASSWORD
43 | value: password
44 | - name: MONGODB_DATABASE
45 | value: demo
46 | ports:
47 | - name: mongodb
48 | containerPort: 27017
49 | volumeMounts:
50 | - name: data
51 | mountPath: /data/db
52 | volumes:
53 | - name: data
54 | persistentVolumeClaim:
55 | claimName: px-mongo-pvc
56 |
--------------------------------------------------------------------------------
/assets/mysql/mysql.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: px-mysql-pvc
5 | spec:
6 | storageClassName: px-csi-db
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 1Gi
12 | ---
13 | apiVersion: apps/v1
14 | kind: Deployment
15 | metadata:
16 | name: mysql
17 | spec:
18 | selector:
19 | matchLabels:
20 | app: mysql
21 | strategy:
22 | rollingUpdate:
23 | maxSurge: 1
24 | maxUnavailable: 1
25 | type: RollingUpdate
26 | replicas: 1
27 | template:
28 | metadata:
29 | labels:
30 | app: mysql
31 | spec:
32 | schedulerName: stork
33 | containers:
34 | - name: mysql
35 | image: mysql:5.7.33
36 | imagePullPolicy: "IfNotPresent"
37 | ports:
38 | - containerPort: 5432
39 | env:
40 | - name: MYSQL_USER
41 | value: mysql
42 | - name: MYSQL_PASSWORD
43 | value: supermysql
44 | - name: MYSQL_DATABASE
45 | value: pxdemo
46 | - name: MYSQL_ROOT_PASSWORD
47 | value: supermysql
48 | volumeMounts:
49 | - mountPath: /var/lib/mysql
50 | name: mysql
51 | volumes:
52 | - name: mysql
53 | persistentVolumeClaim:
54 | claimName: px-mysql-pvc
55 |
--------------------------------------------------------------------------------
/assets/nginx.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: nginx
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: px-nginx-sc
10 | provisioner: kubernetes.io/portworx-volume
11 | parameters:
12 | repl: "3"
13 | allowVolumeExpansion: true
14 | ---
15 | kind: PersistentVolumeClaim
16 | apiVersion: v1
17 | metadata:
18 | name: nginx-pvc
19 | namespace: nginx
20 | spec:
21 | storageClassName: px-nginx-sc
22 | accessModes:
23 | - ReadWriteMany
24 | resources:
25 | requests:
26 | storage: 50Gi
27 | ---
28 | apiVersion: v1
29 | kind: ConfigMap
30 | metadata:
31 | name: html
32 | namespace: nginx
33 | data:
34 | index.html: |-
35 | Hello world!
36 | ---
37 | apiVersion: apps/v1
38 | kind: Deployment
39 | metadata:
40 | name: nginx-deployment
41 | namespace: nginx
42 | spec:
43 | selector:
44 | matchLabels:
45 | app: nginx
46 | replicas: 3
47 | template:
48 | metadata:
49 | labels:
50 | app: nginx
51 | spec:
52 | containers:
53 | - name: nginx
54 | image: nginx:latest
55 | ports:
56 | - containerPort: 80
57 | volumeMounts:
58 | - mountPath: /usr/share/nginx/html
59 | name: nginx-pvc
60 | initContainers:
61 | - name: nginx-init
62 | image: nginx:latest
63 | command: [ 'sh', '-c', "cp -r /html/* /usr/share/nginx/html/" ]
64 | volumeMounts:
65 | - mountPath: /usr/share/nginx/html
66 | name: nginx-pvc
67 | - mountPath: /html/index.html
68 | name: nginx-html
69 | subPath: index.html
70 | volumes:
71 | - name: nginx-pvc
72 | persistentVolumeClaim:
73 | claimName: nginx-pvc
74 | - name: nginx-html
75 | configMap:
76 | name: html
77 | ---
78 | apiVersion: v1
79 | kind: Service
80 | metadata:
81 | name: nginx
82 | namespace: nginx
83 | labels:
84 | app: nginx
85 | spec:
86 | type: NodePort
87 | ports:
88 | - name: http
89 | protocol: TCP
90 | port: 80
91 | nodePort: 30080
92 | selector:
93 | app: nginx
94 |
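Note: a quick smoke test against the NodePort service, assuming <node-ip> is any reachable node; it should return the "Hello world!" page that the init container copies in from the ConfigMap:

    curl http://<node-ip>:30080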
--------------------------------------------------------------------------------
/assets/pds-petclinic/pds-petclinic.tpl:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: petclinic-db
5 | namespace: (NAMESPACE)
6 | type: Opaque
7 | stringData:
8 | PG_URL: 'jdbc:postgresql://(VIP):(PORT)/pds'
9 | PG_USERNAME: 'pds'
10 | ---
11 | apiVersion: v1
12 | kind: Service
13 | metadata:
14 | name: petclinic
15 | labels:
16 | app: petclinic
17 | namespace: (NAMESPACE)
18 | spec:
19 | type: NodePort
20 | ports:
21 | - name: http
22 | protocol: TCP
23 | port: 8080
24 | targetPort: 8080
25 | nodePort: 30333
26 | selector:
27 | app: petclinic
28 | ---
29 | apiVersion: apps/v1
30 | kind: Deployment
31 | metadata:
32 | name: petclinic
33 | labels:
34 | app: petclinic
35 | namespace: (NAMESPACE)
36 | spec:
37 | replicas: 1
38 | selector:
39 | matchLabels:
40 | app: petclinic
41 | template:
42 | metadata:
43 | labels:
44 | app: petclinic
45 | spec:
46 | schedulerName: stork
47 | containers:
48 | - name: petclinic
49 | image: danpaul81/spring-petclinic:2.7.3
50 | imagePullPolicy: IfNotPresent
51 | livenessProbe:
52 | httpGet:
53 | port: 8080
54 | path: /actuator/health/liveness
55 | initialDelaySeconds: 90
56 | periodSeconds: 5
57 | readinessProbe:
58 | httpGet:
59 | port: 8080
60 | path: /actuator/health/readiness
61 | initialDelaySeconds: 15
62 | ports:
63 | - containerPort: 8080
64 | env:
65 | - name: SPRING_PROFILES_ACTIVE
66 | value: 'postgres'
67 | - name: SPRING_DATASOURCE_URL
68 | valueFrom:
69 | secretKeyRef:
70 | name: petclinic-db
71 | key: PG_URL
72 | - name: SPRING_DATASOURCE_USERNAME
73 | valueFrom:
74 | secretKeyRef:
75 | name: petclinic-db
76 | key: PG_USERNAME
77 | - name: SPRING_DATASOURCE_PASSWORD
78 | valueFrom:
79 | secretKeyRef:
80 | name: (CREDS)
81 | key: password
82 |
83 |
--------------------------------------------------------------------------------
/assets/petclinic/loadtest.md:
--------------------------------------------------------------------------------
1 |
2 | Load test / Dummy content
3 | This is a useful tool for demonstrating what happens whilst under load during a failover event.
4 |
5 | The load test packages a test script in a container for Locust that simulates user traffic to Pet Clinic; run it against the front-end service.
6 | The address and port of the frontend will be different and depend on which platform you've deployed to.
7 | See the notes for each deployment.
8 |
9 | docker run grdnr/load-test-petclinic:0.12 -h 3.8.156.114:30333
10 |
11 |
12 | The syntax for running the load test container is:
13 | docker run grdnr/load-test-petclinic:0.12 -h $frontend-ip[:$port] -r 100 -c 2
14 |
15 |
16 | The help command provides more details about the parameters:
17 | $ docker run weaveworksdemos/load-test --help
18 | Usage:
19 | docker run grdnr/load-test-petclinic:0.12 [ hostname ] OPTIONS
20 |
21 | Options:
22 | -d Delay before starting
23 | -h Target host url, e.g. localhost:80
24 | -c Number of clients (default 2)
25 | -r Number of requests (default 10)
26 |
27 | Description:
28 | Runs a Locust load simulation against specified host.
29 |
30 |
--------------------------------------------------------------------------------
/assets/postgres/pgadmin.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: pgadmin
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: pgadmin-service
10 | namespace: pgadmin
11 | spec:
12 | ports:
13 | - protocol: TCP
14 | port: 80
15 | targetPort: http
16 | selector:
17 | app: pgadmin
18 | type: NodePort
19 | ---
20 | apiVersion: apps/v1
21 | kind: StatefulSet
22 | metadata:
23 | name: pgadmin
24 | namespace: pgadmin
25 | spec:
26 | serviceName: pgadmin-service
27 | podManagementPolicy: Parallel
28 | replicas: 1
29 | updateStrategy:
30 | type: RollingUpdate
31 | selector:
32 | matchLabels:
33 | app: pgadmin
34 | template:
35 | metadata:
36 | labels:
37 | app: pgadmin
38 | spec:
39 | terminationGracePeriodSeconds: 10
40 | initContainers:
41 | - name: pgadmin-chown
42 | image: busybox:1.28
43 | command: ["sh", "-c", "mkdir -p /var/lib/pgadmin ; chown 5050 /var/lib/pgadmin"]
44 | volumeMounts:
45 | - name: pgadmin-data
46 | mountPath: /var/lib/pgadmin
47 | containers:
48 | - name: pgadmin
49 | image: dpage/pgadmin4:5.4
50 | imagePullPolicy: Always
51 | env:
52 | - name: PGADMIN_DEFAULT_EMAIL
53 | value: admin@portworx.com
54 | - name: PGADMIN_DEFAULT_PASSWORD
55 | value: admin
56 | ports:
57 | - name: http
58 | containerPort: 80
59 | protocol: TCP
60 | volumeMounts:
61 | - name: pgadmin-data
62 | mountPath: /var/lib/pgadmin
63 | volumes:
64 | - name: pgadmin-config
65 | configMap:
66 | name: pgadmin-config
67 | volumeClaimTemplates:
68 | - metadata:
69 | name: pgadmin-data
70 | spec:
71 | storageClassName: px-replicated
72 | accessModes: [ "ReadWriteOnce" ]
73 | resources:
74 | requests:
75 | storage: 3Gi
76 |
--------------------------------------------------------------------------------
/assets/postgres/postgres-autopilot-rule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: postgres-resize
5 | spec:
6 | ##### selector filters the objects affected by this rule given labels
7 | selector:
8 | matchLabels:
9 | app: postgres
10 | pollInterval: 2
11 | ##### conditions are the symptoms to evaluate. All conditions are AND'ed
12 | conditions:
13 | for: 5
14 | # resize when volume usage exceeds 30% of capacity
15 | expressions:
16 | - key: "100 * (px_volume_usage_bytes / px_volume_capacity_bytes)"
17 | operator: Gt
18 | values:
19 | - "30"
20 | ##### action to perform when condition is true
21 | actions:
22 | - name: openstorage.io.action.volume/resize
23 | params:
24 | # resize volume by scalepercentage of current size
25 | scalepercentage: "100"
26 | maxsize: "30Gi"
27 |
--------------------------------------------------------------------------------
/assets/postgres/postgres-restore.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: px-postgres-snap-clone
5 | annotations:
6 | snapshot.alpha.kubernetes.io/snapshot: px-postgres-snapshot ## use snapshot name
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | storageClassName: stork-snapshot-sc ## reference stork SC to handle cloning of volume
11 | resources:
12 | requests:
13 | storage: 2Gi
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: postgres-snap
19 | spec:
20 | selector:
21 | matchLabels:
22 | app: postgres-snap
23 | strategy:
24 | rollingUpdate:
25 | maxSurge: 1
26 | maxUnavailable: 1
27 | type: RollingUpdate
28 | replicas: 1
29 | template:
30 | metadata:
31 | labels:
32 | app: postgres-snap
33 | spec:
34 | affinity:
35 | nodeAffinity:
36 | requiredDuringSchedulingIgnoredDuringExecution:
37 | nodeSelectorTerms:
38 | - matchExpressions:
39 | - key: px/running
40 | operator: NotIn
41 | values:
42 | - "false"
43 | - key: px/enabled
44 | operator: NotIn
45 | values:
46 | - "false"
47 | containers:
48 | - name: postgres
49 | image: postgres:9.5
50 | imagePullPolicy: "IfNotPresent"
51 | ports:
52 | - containerPort: 5432
53 | env:
54 | - name: POSTGRES_USER
55 | value: pgbench
56 | - name: PGUSER
57 | value: pgbench
58 | - name: POSTGRES_PASSWORD
59 | value: superpostgres
60 | - name: PGBENCH_PASSWORD
61 | value: superpostgres
62 | - name: PGDATA
63 | value: /var/lib/postgresql/data/pgdata
64 | volumeMounts:
65 | - mountPath: /var/lib/postgresql/data
66 | name: postgredb
67 | volumes:
68 | - name: postgredb
69 | persistentVolumeClaim:
70 | claimName: px-postgres-snap-clone
--------------------------------------------------------------------------------
/assets/postgres/postgres-snap.yml:
--------------------------------------------------------------------------------
1 | ## Local snapshot spec
2 | apiVersion: volumesnapshot.external-storage.k8s.io/v1
3 | kind: VolumeSnapshot
4 | metadata:
5 | name: px-postgres-snapshot
6 | namespace: default
7 | spec:
8 | persistentVolumeClaimName: postgres-data
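Note: a rough sequence for the snapshot/restore pair in this folder; postgres-restore.yml clones a new PVC from this snapshot via the stork-snapshot-sc storage class:

    kubectl apply -f postgres-snap.yml
    kubectl get volumesnapshot.volumesnapshot.external-storage.k8s.io px-postgres-snapshot
    kubectl apply -f postgres-restore.yml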
--------------------------------------------------------------------------------
/assets/postgres/postgres.yml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: px-postgres-sc
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "3"
10 | io_profile: "db_remote"
11 | priority_io: "high"
12 | allowVolumeExpansion: true
13 | ---
14 | kind: PersistentVolumeClaim
15 | apiVersion: v1
16 | metadata:
17 | labels:
18 | app: postgres
19 | name: postgres-data
20 | spec:
21 | accessModes:
22 | - ReadWriteOnce
23 | storageClassName: px-postgres-sc
24 | resources:
25 | requests:
26 | storage: 2Gi
27 | ---
28 | apiVersion: apps/v1
29 | kind: Deployment
30 | metadata:
31 | name: postgres
32 | spec:
33 | selector:
34 | matchLabels:
35 | app: postgres
36 | strategy:
37 | rollingUpdate:
38 | maxSurge: 1
39 | maxUnavailable: 1
40 | type: RollingUpdate
41 | replicas: 1
42 | template:
43 | metadata:
44 | labels:
45 | app: postgres
46 | spec:
47 | schedulerName: stork
48 | containers:
49 | - name: postgres
50 | image: postgres:9.5
51 | imagePullPolicy: "IfNotPresent"
52 | ports:
53 | - containerPort: 5432
54 | env:
55 | - name: POSTGRES_USER
56 | value: pgbench
57 | - name: PGUSER
58 | value: pgbench
59 | - name: POSTGRES_PASSWORD
60 | value: superpostgres
61 | - name: PGBENCH_PASSWORD
62 | value: superpostgres
63 | - name: PGDATA
64 | value: /var/lib/postgresql/data/pgdata
65 | volumeMounts:
66 | - mountPath: /var/lib/postgresql/data
67 | name: postgres-data
68 | volumes:
69 | - name: postgres-data
70 | persistentVolumeClaim:
71 | claimName: postgres-data
72 |
--------------------------------------------------------------------------------
/assets/proxy-nfs/nginx-deployment.yml:
--------------------------------------------------------------------------------
1 | kind: Deployment
2 | metadata:
3 | name: nginx
4 | spec:
5 | replicas: 1
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | template:
10 | metadata:
11 | labels:
12 | app: nginx
13 | spec:
14 | containers:
15 | - name: nginx
16 | image: bitnami/nginx
17 | ports:
18 | - containerPort: 80
19 | volumeMounts:
20 | - name: nginx-persistent-storage
21 | mountPath: /usr/share/nginx/html
22 | volumes:
23 | - name: nginx-persistent-storage
24 | persistentVolumeClaim:
25 | claimName: nfs-data
26 |
--------------------------------------------------------------------------------
/assets/proxy-nfs/pvc.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: nfs-data
5 | labels:
6 | app: nginx
7 | spec:
8 | storageClassName: portworx-proxy-volume-volume
9 | accessModes:
10 | - ReadWriteOnce
11 | resources:
12 | requests:
13 | storage: 2Gi
14 |
--------------------------------------------------------------------------------
/assets/proxy-nfs/sc.yml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: portworx-proxy-volume-volume
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | proxy_endpoint: "nfs://master-1"
10 | proxy_nfs_exportpath: "/var/nfs"
11 | allowVolumeExpansion: true
12 |
--------------------------------------------------------------------------------
/assets/pure/multipath.conf:
--------------------------------------------------------------------------------
1 | devices {
2 | device {
3 | vendor "NVME"
4 | product "Pure Storage FlashArray"
5 | path_selector "queue-length 0"
6 | path_grouping_policy group_by_prio
7 | prio ana
8 | failback immediate
9 | fast_io_fail_tmo 10
10 | user_friendly_names no
11 | no_path_retry 0
12 | features 0
13 | dev_loss_tmo 60
14 | }
15 | device {
16 | vendor "PURE"
17 | product "FlashArray"
18 | path_selector "service-time 0"
19 | hardware_handler "1 alua"
20 | path_grouping_policy group_by_prio
21 | prio alua
22 | failback immediate
23 | path_checker tur
24 | fast_io_fail_tmo 10
25 | user_friendly_names no
26 | no_path_retry 0
27 | features 0
28 | dev_loss_tmo 600
29 | }
30 | }
31 |
32 |
--------------------------------------------------------------------------------
/assets/pure/pure.json:
--------------------------------------------------------------------------------
1 | {
2 | "FlashArrays": [
3 | {
4 | "MgmtEndPoint": "",
5 | "APIToken": ""
6 | },
7 | {
8 | "MgmtEndPoint": "",
9 | "APIToken": ""
10 | }
11 | ],
12 | "FlashBlades": [
13 | {
14 | "MgmtEndPoint": "",
15 | "APIToken": "",
16 | "NFSEndPoint": ""
17 | },
18 | {
19 | "MgmtEndPoint": "",
20 | "APIToken": "",
21 | "NFSEndPoint": ""
22 | }
23 | ]
24 | }
25 |
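Note: this file is normally handed to Portworx as a secret named px-pure-secret; a sketch, with the namespace depending on where Portworx is installed:

    kubectl create secret generic px-pure-secret -n portworx --from-file=pure.json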
--------------------------------------------------------------------------------
/assets/redis/redis.yml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: px-redis-data
5 | spec:
6 | storageClassName: px-csi-db
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 1Gi
12 | ---
13 | kind: PersistentVolumeClaim
14 | apiVersion: v1
15 | metadata:
16 | name: px-redis-conf
17 | spec:
18 | storageClassName: px-csi-db
19 | accessModes:
20 | - ReadWriteOnce
21 | resources:
22 | requests:
23 | storage: 1Gi
24 | ---
25 | apiVersion: apps/v1
26 | kind: Deployment
27 | metadata:
28 | name: redis
29 | spec:
30 | selector:
31 | matchLabels:
32 | app: redis
33 | template:
34 | metadata:
35 | labels:
36 | app: redis
37 | spec:
38 | containers:
39 | - name: redis
40 | image: "redis"
41 | command:
42 | - "redis-server"
43 | args:
44 | - "--protected-mode"
45 | - "no"
46 | - "--appendonly"
47 | - "yes"
48 | resources:
49 | requests:
50 | cpu: "100m"
51 | memory: "100Mi"
52 | ports:
53 | - name: redis
54 | containerPort: 6379
55 | protocol: "TCP"
56 | - name: cluster
57 | containerPort: 16379
58 | protocol: "TCP"
59 | volumeMounts:
60 | - name: "redis-conf"
61 | mountPath: "/etc/redis"
62 | - name: "redis-data"
63 | mountPath: "/data"
64 | volumes:
65 | - name: "redis-data"
66 | persistentVolumeClaim:
67 | claimName: px-redis-data
68 | - name: "redis-conf"
69 | persistentVolumeClaim:
70 | claimName: px-redis-conf
71 |
--------------------------------------------------------------------------------
/assets/sock-shop/loadtest.md:
--------------------------------------------------------------------------------
1 |
2 | Load test / Dummy content
3 | This is a useful tool for demonstrating what happens whilst under load during a failover event.
4 |
5 | The load test packages a test script in a container for Locust that simulates user traffic to Sock Shop; run it against the front-end service.
6 | The address and port of the frontend will be different and depend on which platform you've deployed to.
7 | See the notes for each deployment.
8 |
9 |
10 | For example, on the Docker (single-host with Weave) deployment, on Docker for Mac:
11 | docker run --net=host weaveworksdemos/load-test -h localhost -r 100 -c 2
12 |
13 |
14 | The syntax for running the load test container is:
15 | docker run --net=host weaveworksdemos/load-test -h $frontend-ip[:$port] -r 100 -c 2
16 |
17 |
18 | The help command provides more details about the parameters:
19 | $ docker run weaveworksdemos/load-test --help
20 | Usage:
21 | docker run weaveworksdemos/load-test [ hostname ] OPTIONS
22 |
23 | Options:
24 | -d Delay before starting
25 | -h Target host url, e.g. localhost:80
26 | -c Number of clients (default 2)
27 | -r Number of requests (default 10)
28 |
29 | Description:
30 | Runs a Locust load simulation against specified host.
31 |
32 |
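
The notes above target the Docker-based deployments; when Sock Shop runs on Kubernetes (as with the manifest in this repository), the same image can be launched as a short-lived pod inside the cluster. A minimal sketch, assuming the shop uses the conventional `front-end` service in the `sock-shop` namespace (adjust both to match your deployment):

```
# Run the Locust load generator in-cluster and point it at the front-end service.
# The namespace and service name below are assumptions, not taken from this repository.
kubectl run load-test -n sock-shop --rm -it --restart=Never \
  --image=weaveworksdemos/load-test -- \
  -h front-end.sock-shop.svc.cluster.local:80 -r 1000 -c 5
```

The `-r` and `-c` values mirror the Docker examples above and can be raised to keep traffic flowing throughout the failover demonstration.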
--------------------------------------------------------------------------------
/assets/training/Autopilot/autopilot-expand-pool-rule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: pool-expand
5 | spec:
6 | enforcement: required
7 | ##### conditions are the symptoms to evaluate. All conditions are AND'ed
8 | conditions:
9 | for: 5
10 | expressions:
11 | # pool available capacity less than 50%
12 | - key: "100 * ( px_pool_stats_available_bytes/ px_pool_stats_total_bytes)"
13 | operator: Lt
14 | values:
15 | - "50"
16 | # pool total capacity should not exceed 2TB
17 | - key: "px_pool_stats_total_bytes/(1024*1024*1024)"
18 | operator: Lt
19 | values:
20 | - "2000"
21 | ##### action to perform when condition is true
22 | actions:
23 | - name: "openstorage.io.action.storagepool/expand"
24 | params:
25 | # resize pool by scalepercentage of current size
26 | scalepercentage: "50"
27 | # when scaling, resize disk
28 | scaletype: "resize-disk"
29 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/autopilot-rebalance-pool-rule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: pool-rebalance
5 | spec:
6 | conditions:
7 | for: 5
8 | requiredMatches: 1
9 | expressions:
10 | - keyAlias: PoolProvDeviationPerc
11 | operator: NotInRange
12 | values:
13 | - "-20"
14 | - "20"
15 | - keyAlias: PoolUsageDeviationPerc
16 | operator: NotInRange
17 | values:
18 | - "-20"
19 | - "20"
20 | actions:
21 | - name: "openstorage.io.action.storagepool/rebalance"
22 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/create-volumes.sh:
--------------------------------------------------------------------------------
1 | nodes=$(kubectl pxc pxctl status -j | tail -n +2 | jq -r '.cluster.Nodes[].Id' | head -2 | tr '\n' ,)
2 | for i in $(seq -w 1 10); do kubectl pxc pxctl v c --nodes $nodes --repl 2 --size 10 temp$i; done
3 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/delete-volumes.sh:
--------------------------------------------------------------------------------
1 | for i in $(seq -w 1 10); do kubectl pxc pxctl v d temp$i -f; done
2 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/postgres-autopilot-rule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autopilot.libopenstorage.org/v1alpha1
2 | kind: AutopilotRule
3 | metadata:
4 | name: postgres-resize
5 | spec:
6 | ##### selector filters the objects affected by this rule given labels
7 | selector:
8 | matchLabels:
9 | app: postgres
10 | ##### conditions are the symptoms to evaluate. All conditions are AND'ed
11 | conditions:
12 | for: 5
13 |   # trigger when volume usage exceeds 30%
14 | expressions:
15 | - key: "100 * (px_volume_usage_bytes / px_volume_capacity_bytes)"
16 | operator: Gt
17 | values:
18 | - "30"
19 | ##### action to perform when condition is true
20 | actions:
21 | - name: openstorage.io.action.volume/resize
22 | params:
23 | # resize volume by scalepercentage of current size
24 | scalepercentage: "100"
25 | maxsize: "30Gi"
26 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/postgres.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: px-postgres-sc
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "3"
10 | io_profile: "db_remote"
11 | priority_io: "high"
12 | allowVolumeExpansion: true
13 | ---
14 | kind: PersistentVolumeClaim
15 | apiVersion: v1
16 | metadata:
17 | name: postgres-data
18 | labels:
19 | app: postgres
20 | spec:
21 | storageClassName: px-postgres-sc
22 | accessModes:
23 | - ReadWriteOnce
24 | resources:
25 | requests:
26 | storage: 2Gi
27 | ---
28 | apiVersion: apps/v1
29 | kind: Deployment
30 | metadata:
31 | name: postgres
32 | labels:
33 | app: postgres
34 | spec:
35 | strategy:
36 | rollingUpdate:
37 | maxSurge: 1
38 | maxUnavailable: 1
39 | type: RollingUpdate
40 | replicas: 1
41 | selector:
42 | matchLabels:
43 | app: postgres
44 | template:
45 | metadata:
46 | labels:
47 | app: postgres
48 | spec:
49 | schedulerName: stork
50 | containers:
51 | - name: postgres
52 | image: postgres:9.5
53 | imagePullPolicy: "IfNotPresent"
54 | ports:
55 | - containerPort: 5432
56 | env:
57 | - name: POSTGRES_USER
58 | value: pgbench
59 | - name: PGUSER
60 | value: pgbench
61 | - name: POSTGRES_PASSWORD
62 | value: superpostgres
63 | - name: PGBENCH_PASSWORD
64 | value: superpostgres
65 | - name: PGDATA
66 | value: /var/lib/postgresql/data/pgdata
67 | volumeMounts:
68 | - mountPath: /var/lib/postgresql/data
69 | name: postgres-data
70 | volumes:
71 | - name: postgres-data
72 | persistentVolumeClaim:
73 | claimName: postgres-data
74 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/show-replicas.sh:
--------------------------------------------------------------------------------
1 | for i in $(seq -w 1 10); do echo temp$i; kubectl pxc pxctl volume inspect temp$i | grep Node | cut -f 2 -d : ; done
2 |
--------------------------------------------------------------------------------
/assets/training/Autopilot/watch-events.sh:
--------------------------------------------------------------------------------
1 | watch kubectl get events --field-selector involvedObject.kind=AutopilotRule --sort-by .lastTimestamp
2 |
--------------------------------------------------------------------------------
/assets/training/Basic_Volume_Management/nginx-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | spec:
6 | containers:
7 | - name: nginx
8 | image: nginx:latest
9 | volumeMounts:
10 | - name: testvol
11 | mountPath: /usr/share/nginx/html
12 | ports:
13 | - containerPort: 80
14 | volumes:
15 | - name: testvol
16 | portworxVolume:
17 | volumeID: "testvol"
--------------------------------------------------------------------------------
/assets/training/Cloudsnaps/create-objectstore.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | pxctl volume create objectstorevol --size 10
4 | pxctl objectstore create -v objectstorevol
5 | pxctl objectstore enable
6 | while ! pxctl objectstore status | grep -q Running ; do echo Waiting for objectstore; sleep 1; done
7 | echo Execute this command to add objectstore credentials to Portworx:
8 | pxctl objectstore status | tail -5 | sed 's/9010 .*/9010 minio/'
9 |
--------------------------------------------------------------------------------
/assets/training/Cloudsnaps/px-postgres-cloudrestore.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: postgres-cloudsnap-clone
5 | annotations:
6 | snapshot.alpha.kubernetes.io/snapshot: postgres-snapshot
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | storageClassName: stork-snapshot-sc
11 | resources:
12 | requests:
13 | storage: 2Gi
14 |
--------------------------------------------------------------------------------
/assets/training/Cloudsnaps/px-postgres-cloudsnap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: volumesnapshot.external-storage.k8s.io/v1
2 | kind: VolumeSnapshot
3 | metadata:
4 | name: postgres-snapshot
5 | namespace: default
6 | annotations:
7 | portworx/snapshot-type: cloud
8 | portworx/cloud-cred-id: minio
9 | spec:
10 | persistentVolumeClaimName: postgres-data
11 |
12 |
--------------------------------------------------------------------------------
/assets/training/Database_Deployment_PostgreSQL/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | kubectl exec $POD -- createdb pxdemo
2 |
3 | kubectl exec $POD -- psql -l
4 |
5 | kubectl exec $POD -- pgbench -i -s 50 pxdemo
6 |
7 | kubectl exec $POD -- psql pxdemo -c 'select count(*) from pgbench_accounts'
8 |
--------------------------------------------------------------------------------
/assets/training/Database_Deployment_PostgreSQL/px-postgres-app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres
5 | labels:
6 | app: postgres
7 | spec:
8 | strategy:
9 | rollingUpdate:
10 | maxSurge: 1
11 | maxUnavailable: 1
12 | type: RollingUpdate
13 | replicas: 1
14 | selector:
15 | matchLabels:
16 | app: postgres
17 | template:
18 | metadata:
19 | labels:
20 | app: postgres
21 | spec:
22 | schedulerName: stork
23 | containers:
24 | - name: postgres
25 | image: postgres:9.5
26 | imagePullPolicy: "IfNotPresent"
27 | ports:
28 | - containerPort: 5432
29 | env:
30 | - name: POSTGRES_USER
31 | value: pgbench
32 | - name: PGUSER
33 | value: pgbench
34 | - name: POSTGRES_PASSWORD
35 | value: superpostgres
36 | - name: PGBENCH_PASSWORD
37 | value: superpostgres
38 | - name: PGDATA
39 | value: /var/lib/postgresql/data/pgdata
40 | volumeMounts:
41 | - mountPath: /var/lib/postgresql/data
42 | name: postgres-data
43 | volumes:
44 | - name: postgres-data
45 | persistentVolumeClaim:
46 | claimName: postgres-data
47 |
--------------------------------------------------------------------------------
/assets/training/Database_Deployment_PostgreSQL/px-postgres-pvc.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: postgres-data
5 | spec:
6 | storageClassName: px-postgres-sc
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 2Gi
12 |
--------------------------------------------------------------------------------
/assets/training/Database_Deployment_PostgreSQL/px-postgres-sc.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: px-postgres-sc
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | repl: "3"
8 | io_profile: "db_remote"
9 | priority_io: "high"
10 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | pxctl license activate
2 |
3 | storkctl create clusterpair dest-cluster \
4 | --provider s3 \
5 | --s3-endpoint s3.amazonaws.com \
6 | --s3-access-key \
7 | --s3-secret-key \
8 | --s3-region eu-west-1 \
9 | --bucket \
10 | --namespace kube-system \
11 | --src-kube-file $HOME/.kube/config \
12 | --dest-kube-file /home/training/.kube/config
13 |
14 | storkctl get clusterpair -n kube-system
15 |
16 | kubectl get clusterpair -n kube-system -o yaml
17 |
18 | storkctl get schedulepolicy
19 |
20 | storkctl get migrationschedule -n kube-system
21 |
22 | storkctl get migration -n kube-system
23 |
24 | kubectl get pod -l app=postgres -n postgres
25 |
26 | kubectl exec $POD -n postgres -- createdb pxdemo
27 |
28 | kubectl exec $POD -n postgres -- pgbench -i -s 50 pxdemo
29 |
30 | kubectl exec $POD -n postgres -- psql pxdemo -c 'select count(*) from pgbench_accounts'
31 |
32 | kubectl describe deployment postgres -n postgres
33 |
34 | storkctl suspend migrationschedule migrationschedule -n kube-system
35 |
36 | storkctl activate migration -n postgres
37 |
38 | kubectl get deployment -n postgres
39 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/etcd.sh:
--------------------------------------------------------------------------------
1 | # Run an etcd container
2 | docker run -d --restart unless-stopped -v /usr/share/ca-certificates/:/etc/ssl/certs -p 2382:2382 \
3 | --name etcd quay.io/coreos/etcd:latest \
4 | /usr/local/bin/etcd \
5 | -name etcd0 \
6 | -auto-compaction-retention=3 -quota-backend-bytes=8589934592 \
7 | -advertise-client-urls http://$(hostname -i):2382 \
8 | -listen-client-urls http://0.0.0.0:2382
9 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/migration-schedule-async.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: MigrationSchedule
3 | metadata:
4 | name: migrationschedule
5 | namespace: kube-system
6 | spec:
7 | template:
8 | spec:
9 | clusterPair: dest-cluster
10 | includeResources: true
11 | startApplications: false
12 | namespaces:
13 | - postgres
14 | schedulePolicyName: testpolicy
15 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/migration-schedule-sync.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: MigrationSchedule
3 | metadata:
4 | name: migrationschedule
5 | namespace: kube-system
6 | spec:
7 | template:
8 | spec:
9 | clusterPair: dest-cluster
10 | includeResources: true
11 | startApplications: false
12 | includeVolumes: false
13 | namespaces:
14 | - postgres
15 | schedulePolicyName: testpolicy
16 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/postgres.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: postgres
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: px-postgres-sc
10 | provisioner: pxd.portworx.com
11 | parameters:
12 | #openstorage.io/auth-secret-name: px-user-token
13 | #openstorage.io/auth-secret-namespace: portworx
14 | repl: "3"
15 | io_profile: "db_remote"
16 | priority_io: "high"
17 | allowVolumeExpansion: true
18 | ---
19 | kind: PersistentVolumeClaim
20 | apiVersion: v1
21 | metadata:
22 | name: postgres-data
23 | namespace: postgres
24 | labels:
25 | app: postgres
26 | spec:
27 | storageClassName: px-postgres-sc
28 | accessModes:
29 | - ReadWriteOnce
30 | resources:
31 | requests:
32 | storage: 2Gi
33 | ---
34 | apiVersion: apps/v1
35 | kind: Deployment
36 | metadata:
37 | name: postgres
38 | namespace: postgres
39 | labels:
40 | app: postgres
41 | spec:
42 | strategy:
43 | rollingUpdate:
44 | maxSurge: 1
45 | maxUnavailable: 1
46 | type: RollingUpdate
47 | replicas: 1
48 | selector:
49 | matchLabels:
50 | app: postgres
51 | template:
52 | metadata:
53 | labels:
54 | app: postgres
55 | spec:
56 | schedulerName: stork
57 | containers:
58 | - name: postgres
59 | image: postgres:9.5
60 | imagePullPolicy: "IfNotPresent"
61 | ports:
62 | - containerPort: 5432
63 | env:
64 | - name: POSTGRES_USER
65 | value: pgbench
66 | - name: PGUSER
67 | value: pgbench
68 | - name: POSTGRES_PASSWORD
69 | value: superpostgres
70 | - name: PGBENCH_PASSWORD
71 | value: superpostgres
72 | - name: PGDATA
73 | value: /var/lib/postgresql/data/pgdata
74 | volumeMounts:
75 | - mountPath: /var/lib/postgresql/data
76 | name: postgres-data
77 | volumes:
78 | - name: postgres-data
79 | persistentVolumeClaim:
80 | claimName: postgres-data
81 |
--------------------------------------------------------------------------------
/assets/training/Disaster_Recovery/schedule-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: SchedulePolicy
3 | metadata:
4 | name: testpolicy
5 | policy:
6 | interval:
7 | intervalMinutes: 1
8 |
--------------------------------------------------------------------------------
/assets/training/Group_Snapshots_Cassandra/px-cassandra-group-cloudsnap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: GroupVolumeSnapshot
3 | metadata:
4 | name: cassandra-group-cloudsnapshot
5 | spec:
6 | preExecRule: cassandra-presnap-rule
7 | pvcSelector:
8 | matchLabels:
9 | app: cassandra
10 | options:
11 | portworx/snapshot-type: cloud
12 |
--------------------------------------------------------------------------------
/assets/training/Group_Snapshots_Cassandra/px-cassandra-presnap-rule.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: Rule
3 | metadata:
4 | name: cassandra-presnap-rule
5 | rules:
6 | - podSelector:
7 | app: cassandra
8 | actions:
9 | - type: command
10 | value: nodetool flush
11 |
--------------------------------------------------------------------------------
/assets/training/Group_Snapshots_Cassandra/px-cassandra-restore-pvcs.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | annotations:
5 | snapshot.alpha.kubernetes.io/snapshot: ""
6 | labels:
7 | app: cassandra
8 | name: cassandra-data-cassandra-0
9 | namespace: default
10 | spec:
11 | accessModes:
12 | - ReadWriteOnce
13 | storageClassName: stork-snapshot-sc
14 | resources:
15 | requests:
16 | storage: 2Gi
17 | ---
18 | apiVersion: v1
19 | kind: PersistentVolumeClaim
20 | metadata:
21 | annotations:
22 | snapshot.alpha.kubernetes.io/snapshot: ""
23 | labels:
24 | app: cassandra
25 | name: cassandra-data-cassandra-1
26 | namespace: default
27 | spec:
28 | accessModes:
29 | - ReadWriteOnce
30 | storageClassName: stork-snapshot-sc
31 | resources:
32 | requests:
33 | storage: 2Gi
34 | ---
35 | apiVersion: v1
36 | kind: PersistentVolumeClaim
37 | metadata:
38 | annotations:
39 | snapshot.alpha.kubernetes.io/snapshot: ""
40 | labels:
41 | app: cassandra
42 | name: cassandra-data-cassandra-2
43 | namespace: default
44 | spec:
45 | accessModes:
46 | - ReadWriteOnce
47 | storageClassName: stork-snapshot-sc
48 | resources:
49 | requests:
50 | storage: 2Gi
51 |
--------------------------------------------------------------------------------
/assets/training/Group_Snapshots_Cassandra/px-cassandra.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: px-cassandra-sc
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "2"
10 | priority_io: "high"
11 | group: "cassandra_vg"
12 | ---
13 | apiVersion: v1
14 | kind: Service
15 | metadata:
16 | labels:
17 | app: cassandra
18 | name: cassandra
19 | spec:
20 | clusterIP: None
21 | ports:
22 | - port: 9042
23 | selector:
24 | app: cassandra
25 | ---
26 | apiVersion: "apps/v1"
27 | kind: StatefulSet
28 | metadata:
29 | name: cassandra
30 | spec:
31 | selector:
32 | matchLabels:
33 | app: cassandra
34 | serviceName: cassandra
35 | replicas: 3
36 | template:
37 | metadata:
38 | labels:
39 | app: cassandra
40 | spec:
41 | # Use the stork scheduler to enable more efficient placement of the pods
42 | schedulerName: stork
43 | containers:
44 | - name: cassandra
45 | image: gcr.io/google-samples/cassandra:v14
46 | imagePullPolicy: Always
47 | ports:
48 | - containerPort: 7000
49 | name: intra-node
50 | - containerPort: 7001
51 | name: tls-intra-node
52 | - containerPort: 7199
53 | name: jmx
54 | - containerPort: 9042
55 | name: cql
56 | resources:
57 | limits:
58 | cpu: "500m"
59 | memory: 1Gi
60 | requests:
61 | cpu: "500m"
62 | memory: 1Gi
63 | securityContext:
64 | capabilities:
65 | add:
66 | - IPC_LOCK
67 | lifecycle:
68 | preStop:
69 | exec:
70 | command: ["/bin/sh", "-c", "PID=$(pidof java) && kill $PID && while ps -p $PID > /dev/null; do sleep 1; done"]
71 | env:
72 | - name: MAX_HEAP_SIZE
73 | value: 512M
74 | - name: HEAP_NEWSIZE
75 | value: 100M
76 | - name: CASSANDRA_SEEDS
77 | value: "cassandra-0.cassandra.default.svc.cluster.local"
78 | - name: CASSANDRA_CLUSTER_NAME
79 | value: "K8Demo"
80 | - name: CASSANDRA_DC
81 | value: "DC1-K8Demo"
82 | - name: CASSANDRA_RACK
83 | value: "Rack1-K8Demo"
84 | - name: CASSANDRA_AUTO_BOOTSTRAP
85 | value: "false"
86 | - name: POD_IP
87 | valueFrom:
88 | fieldRef:
89 | fieldPath: status.podIP
90 | - name: POD_NAMESPACE
91 | valueFrom:
92 | fieldRef:
93 | fieldPath: metadata.namespace
94 | readinessProbe:
95 | exec:
96 | command:
97 | - /bin/bash
98 | - -c
99 | - /ready-probe.sh
100 | initialDelaySeconds: 15
101 | timeoutSeconds: 5
102 | # These volume mounts are persistent. They are like inline claims,
103 | # but not exactly because the names need to match exactly one of
104 | # the stateful pod volumes.
105 | volumeMounts:
106 | - name: cassandra-data
107 | mountPath: /cassandra_data
108 | # These are converted to volume claims by the controller
109 | # and mounted at the paths mentioned above.
110 | volumeClaimTemplates:
111 | - metadata:
112 | name: cassandra-data
113 | spec:
114 | storageClassName: px-cassandra-sc
115 | accessModes: [ "ReadWriteOnce" ]
116 | resources:
117 | requests:
118 | storage: 1Gi
119 | ---
120 | apiVersion: v1
121 | kind: Pod
122 | metadata:
123 | name: cqlsh
124 | spec:
125 | containers:
126 | - name: cqlsh
127 | image: mikewright/cqlsh
128 | command:
129 | - sh
130 | - -c
131 | - "exec tail -f /dev/null"
132 |
--------------------------------------------------------------------------------
/assets/training/PDS/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | kubectl create ns pds-postgres
2 |
3 | kubectl label ns pds-postgres pds.portworx.com/available=true
4 |
5 | kubectl exec $(kubectl get pod -n pds-postgres -l role=master -o jsonpath='{.items[].metadata.name}') -n pds-postgres -c postgresql -- curl -s https://ipinfo.io/ip
6 |
7 | kubectl get service -n pgadmin
8 |
9 | kubectl get services -n pds-postgres
10 |
--------------------------------------------------------------------------------
/assets/training/PDS/pgadmin.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: pgadmin
5 | ---
6 | apiVersion: v1
7 | kind: Service
8 | metadata:
9 | name: pgadmin-service
10 | namespace: pgadmin
11 | spec:
12 | ports:
13 | - protocol: TCP
14 | port: 80
15 | targetPort: http
16 | selector:
17 | app: pgadmin
18 | type: NodePort
19 | ---
20 | apiVersion: apps/v1
21 | kind: StatefulSet
22 | metadata:
23 | name: pgadmin
24 | namespace: pgadmin
25 | spec:
26 | serviceName: pgadmin-service
27 | podManagementPolicy: Parallel
28 | replicas: 1
29 | updateStrategy:
30 | type: RollingUpdate
31 | selector:
32 | matchLabels:
33 | app: pgadmin
34 | template:
35 | metadata:
36 | labels:
37 | app: pgadmin
38 | spec:
39 | terminationGracePeriodSeconds: 10
40 | initContainers:
41 | - name: pgadmin-chown
42 | image: busybox:1.28
43 | command: ["sh", "-c", "mkdir -p /var/lib/pgadmin ; chown 5050 /var/lib/pgadmin"]
44 | volumeMounts:
45 | - name: pgadmin-data
46 | mountPath: /var/lib/pgadmin
47 | containers:
48 | - name: pgadmin
49 | image: dpage/pgadmin4:5.4
50 | imagePullPolicy: Always
51 | env:
52 | - name: PGADMIN_DEFAULT_EMAIL
53 | value: admin@portworx.com
54 | - name: PGADMIN_DEFAULT_PASSWORD
55 | value: admin
56 | ports:
57 | - name: http
58 | containerPort: 80
59 | protocol: TCP
60 | volumeMounts:
61 | - name: pgadmin-data
62 | mountPath: /var/lib/pgadmin
63 | volumes:
64 | - name: pgadmin-config
65 | configMap:
66 | name: pgadmin-config
67 | volumeClaimTemplates:
68 | - metadata:
69 | name: pgadmin-data
70 | spec:
71 | storageClassName: px-replicated
72 | accessModes: [ "ReadWriteOnce" ]
73 | resources:
74 | requests:
75 | storage: 3Gi
76 |
--------------------------------------------------------------------------------
/assets/training/PX_Migrate/migration.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: Migration
3 | metadata:
4 | name: redis-migration
5 | namespace: kube-system
6 | spec:
7 | clusterPair: dest-cluster
8 | includeResources: true
9 | startApplications: true
10 | namespaces:
11 | - redis
12 | purgeDeletedResources: false
13 |
--------------------------------------------------------------------------------
/assets/training/PX_Migrate/redis.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: redis
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: px-redis-sc
10 | provisioner: pxd.portworx.com
11 | parameters:
12 | #openstorage.io/auth-secret-name: px-user-token
13 | #openstorage.io/auth-secret-namespace: portworx
14 | repl: "3"
15 | io_profile: "db_remote"
16 | allowVolumeExpansion: true
17 | ---
18 | kind: PersistentVolumeClaim
19 | apiVersion: v1
20 | metadata:
21 | labels:
22 | app: redis
23 | name: redis-data
24 | namespace: redis
25 | spec:
26 | storageClassName: px-redis-sc
27 | accessModes:
28 | - ReadWriteOnce
29 | resources:
30 | requests:
31 | storage: 2Gi
32 | ---
33 | apiVersion: v1
34 | kind: Service
35 | metadata:
36 | name: redis
37 | namespace: redis
38 | spec:
39 | ports:
40 | - port: 6379
41 | name: redis
42 | clusterIP: None
43 | selector:
44 | app: redis
45 | ---
46 | apiVersion: apps/v1
47 | kind: Deployment
48 | metadata:
49 | name: redis
50 | namespace: redis
51 | labels:
52 | app: redis
53 | spec:
54 | selector:
55 | matchLabels:
56 | app: redis
57 | template:
58 | metadata:
59 | labels:
60 | app: redis
61 | spec:
62 | schedulerName: stork
63 | containers:
64 | - name: redis
65 | image: redis:3.2-alpine
66 | imagePullPolicy: Always
67 | args: ["--requirepass", "$(REDIS_PASS)"]
68 | ports:
69 | - containerPort: 6379
70 | name: redis
71 | env:
72 | - name: REDIS_PASS
73 | value: password
74 | volumeMounts:
75 | - name: redis-vol
76 | mountPath: /data
77 | volumes:
78 | - name: redis-vol
79 | persistentVolumeClaim:
80 | claimName: redis-data
81 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/application-backup.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: ApplicationBackup
3 | metadata:
4 | name: postgres-backup
5 | namespace: backup-lab
6 | spec:
7 | backupLocation: minio
8 | namespaces:
9 | - backup-lab
10 | reclaimPolicy: Retain
11 | selectors:
12 | preExecRule:
13 | postExecRule:
14 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/application-restore.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: ApplicationRestore
3 | metadata:
4 | name: postgres-restore
5 | namespace: backup-lab
6 | spec:
7 | backupName:
8 | backupLocation: minio
9 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/backup-location.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: stork.libopenstorage.org/v1alpha1
2 | kind: BackupLocation
3 | metadata:
4 | name: minio
5 | namespace: backup-lab
6 | annotations:
7 | stork.libopenstorage.org/skipresource: "false"
8 | location:
9 | type: s3
10 | sync: true
11 | path: "portworx"
12 | s3Config:
13 | region: default
14 | accessKeyID: minio
15 | secretAccessKey: minio123
16 | endpoint: "http://192.168.1.101:30221"
17 | disableSSL: true
18 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | kubectl get pod -l app=postgres -n backup-lab
2 |
3 | kubectl exec $POD -n backup-lab -- createdb pxdemo
4 |
5 | kubectl exec $POD -n backup-lab -- pgbench -i -s 50 pxdemo
6 |
7 | kubectl exec $POD -n backup-lab -- psql pxdemo -c 'select count(*) from pgbench_accounts'
8 |
9 | kubectl get backuplocation -n backup-lab
10 |
11 | storkctl get backuplocation -n backup-lab
12 |
13 | storkctl get applicationbackups -n backup-lab
14 |
15 | storkctl get applicationrestores -n backup-lab
16 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/minio-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: minio
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: minio-sc
10 | provisioner: kubernetes.io/portworx-volume
11 | parameters:
12 | repl: "3"
13 | ---
14 | apiVersion: v1
15 | kind: PersistentVolumeClaim
16 | metadata:
17 | # This name uniquely identifies the PVC. This is used in deployment.
18 | name: minio-pv-claim
19 | namespace: minio
20 | spec:
21 | storageClassName: minio-sc
22 | # Read more about access modes here: http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes
23 | accessModes:
24 | # The volume is mounted as read-write by a single node
25 | - ReadWriteOnce
26 | resources:
27 | # This is the request for storage. Should be available in the cluster.
28 | requests:
29 | storage: 10Gi
30 | ---
31 | apiVersion: v1
32 | kind: Service
33 | metadata:
34 | # This name uniquely identifies the service
35 | name: minio-service
36 | namespace: minio
37 | spec:
38 | type: NodePort
39 | ports:
40 | - port: 9000
41 | targetPort: 9000
42 | nodePort: 30221
43 | protocol: TCP
44 | selector:
45 | # Looks for labels `app:minio` in the namespace and applies the spec
46 | app: minio
47 | ---
48 | apiVersion: apps/v1
49 | kind: Deployment
50 | metadata:
51 | # This name uniquely identifies the Deployment
52 | name: minio
53 | namespace: minio
54 | spec:
55 | selector:
56 | matchLabels:
57 | app: minio
58 | strategy:
59 | # Specifies the strategy used to replace old Pods by new ones
60 | # Refer: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
61 | type: Recreate
62 | template:
63 | metadata:
64 | labels:
65 | # This label is used as a selector in Service definition
66 | app: minio
67 | spec:
68 | # Volumes used by this deployment
69 | volumes:
70 | - name: data
71 | # This volume is based on PVC
72 | persistentVolumeClaim:
73 | # Name of the PVC created earlier
74 | claimName: minio-pv-claim
75 | containers:
76 | - name: minio
77 | # Volume mounts for this container
78 | volumeMounts:
79 | # Volume 'data' is mounted to path '/data'
80 | - name: data
81 | mountPath: "/data"
82 |         # Pulls the MinIO image (pinned release) from Docker Hub
83 | image: minio/minio:RELEASE.2019-09-05T23-24-38Z
84 | args:
85 | - server
86 | - /data
87 | env:
88 | # MinIO access key and secret key
89 | - name: MINIO_ACCESS_KEY
90 | value: "minio"
91 | - name: MINIO_SECRET_KEY
92 | value: "minio123"
93 | ports:
94 | - containerPort: 9000
95 | # Readiness probe detects situations when MinIO server instance
96 | # is not ready to accept traffic. Kubernetes doesn't forward
97 | # traffic to the pod while readiness checks fail.
98 | readinessProbe:
99 | httpGet:
100 | path: /minio/health/ready
101 | port: 9000
102 | initialDelaySeconds: 120
103 | periodSeconds: 20
104 | # Liveness probe detects situations where MinIO server instance
105 | # is not working properly and needs restart. Kubernetes automatically
106 | # restarts the pods if liveness checks fail.
107 | livenessProbe:
108 | httpGet:
109 | path: /minio/health/live
110 | port: 9000
111 | initialDelaySeconds: 120
112 | periodSeconds: 20
113 |
--------------------------------------------------------------------------------
/assets/training/Stateful_Application_Backups/postgres.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: backup-lab
5 | ---
6 | kind: StorageClass
7 | apiVersion: storage.k8s.io/v1
8 | metadata:
9 | name: px-postgres-sc
10 | provisioner: kubernetes.io/portworx-volume
11 | parameters:
12 | repl: "3"
13 | io_profile: "db_remote"
14 | priority_io: "high"
15 | allowVolumeExpansion: true
16 | ---
17 | kind: PersistentVolumeClaim
18 | apiVersion: v1
19 | metadata:
20 | name: postgres-data
21 | namespace: backup-lab
22 | labels:
23 | app: postgres
24 | annotations:
25 | volume.beta.kubernetes.io/storage-class: px-postgres-sc
26 | spec:
27 | accessModes:
28 | - ReadWriteOnce
29 | resources:
30 | requests:
31 | storage: 2Gi
32 | ---
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 | name: postgres
37 | namespace: backup-lab
38 | labels:
39 | app: postgres
40 | spec:
41 | strategy:
42 | rollingUpdate:
43 | maxSurge: 1
44 | maxUnavailable: 1
45 | type: RollingUpdate
46 | replicas: 1
47 | selector:
48 | matchLabels:
49 | app: postgres
50 | template:
51 | metadata:
52 | labels:
53 | app: postgres
54 | spec:
55 | schedulerName: stork
56 | containers:
57 | - name: postgres
58 | image: postgres:9.5
59 | imagePullPolicy: "IfNotPresent"
60 | ports:
61 | - containerPort: 5432
62 | env:
63 | - name: POSTGRES_USER
64 | value: pgbench
65 | - name: PGUSER
66 | value: pgbench
67 | - name: POSTGRES_PASSWORD
68 | value: superpostgres
69 | - name: PGBENCH_PASSWORD
70 | value: superpostgres
71 | - name: PGDATA
72 | value: /var/lib/postgresql/data/pgdata
73 | volumeMounts:
74 | - mountPath: /var/lib/postgresql/data
75 | name: postgres-data
76 | volumes:
77 | - name: postgres-data
78 | persistentVolumeClaim:
79 | claimName: postgres-data
80 |
--------------------------------------------------------------------------------
/assets/training/Volume_Snapshots/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | kubectl exec $POD -- psql -c 'drop database pxdemo'
2 |
3 | kubectl exec $POD -- psql -l
4 |
5 | kubectl scale --replicas=0 deployment/postgres
6 |
7 | kubectl scale --replicas=1 deployment/postgres
8 |
9 | kubectl exec $POD -- psql pxdemo -c 'select count(*) from pgbench_accounts'
10 |
--------------------------------------------------------------------------------
/assets/training/Volume_Snapshots/postgres-app-restore.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres-snap
5 | labels:
6 | app: postgres-snap
7 | spec:
8 | strategy:
9 | rollingUpdate:
10 | maxSurge: 1
11 | maxUnavailable: 1
12 | type: RollingUpdate
13 | replicas: 1
14 | selector:
15 | matchLabels:
16 | app: postgres-snap
17 | template:
18 | metadata:
19 | labels:
20 | app: postgres-snap
21 | spec:
22 | affinity:
23 | nodeAffinity:
24 | requiredDuringSchedulingIgnoredDuringExecution:
25 | nodeSelectorTerms:
26 | - matchExpressions:
27 | - key: px/running
28 | operator: NotIn
29 | values:
30 | - "false"
31 | - key: px/enabled
32 | operator: NotIn
33 | values:
34 | - "false"
35 | containers:
36 | - name: postgres
37 | image: postgres:9.5
38 | imagePullPolicy: "IfNotPresent"
39 | ports:
40 | - containerPort: 5432
41 | env:
42 | - name: POSTGRES_USER
43 | value: pgbench
44 | - name: PGUSER
45 | value: pgbench
46 | - name: POSTGRES_PASSWORD
47 | value: superpostgres
48 | - name: PGBENCH_PASSWORD
49 | value: superpostgres
50 | - name: PGDATA
51 | value: /var/lib/postgresql/data/pgdata
52 | volumeMounts:
53 | - mountPath: /var/lib/postgresql/data
54 | name: postgres-data-from-snap
55 | volumes:
56 | - name: postgres-data-from-snap
57 | persistentVolumeClaim:
58 | claimName: px-postgres-snap-clone
59 |
--------------------------------------------------------------------------------
/assets/training/Volume_Snapshots/px-snap-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: px-postgres-snap-clone
5 | annotations:
6 | snapshot.alpha.kubernetes.io/snapshot: px-postgres-snapshot
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | storageClassName: stork-snapshot-sc
11 | resources:
12 | requests:
13 | storage: 2Gi
--------------------------------------------------------------------------------
/assets/training/Volume_Snapshots/px-snap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: volumesnapshot.external-storage.k8s.io/v1
2 | kind: VolumeSnapshot
3 | metadata:
4 | name: px-postgres-snapshot
5 | namespace: default
6 | spec:
7 | persistentVolumeClaimName: postgres-data
8 |
--------------------------------------------------------------------------------
/assets/training/Wordpress_Lab/header.php:
--------------------------------------------------------------------------------
(content not captured: the PHP/HTML markup of this WordPress theme header was stripped in this listing)
--------------------------------------------------------------------------------
/assets/training/Wordpress_Lab/mysql-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress-mysql
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 3306
10 | selector:
11 | app: wordpress
12 | tier: mysql
13 | clusterIP: None
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: wordpress-mysql
19 | labels:
20 | app: wordpress
21 | spec:
22 | selector:
23 | matchLabels:
24 | app: wordpress
25 | strategy:
26 | type: Recreate
27 | template:
28 | metadata:
29 | labels:
30 | app: wordpress
31 | tier: mysql
32 | spec:
33 | # Use the stork scheduler to enable more efficient placement of the pods
34 | schedulerName: stork
35 | containers:
36 | - image: mysql:5.6
37 | imagePullPolicy:
38 | name: mysql
39 | env:
40 | - name: MYSQL_ROOT_PASSWORD
41 | value: password
42 | ports:
43 | - containerPort: 3306
44 | name: mysql
45 | volumeMounts:
46 | - name: mysql-persistent-storage
47 | mountPath: /var/lib/mysql
48 | volumes:
49 | - name: mysql-persistent-storage
50 | persistentVolumeClaim:
51 | claimName: mysql-pvc-1
52 |
--------------------------------------------------------------------------------
/assets/training/Wordpress_Lab/mysql-vol.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: portworx-sc-repl3
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "3"
10 | priority_io: "high"
11 | ---
12 | apiVersion: v1
13 | kind: PersistentVolumeClaim
14 | metadata:
15 | name: mysql-pvc-1
16 | spec:
17 | storageClassName: portworx-sc-repl3
18 | accessModes:
19 | - ReadWriteOnce
20 | resources:
21 | requests:
22 | storage: 2Gi
23 |
--------------------------------------------------------------------------------
/assets/training/Wordpress_Lab/wp-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 80
10 | nodePort: 30303
11 | selector:
12 | app: wordpress
13 | tier: frontend
14 | type: NodePort
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: wordpress
20 | labels:
21 | app: wordpress
22 | spec:
23 | replicas: 3
24 | selector:
25 | matchLabels:
26 | app: wordpress
27 | strategy:
28 | type: Recreate
29 | template:
30 | metadata:
31 | labels:
32 | app: wordpress
33 | tier: frontend
34 | spec:
35 | # Use the stork scheduler to enable more efficient placement of the pods
36 | schedulerName: stork
37 | containers:
38 | - image: wordpress:4.8-apache
39 | name: wordpress
40 | imagePullPolicy:
41 | env:
42 | - name: WORDPRESS_DB_HOST
43 | value: wordpress-mysql
44 | - name: WORDPRESS_DB_PASSWORD
45 | value: password
46 | ports:
47 | - containerPort: 80
48 | name: wordpress
49 | volumeMounts:
50 | - name: wordpress-persistent-storage
51 | mountPath: /var/www/html
52 | volumes:
53 | - name: wordpress-persistent-storage
54 | persistentVolumeClaim:
55 | claimName: wp-pv-claim
56 |
--------------------------------------------------------------------------------
/assets/training/Wordpress_Lab/wp-vol.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: portworx-sc-repl3-shared
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "3"
10 | priority_io: "high"
11 | sharedv4: "true"
12 | ---
13 | apiVersion: v1
14 | kind: PersistentVolumeClaim
15 | metadata:
16 | name: wp-pv-claim
17 | labels:
18 | app: wordpress
19 | spec:
20 | storageClassName: portworx-sc-repl3-shared
21 | accessModes:
22 | - ReadWriteMany
23 | resources:
24 | requests:
25 | storage: 1Gi
26 |
--------------------------------------------------------------------------------
/assets/training/cheatsheet.txt:
--------------------------------------------------------------------------------
1 | kubectl get pods -n portworx -l name=portworx -o wide
2 |
3 | kubectl logs -f px-cluster- -c portworx -n portworx
4 |
--------------------------------------------------------------------------------
/assets/training/install-pxc.sh:
--------------------------------------------------------------------------------
1 | mkdir -p $HOME/bin
2 | curl -Ls https://github.com/portworx/pxc/releases/download/v0.33.0/pxc-v0.33.0.linux.amd64.tar.gz | tar Oxzf - pxc/kubectl-pxc | tee $HOME/bin/kubectl-pxc >/dev/null
3 | curl -so $HOME/bin/pxc-pxctl https://raw.githubusercontent.com/portworx/pxc/master/component/pxctl/pxc-pxctl
4 | kubectl cp -n portworx $(kubectl get pod -n portworx -l name=stork -o jsonpath='{.items[0].metadata.name}'):/storkctl/linux/storkctl $HOME/bin/storkctl
5 | chmod +x $HOME/bin/pxc-pxctl $HOME/bin/kubectl-pxc $HOME/bin/storkctl
6 | kubectl pxc config cluster set --portworx-service-namespace portworx
7 | echo "alias pxctl='kubectl pxc pxctl'" >>$HOME/.bashrc
8 |
--------------------------------------------------------------------------------
/assets/wordpress/wordpress.yml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: portworx-sc-repl3-shared
5 | provisioner: pxd.portworx.com
6 | parameters:
7 | #openstorage.io/auth-secret-name: px-user-token
8 | #openstorage.io/auth-secret-namespace: portworx
9 | repl: "2"
10 | sharedv4: "true"
11 | allowVolumeExpansion: true
12 | ---
13 | apiVersion: v1
14 | kind: PersistentVolumeClaim
15 | metadata:
16 | name: wp-pv-claim
17 | labels:
18 | app: wordpress
19 | spec:
20 | storageClassName: portworx-sc-repl3-shared
21 | accessModes:
22 | - ReadWriteMany
23 | resources:
24 | requests:
25 | storage: 1Gi
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: wordpress
31 | labels:
32 | app: wordpress
33 | spec:
34 | ports:
35 | - port: 80
36 | nodePort: 30303
37 | selector:
38 | app: wordpress
39 | tier: frontend
40 | type: NodePort
41 | ---
42 | apiVersion: apps/v1
43 | kind: Deployment
44 | metadata:
45 | name: wordpress
46 | labels:
47 | app: wordpress
48 | spec:
49 | selector:
50 | matchLabels:
51 | app: wordpress
52 | tier: frontend
53 | replicas: 3
54 | strategy:
55 | type: Recreate
56 | template:
57 | metadata:
58 | labels:
59 | app: wordpress
60 | tier: frontend
61 | spec:
62 | # Use the stork scheduler to enable more efficient placement of the pods
63 | schedulerName: stork
64 | containers:
65 | - image: wordpress:4.8-apache
66 | name: wordpress
67 | imagePullPolicy:
68 | env:
69 | - name: WORDPRESS_DB_HOST
70 | value: wordpress-mysql
71 | - name: WORDPRESS_DB_PASSWORD
72 | value: password
73 | ports:
74 | - containerPort: 80
75 | name: wordpress
76 | volumeMounts:
77 | - name: wordpress-persistent-storage
78 | mountPath: /var/www/html
79 | volumes:
80 | - name: wordpress-persistent-storage
81 | persistentVolumeClaim:
82 | claimName: wp-pv-claim
83 | ---
84 | apiVersion: storage.k8s.io/v1
85 | kind: StorageClass
86 | metadata:
87 | name: portworx-sc-repl3
88 | provisioner: pxd.portworx.com
89 | parameters:
90 | #openstorage.io/auth-secret-name: px-user-token
91 | #openstorage.io/auth-secret-namespace: portworx
92 | repl: "3"
93 | priority_io: "high"
94 | allowVolumeExpansion: true
95 | ---
96 | apiVersion: v1
97 | kind: PersistentVolumeClaim
98 | metadata:
99 | name: mysql-pvc-1
100 | spec:
101 | storageClassName: portworx-sc-repl3
102 | accessModes:
103 | - ReadWriteOnce
104 | resources:
105 | requests:
106 | storage: 2Gi
107 | ---
108 | apiVersion: v1
109 | kind: Service
110 | metadata:
111 | name: wordpress-mysql
112 | labels:
113 | app: wordpress
114 | spec:
115 | ports:
116 | - port: 3306
117 | selector:
118 | app: wordpress
119 | tier: mysql
120 | clusterIP: None
121 | ---
122 | apiVersion: apps/v1
123 | kind: Deployment
124 | metadata:
125 | name: wordpress-mysql
126 | labels:
127 | app: wordpress
128 | spec:
129 | selector:
130 | matchLabels:
131 | app: wordpress
132 | tier: mysql
133 | strategy:
134 | type: Recreate
135 | template:
136 | metadata:
137 | labels:
138 | app: wordpress
139 | tier: mysql
140 | spec:
141 | # Use the stork scheduler to enable more efficient placement of the pods
142 | schedulerName: stork
143 | containers:
144 | - image: mysql:5.6
145 | imagePullPolicy:
146 | name: mysql
147 | env:
148 | # $ kubectl create secret generic mysql-pass --from-file=password.txt
149 | # make sure password.txt does not have a trailing newline
150 | - name: MYSQL_ROOT_PASSWORD
151 | value: password
152 | ports:
153 | - containerPort: 3306
154 | name: mysql
155 | volumeMounts:
156 | - name: mysql-persistent-storage
157 | mountPath: /var/lib/mysql
158 | volumes:
159 | - name: mysql-persistent-storage
160 | persistentVolumeClaim:
161 | claimName: mysql-pvc-1
162 |
--------------------------------------------------------------------------------
/cmdTesting.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/spf13/cobra"
5 | )
6 |
7 | var cmdTesting = &cobra.Command{
8 | Use: "testing",
9 | Short: "Runs testing defined in template",
10 | Long: "Runs testing defined in template",
11 | Run: RunTesting,
12 | }
13 |
14 | func RunTesting(cmd *cobra.Command, args []string) {
15 | var flags Config
16 | //fmt.Printf("testrun named %s template %s\n", testingName, testingTemplate)
17 |
18 | config := parse_yaml("defaults.yml")
19 |
20 | prep_error := prepare_deployment(&config, &flags, testingName, "", testingTemplate, "")
21 | if prep_error != "" {
22 | die(prep_error)
23 | }
24 | _ = create_deployment(config)
25 |
26 | }
27 |
--------------------------------------------------------------------------------
/defaults.yml:
--------------------------------------------------------------------------------
1 | cloud: aws
2 | platform: k8s
3 | clusters: 1
4 | nodes: 3
5 | #ssh_pub_key: "ssh-rsa XXXXXXXXXXXXXXXXXXXXXX user@example"
6 | k8s_version: 1.31.8
7 | px_version: 3.2.3
8 | stop_after: 6
9 | post_script: show-ip
10 | quiet: false
11 | tags: "project=pxdeploy"
12 |
13 | aws_region: eu-west-1
14 | aws_type: t3.large
15 | aws_ebs: "gp2:50"
16 | aws_access_key_id: ""
17 | aws_secret_access_key: ""
18 | eks_version: "1.31"
19 | #aws_ebs: "gp2:20 standard:30"
20 |
21 | gcp_region: europe-north1
22 | gcp_zone: b
23 | gcp_type: n1-standard-4
24 | gcp_disks: "pd-standard:50"
25 | gke_version: "1.31"
26 | #gcp_disks: "pd-standard:20 pd-ssd:30"
27 | #gcp_project: "px-deploy"
28 |
29 |
30 | azure_region: uksouth
31 | azure_type: Standard_B2ms
32 | azure_disks: "Standard_LRS:50"
33 | #azure_disks: "Standard_LRS:50 Premium_LRS:50"
34 | azure_client_secret: ""
35 | azure_client_id: ""
36 | azure_tenant_id: ""
37 | azure_subscription_id: ""
38 | aks_version: "1.31"
39 |
40 | rancher_version: "2.10.1"
41 | rancher_k3s_version: "1.30.8+k3s1"
42 | rancher_k8s_version: "1.30.8+rke2r1"
43 |
44 | vsphere_host: ""
45 | vsphere_compute_resource: ""
46 | vsphere_resource_pool: ""
47 | vsphere_datacenter: ""
48 | vsphere_user: ""
49 | vsphere_password: ""
50 | vsphere_template: "pxdeploy-template"
51 | vsphere_datastore: ""
52 | vsphere_disks: "64"
53 | vsphere_network: ""
54 | vsphere_memory: 8
55 | vsphere_cpu: 2
56 | vsphere_repo: "https://px-deploy.s3.eu-west-1.amazonaws.com/templates/"
57 | #vsphere_folder: ""
58 |
59 | ocp4_domain: ""
60 | ocp4_version: 4.18.8
61 | ocp4_pull_secret: ""
62 |
63 | #env:
64 | # licenses: "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX ..."
65 |
--------------------------------------------------------------------------------
/docs/bucket.tf:
--------------------------------------------------------------------------------
1 | # create an AWS IAM user limited to accessing only the given (existing) S3 bucket
2 | # the bucket name must be set in variable s3_bucket (uncomment the default OR create a .tfvars file OR provide it interactively at terraform runtime)
3 | # you may also modify aws_region
4 | #
5 | # HOWTO:
6 | #
7 | # terraform init
8 | # terraform plan
9 | # terraform apply
10 | #
11 | # note the output!
12 | # destroy after training:
13 | # terraform destroy
14 |
15 | terraform {
16 | required_providers {
17 | aws = {
18 | source = "hashicorp/aws"
19 | }
20 | }
21 | }
22 |
23 | provider "aws" {
24 | region = var.aws_region
25 | }
26 |
27 | variable "aws_region" {
28 | description = "AWS region e.g: eu-west-1"
29 | type = string
30 | default = "eu-west-1"
31 | }
32 |
33 | variable "s3_bucket" {
34 | description = "name of existing s3 bucket"
35 | type = string
36 | #  default = "training-bucket"
37 | }
38 |
39 | resource "aws_iam_user" "aws-s3-user" {
40 | name = format("px-s3-%s",var.s3_bucket)
41 | path = "/"
42 | }
43 |
44 | resource "aws_iam_user_policy" "s3-user" {
45 | name = "s3-pol"
46 | user = aws_iam_user.aws-s3-user.name
47 | policy = data.aws_iam_policy_document.s3_user.json
48 | }
49 |
50 | data "aws_iam_policy_document" "s3_user" {
51 | statement {
52 | effect = "Allow"
53 | actions = [
54 | "s3:ListAllMyBuckets",
55 | "s3:GetBucketLocation"
56 | ]
57 | resources = ["*"]
58 | }
59 |
60 | statement {
61 | effect = "Allow"
62 | actions = ["s3:*"]
63 | resources = [
64 | format("arn:aws:s3:::%s",var.s3_bucket),
65 | format("arn:aws:s3:::%s/*",var.s3_bucket)
66 | ]
67 | }
68 | }
69 |
70 | resource "aws_iam_access_key" "s3-user" {
71 | user = aws_iam_user.aws-s3-user.name
72 | }
73 |
74 | output "aws_access_key_id" {
75 | value = aws_iam_access_key.s3-user.id
76 | }
77 |
78 |
79 | output "aws_secret_access_key" {
80 | value = nonsensitive(aws_iam_access_key.s3-user.secret)
81 |
82 | }
83 |
84 |
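
A usage sketch following the HOWTO comments at the top of this file; the bucket name below is a placeholder and must refer to an S3 bucket that already exists in your account:

```
# Supply the existing bucket name via a .tfvars file (or answer the interactive prompt instead)
cat > terraform.tfvars <<'EOF'
s3_bucket  = "my-training-bucket"   # placeholder - replace with your existing bucket
aws_region = "eu-west-1"
EOF

terraform init
terraform plan
terraform apply      # note the aws_access_key_id / aws_secret_access_key outputs

# destroy after the training
terraform destroy
```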
--------------------------------------------------------------------------------
/docs/cloud/aws/ocp4.md:
--------------------------------------------------------------------------------
1 |
2 | # Notes for OCP4 on AWS
3 |
4 | A "master" node will be provisioned for each cluster. This is not really a master node - it is just where `openshift-install` is run. The root user will have a kubeconfig, so it can be treated as a master node for the purposes of the scripts used in the templates.
5 |
6 | The following settings are mandatory for ocp4 deployments.
7 |
8 | ## ocp4_domain setting
9 |
10 | A subdomain must be delegated to Route53 on the same AWS account, so you will need to be able to create records for your own domain:
11 |
12 | 1. Login to the AWS console and go to Route53.
13 |
14 | 2. Click on "Hosted zones", then click "Create hosted zone".
15 |
16 | 3. Enter the subdomain, e.g. openshift.example.com, and click "Create hosted zone". It will give you 4 authoritative nameservers for the subdomain.
17 |
18 | 4. Log in to your DNS provider.
19 |
20 | 5. Create an NS record for each of the nameservers of the subdomain, e.g.:
21 | ```
22 | $ host -t ns openshift.example.com
23 | openshift.example.com name server ns-1386.awsdns-45.org.
24 | openshift.example.com name server ns-1845.awsdns-38.co.uk.
25 | openshift.example.com name server ns-282.awsdns-35.com.
26 | openshift.example.com name server ns-730.awsdns-27.net.
27 | ```
28 |
29 | 6. Wait a few minutes for the changes to be reflected. Then validate all is well in Route53:
30 | ```
31 | $ host -t soa openshift.example.com
32 | openshift.example.com has SOA record ns-730.awsdns-227.net. awsdns-hostmaster.amazon.com. 1 7200 900 1209600 86400
33 | ```
34 |
35 | 7. Add the subdomain as `ocp4_domain` in `defaults.yml`, e.g. `ocp4_domain: openshift.example.com`
36 |
37 | ## ocp4_pull_secret
38 |
39 | You need to obtain an OpenShift pull secret:
40 |
41 | 1. Log in to https://console.redhat.com/openshift
42 |
43 | 2. Select Clusters -> Create cluster -> Local -> Copy Pull Secret
44 |
45 | 3. Enter the pull secret as the value of `ocp4_pull_secret` in `defaults.yml`, ensuring it is enclosed in single quotation marks and does not contain line breaks:
46 |
47 | ```
48 | ocp4_pull_secret: '{"auths":{"cloud.openshift.com":{"auth":"a4E<..omitted...>lcUR==","email":"mail@foo.bar"}}}'
49 | ```
50 |
--------------------------------------------------------------------------------
/docs/cloud/vsphere/1_vsphere_deploy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/1_vsphere_deploy.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/2_vsphere_ovf_source.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/2_vsphere_ovf_source.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/3_vsphere_name_folder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/3_vsphere_name_folder.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/4_vsphere_resource.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/4_vsphere_resource.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/5_vsphere_datastore.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/5_vsphere_datastore.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/6_vsphere_network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/6_vsphere_network.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/7_vsphere_template.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/cloud/vsphere/7_vsphere_template.png
--------------------------------------------------------------------------------
/docs/cloud/vsphere/README.md:
--------------------------------------------------------------------------------
1 | ## Manual creation of px-deploy vSphere template
2 |
3 | instead of running `px-deploy vsphere-init` it can be faster to create the template using vCenter UI
4 |
5 | #### 1. check your `~/.px-deploy/defaults.yml`
6 |
7 | take note of the following options (showing example settings here):
8 | - **vsphere_template** (example contains folder part `DanielP`)
9 |
10 | `vsphere_template: "DanielP/pxdeploy-template"`
11 |
12 | - **vsphere_datacenter**
13 |
14 | `vsphere_datacenter: "LAB"`
15 |
16 | - **vsphere_folder**
17 |
18 | `vsphere_folder: "DanielP"`
19 |
20 | - **vsphere_resource_pool**
21 |
22 | `vsphere_resource_pool: "DanielP"`
23 |
24 | - **vsphere_datastore**
25 |
26 | `vsphere_datastore: "DanielP"`
27 |
28 | #### 2. Login to vCenter UI
29 |
30 | A) if **vsphere_template** contains a folder, locate this folder (picture showing folder `DanielP`)
31 |
32 | B) if **vsphere_template** does not contain a folder, locate **vsphere_resource_pool**
33 |
34 | C) if an older version of the template exists, delete it (right click, 'Delete from Disk')
35 |
36 | 
37 |
38 | #### 3. "Select an OVF Template"
39 |
40 | right click folder/resource pool, select "Deploy OVF Template"
41 |
42 | for URL enter `https://px-deploy.s3.eu-west-1.amazonaws.com/templates/template.ova`
43 |
44 | 
45 |
46 | click NEXT
47 |
48 | click "Yes" on "Source Verification"
49 |
50 | #### 4. "Select a name and folder"
51 |
52 | for `Virtual machine name` enter the name of **vsphere_template** (without folder part)
53 |
54 | choose folder part of **vsphere_template** for "Select a location..."
55 |
56 | 
57 |
58 | click NEXT
59 |
60 | #### 5. "Select a compute resource"
61 |
62 | select **vsphere_datacenter** and **vsphere_resource_pool**
63 |
64 | do NOT select "Automatically power on deployed VM"
65 |
66 | 
67 |
68 | click NEXT
69 |
70 | #### 6. "Review Details"
71 |
72 | click NEXT
73 |
74 | #### 7. "Select Storage"
75 |
76 | select **vsphere_datastore**
77 |
78 | you may change Disk Format to "thin provision"
79 |
80 | 
81 |
82 | click NEXT
83 |
84 | #### 8. "Select Networks"
85 |
86 | select **vsphere_network**
87 |
88 | 
89 |
90 | click NEXT
91 |
92 | #### 9. "Ready to complete"
93 |
94 | click Finish
95 |
96 | #### 10. As soon as the import has finished (import task at 100%), right-click the newly created VM and select "Template -> Convert to Template"
97 |
98 | 
99 |
100 |
101 | ## px-deploy vsphere cloud known issues
102 |
103 | error on terraform apply:
104 |
105 | ```
106 |
107 | Error: error reconfiguring virtual machine: error processing disk changes post-clone: disk.0: ServerFaultCode: NoPermission: RESOURCE (vm-537470:2000), ACTION (queryAssociatedProfile): RESOURCE (vm-537470), ACTION (PolicyIDByVirtualDisk)
108 |
109 | ```
110 |
111 | Resolution:
112 |
113 | ensure your vsphere_user has StorageProfile.View privilege (non-propagating) on the root vCenter object
114 |
115 | error on destroy:
116 |
117 | ```
118 |
119 | Error: Invalid datastore path '/vmfs/volumes/...'
120 |
121 | ```
122 |
123 | Resolution:
124 |
125 | re-run the destroy command
126 |
127 |
128 | ## How does px-deploy vsphere templating work
129 |
130 | we provide a pre-created px-deploy OVA template stored in a public S3 bucket
131 |
132 | `px-deploy vsphere-init` deploys this template (taken from the setting `vsphere_repo`) to the vCenter instance, removes an already existing template and converts the imported VM into a vSphere template (named by the setting `vsphere_template`)
133 |
134 | px-deploy deployments then create instances from this template by cloning it
135 |
136 | to create a custom template, run `~/.px-deploy/vsphere-build/vsphere-build.sh`
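
For illustration, a rough `govc` sketch of what `vsphere-init` automates (an approximation, not the actual implementation; it assumes `govc` is installed, the usual `GOVC_URL`/`GOVC_USERNAME`/`GOVC_PASSWORD`/`GOVC_DATACENTER` variables are exported, and it reuses the example names from above):

```
# download the public OVA and import it into the example folder / resource pool / datastore
curl -Lo /tmp/template.ova https://px-deploy.s3.eu-west-1.amazonaws.com/templates/template.ova
govc import.ova -name pxdeploy-template -folder DanielP -pool DanielP -ds DanielP /tmp/template.ova

# convert the imported VM into a vSphere template
govc vm.markastemplate pxdeploy-template
```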
--------------------------------------------------------------------------------
/docs/templates/backup-restore/README.md:
--------------------------------------------------------------------------------
1 | # Stork Backups
2 |
3 | Deploys a cluster with Portworx, MinIO and Petclinic
4 |
5 | # Supported Environments
6 |
7 | * Any
8 |
9 | # Requirements
10 |
11 | ## Deploy the template
12 |
13 | It is a best practice to use your initials or name as part of the name of the deployment in order to make it easier for others to see the ownership of the deployment in the AWS console.
14 |
15 | ```
16 | px-deploy create -t backup-restore -n <name>
17 | ```
18 |
19 | # Demo Workflow
20 |
21 | 1. Obtain the external IP for the cluster:
22 |
23 | ```
24 | px-deploy status -n <name>
25 | ```
26 |
27 | 2. Open a browser tab and go to http://<external IP from step 1>.
28 |
29 | 3. Connect to the deployment in a terminal.
30 |
31 | 4. Show it is a Kubernetes and Portworx cluster:
32 |
33 | ```
34 | kubectl get nodes
35 | pxctl status
36 | ```
37 |
38 | 5. Go to your browser. Click Find Owners, Add Owner and populate the form with some dummy data and then click Add Owner. Click Find Owners and Find Owner, and show that the new entry appears at the bottom of the list.
39 |
40 | 6. In the terminal, show the BackupLocation YAML that is to be applied:
41 |
42 | ```
43 | cat /assets/backup-restore/backupLocation.yml
44 | ```
45 |
46 | Mention that the BackupLocation is in the `petclinic` namespace which means we can use it to backup only that namespace. If we were to create it in the `kube-system` namespace, we would be able to backup any namespace. Talk about it being an S3 target with standard S3 parameters. Note the `sync: true` parameter and say we will come back to it later.
47 |
48 | 7. Apply the BackupLocation object:
49 |
50 | ```
51 | kubectl apply -f /assets/backup-restore/backupLocation.yml
52 | ```
53 |
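Optionally, confirm that the BackupLocation was created (it is a regular CRD, so plain kubectl can list it; this check is a convenience addition, not part of the original demo flow):

```
kubectl get backuplocation -n petclinic
```
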
54 | 8. In the terminal, show the ApplicationBackup YAML that is to be applied:
55 |
56 | ```
57 | cat /assets/backup-restore/applicationBackup.yml
58 | ```
59 |
60 | Mention that the ApplicationBackup is in the `petclinic` namespace which means we can use it to back up only that namespace. If we were to create it in the `kube-system` namespace, we would be able to backup any namespace.
61 |
62 | 9. Apply the ApplicationBackup object:
63 |
64 | ```
65 | kubectl apply -f /assets/backup-restore/applicationBackup.yml
66 | ```
67 |
68 | 10. Show the ApplicationBackup object:
69 |
70 | ```
71 | kubectl get applicationbackup -n petclinic
72 | storkctl get applicationbackup -n petclinic
73 | ```
74 |
75 | Do not continue until the ApplicationBackup has succeeded.
76 |
77 | 11. Delete the `petclinic` namespace:
78 |
79 | ```
80 | kubectl delete ns petclinic
81 | ```
82 |
83 | Refresh the browser to prove the application no longer exists.
84 |
85 | 12. Recreate the `petclinic` namespace, along with the BackupLocation object:
86 |
87 | ```
88 | kubectl create ns petclinic
89 | kubectl apply -f /assets/backup-restore/backupLocation.yml
90 | ```
91 |
92 | Watch for the ApplicationBackup objects to be recreated automatically:
93 |
94 | ```
95 | watch storkctl get applicationbackups -n petclinic
96 | ```
97 |
98 | Go back to the `sync: true` parameter we discussed earlier. This triggers Stork to communicate with the S3 bucket defined in the BackupLocation and pull the metadata associated with the backup that we took earlier. Once it has retrieved that metadata, it will create an ApplicationBackup object to abstract it. Wait for that object to appear in the output. Copy the name of the object to the clipboard.
99 |
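If you prefer to grab that name from the CLI instead of copying it from the watch output, a jsonpath query works (shown as a convenience sketch; it assumes the synced backup is the only ApplicationBackup currently in the namespace):

```
kubectl get applicationbackup -n petclinic -o jsonpath='{.items[0].metadata.name}'
```
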
100 | 13. Edit `/assets/backup-restore/applicationRestore.yml`. Talk about the `backupLocation` object referencing the BackupLocation we just created. Paste the name of the ApplicationBackup object we just found into the `backupName` parameter. Save and exit.
101 |
102 | 14. Apply the ApplicationRestore object:
103 |
104 | ```
105 | kubectl apply -f /assets/backup-restore/applicationRestore.yml
106 | ```
107 |
108 | 15. Monitor the status of the restore:
109 |
110 | ```
111 | watch storkctl get applicationrestores -n petclinic
112 | ```
113 |
114 | 16. Show the application has been restored:
115 |
116 | ```
117 | kubectl get all,pvc -n petclinic
118 | ```
119 |
120 | 17. Show the pods starting:
121 |
122 | ```
123 | kubectl get pod -n petclinic
124 | ```
125 |
126 | 18. Refresh the browser tab. Click Find Owners and Find Owner and show the data is still there.
127 |
--------------------------------------------------------------------------------
/docs/templates/ocp-kubevirt/readme.MD:
--------------------------------------------------------------------------------
1 | ## How to deploy
2 |
3 | * ensure your px-deploy environment is able to create OCP4 deployments
4 |
5 | * please consult the [readme](https://github.com/purestorage-openconnect/px-deploy?tab=readme-ov-file#notes-for-ocp4-on-aws)
6 |
7 | ```
8 | $ px-deploy create -n mydeployment -t ocp-kubevirt
9 | ```
10 |
11 | * ensure your ~/.px-deploy/defaults.yml contains env variables with a valid DR license and the names of existing & accessible S3 buckets
12 |
13 | ```
14 | env:
15 | DR_BUCKET: "mys3bucket"
16 | BACKUP_BUCKET: "mys3bucket"
17 | licenses: "xxx-xxxx-xxx"
18 | ```
19 |
20 | ## Prepare
21 |
22 | * open 2 cli consoles
23 |
24 | * login to deployment on both consoles `$ px-deploy connect -n mydeployment`
25 |
26 | * on console 1 stay on master-1 (**c1cli**), on console 2 ssh into master-2 (**c2cli**)
27 |
28 | * both consoles will show login credentials to OCP, **c1cli** will also show PX-Backup credentials
29 |
30 | * login to PX-Backup UI (**pxbui**) & both OCP4 UI on Browser (**c1ui** **c2ui**)
31 |
32 | ### Check
33 |
34 | * **c1ui**: in namespace pxbbq the ubuntu-mongodb VM and 3 pxbbq pods are running
35 |
36 | * get route to app, open app, login to app and place an order
37 |
38 | * **c2ui**: show pxbbq namespace does not exist
39 |
40 |
41 | * **c1cli**:
42 |
43 | * show and talk about ClusterPair, SchedulePolicy and MigrationSchedule
44 |
45 | * `$ storkctl get clusterpair -n kube-system`
46 |
47 | * `$ vi /assets/kubevirt/ocp/async-dr.yml`
48 |
49 | * apply SchedulePolicy & MigrationSchedule
50 |
51 | * `$ kubectl apply -f /assets/kubevirt/ocp/async-dr.yml`
52 |
53 | * wait for first migration to finish
54 |
55 | * `$ storkctl get migrations -n kube-system`
56 |
57 | * **c2cli**: run a controlled failover of application `$ storkctl perform failover -m pxbbq -n kube-system`
58 |
59 | * **c2cli**: watch the failover progressing `storkctl get failover failover-pxbbq-xxxxxxxxxxxx -n kube-system`
60 |
61 | * **c2ui**: show pods / vm in pxbbq namespace coming up
62 |
63 | * **c1cli**: DELETE PXBBQ namespace as this takes some time
64 |
65 | * **c2ui**: show missing route, talk about not having route migrated
66 |
67 | * create route to pxbbq app (service pxbbq-svc) - see the example commands at the end of this readme
68 |
69 | * access the app on cluster 2 via the route
70 |
71 | * login to app, show existing order, add another one
72 |
73 | * **pxbui**: login to px-backup gui
74 |
75 | * check if cloud account and object store target are set
76 |
77 | * onboard cluster-2
78 |
79 | * take a backup from pxbbq namespace on cluster-2. can use namespace label app=pxbbq
80 |
81 | * when backup is taken, restore to cluster-1. exclude route
82 |
83 | * while restore running, switch to cluster1 ui to show everything is empty
84 |
--------------------------------------------------------------------------------
/docs/templates/pds-petclinic/README.md:
--------------------------------------------------------------------------------
1 | This template leverages the PDS API to register the px-deploy created ec2 k8s cluster to PDS, creates a PDS Postgres deployment and runs a spring-petclinic application deployment within the same namespace accessing the Postgres DB.
2 |
3 | It also creates a script to delete the Postgres deployment and unregister the cluster from PDS Control plane. (found on master node /px-deploy/scripts-delete/pds-petclinic.sh)
4 |
5 | ## contents
6 | .px-deploy/assets/pds-petclinic/
7 |
8 | .px-deploy/templates/pds-petclinic.yml
9 |
10 | .px-deploy/scripts/pds-petclinic
11 |
12 | ## getting started
13 | ### 1. Login to PDS
14 |
15 | note your ACCOUNT / TENANT / PROJECT Names (shown at login)
16 | 
17 |
18 | create a User API Key
19 | 
20 |
21 |
22 | ### 2. review (and edit) template settings
23 | in `.px-deploy/templates/pds-petclinic.yml`
24 |
25 | check PDS_ACCOUNT / PDS_TENANT / PDS_PROJECT
26 |
27 | check PDS_ENDPOINT
28 |
29 | if you need to change settings, create your own template file and modify that instead.
30 |
31 | template pds-petclinic.yml will be updated regularly and your changes will be lost
32 |
33 | ### 3. set PDS API Key in defaults.yml
34 | in `.px-deploy/defaults.yml` add the following
35 |
36 | ```
37 | env:
38 | PDS_TOKEN: "your_PDS_User_API_Key"
39 | ```
40 |
41 | ### 4. create deployment
42 | `px-deploy create -n nameyourdeployment -t pds-petclinic`
43 |
44 | when deployment is finished you should be able to connect to spring-petclinic app using
45 | http://[external ip]:30333
46 |
47 | You can also see the Deployment Target and the Postgres Deployment on PDS
48 |
49 | ### 5. uninstall
50 |
51 | Deletion of pds-petclinic and pds-system deployments will be done by "px-deploy destroy"
52 |
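For example, using the deployment name from step 4 (shown only as a usage illustration):

```
px-deploy destroy -n nameyourdeployment
```
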
53 | ## known issues / limitations
54 | This template is currently designed for k8s/EKS/OCP4 clusters being deployed on aws
55 |
56 |
57 |
--------------------------------------------------------------------------------
/docs/templates/pds-petclinic/pds_access_key.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/templates/pds-petclinic/pds_access_key.png
--------------------------------------------------------------------------------
/docs/templates/pds-petclinic/pds_project.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PureStorage-OpenConnect/px-deploy/b124017de45f46a8415d131a522cb01cf81241be/docs/templates/pds-petclinic/pds_project.png
--------------------------------------------------------------------------------
/docs/training_iam_user.tf:
--------------------------------------------------------------------------------
1 | # create an AWS IAM user limited to creating px-deploy ec2 instances and accessing a defined S3 bucket
2 | # name of bucket must be set in variable s3_bucket (set a default below OR create a .tfvars file OR provide it interactively at terraform runtime)
3 | # you may also modify aws_region
4 | #
5 | # HOWTO:
6 | #
7 | # terraform init
8 | # terraform plan
9 | # terraform apply
10 | #
11 | # note the output!
12 | # destroy after training:
13 | # terraform destroy
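#
# the two required variables can also be passed on the command line, e.g. (values are placeholders):
#   terraform apply -var="s3_bucket=demo-bucket" -var="training_user=px-deploy-training"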
14 |
15 | terraform {
16 | required_providers {
17 | aws = {
18 | source = "hashicorp/aws"
19 | }
20 | }
21 | }
22 |
23 | provider "aws" {
24 | region = var.aws_region
25 | }
26 |
27 | variable "aws_region" {
28 | description = "AWS region e.g: eu-west-1"
29 | type = string
30 | default = "eu-west-1"
31 | }
32 |
33 | variable "s3_bucket" {
34 | description = "name of existing s3 bucket to be used by training users"
35 | type = string
36 | #default = "demo-bucket"
37 | }
38 |
39 | variable "training_user" {
40 | description = "name of limited AWS IAM user to be created for training account"
41 | type = string
42 | #default = "px-deploy-training"
43 | }
44 |
45 | resource "aws_iam_user" "aws-training-user" {
46 | name = var.training_user
47 | path = "/"
48 | }
49 |
50 | resource "aws_iam_user_policy" "training-user" {
51 | name = "px-deploy-training-policy"
52 | user = aws_iam_user.aws-training-user.name
53 | policy = data.aws_iam_policy_document.training_user.json
54 | }
55 |
56 | data "aws_iam_policy_document" "training_user" {
57 |
58 | statement {
59 | effect = "Allow"
60 | actions = [
61 | "iam:CreateInstanceProfile",
62 | "iam:GetPolicyVersion",
63 | "iam:UntagRole",
64 | "iam:TagRole",
65 | "iam:RemoveRoleFromInstanceProfile",
66 | "iam:DeletePolicy",
67 | "iam:CreateRole",
68 | "iam:AttachRolePolicy",
69 | "iam:AddRoleToInstanceProfile",
70 | "iam:ListInstanceProfilesForRole",
71 | "iam:PassRole",
72 | "iam:DetachRolePolicy",
73 | "iam:ListAttachedRolePolicies",
74 | "iam:ListRolePolicies",
75 | "iam:ListAccessKeys",
76 | "iam:DeleteInstanceProfile",
77 | "iam:GetRole",
78 | "iam:GetInstanceProfile",
79 | "iam:GetPolicy",
80 | "iam:DeleteRole",
81 | "iam:TagPolicy",
82 | "iam:CreatePolicy",
83 | "iam:ListPolicyVersions",
84 | "iam:UntagPolicy",
85 | "iam:UntagInstanceProfile",
86 | "iam:TagInstanceProfile",
87 | "ec2:*",
88 | "elasticloadbalancing:*",
89 | "s3:ListAllMyBuckets",
90 | "s3:GetBucketLocation"
91 | ]
92 | resources = ["*"]
93 | }
94 |
95 | statement {
96 | effect = "Allow"
97 | actions = ["s3:*"]
98 | resources = [
99 | format("arn:aws:s3:::%s",var.s3_bucket),
100 | format("arn:aws:s3:::%s/*",var.s3_bucket)
101 | ]
102 | }
103 | }
104 |
105 | resource "aws_iam_access_key" "training-user" {
106 | user = aws_iam_user.aws-training-user.name
107 | }
108 |
109 | output "aws_access_key_id" {
110 | value = aws_iam_access_key.training-user.id
111 | }
112 |
113 | output "aws_secret_access_key" {
114 | value = nonsensitive(aws_iam_access_key.training-user.secret)
115 | }
116 |
117 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/purestorage-openconnect/px-deploy
2 |
3 | go 1.23.0
4 |
5 | toolchain go1.23.7
6 |
7 | require (
8 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
9 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2
10 | github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resourcegraph/armresourcegraph v0.9.0
11 | github.com/aws/aws-sdk-go-v2 v1.36.3
12 | github.com/aws/aws-sdk-go-v2/config v1.29.9
13 | github.com/aws/aws-sdk-go-v2/credentials v1.17.62
14 | github.com/aws/aws-sdk-go-v2/service/ec2 v1.210.0
15 | github.com/aws/aws-sdk-go-v2/service/eks v1.60.1
16 | github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.29.1
17 | github.com/aws/aws-sdk-go-v2/service/iam v1.40.1
18 | github.com/go-yaml/yaml v2.1.0+incompatible
19 | github.com/google/uuid v1.6.0
20 | github.com/hashicorp/go-version v1.7.0
21 | github.com/imdario/mergo v0.3.16
22 | github.com/olekukonko/tablewriter v0.0.5
23 | github.com/spf13/cobra v1.9.1
24 | google.golang.org/api v0.226.0
25 | )
26 |
27 | require (
28 | cloud.google.com/go/auth v0.15.0 // indirect
29 | cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
30 | cloud.google.com/go/compute/metadata v0.6.0 // indirect
31 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
32 | github.com/AzureAD/microsoft-authentication-library-for-go v1.4.1 // indirect
33 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
34 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
35 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
36 | github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
37 | github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
38 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
39 | github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
40 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
41 | github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
42 | github.com/aws/smithy-go v1.22.3 // indirect
43 | github.com/felixge/httpsnoop v1.0.4 // indirect
44 | github.com/go-logr/logr v1.4.2 // indirect
45 | github.com/go-logr/stdr v1.2.2 // indirect
46 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
47 | github.com/google/s2a-go v0.1.9 // indirect
48 | github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
49 | github.com/googleapis/gax-go/v2 v2.14.1 // indirect
50 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
51 | github.com/kylelemons/godebug v1.1.0 // indirect
52 | github.com/mattn/go-runewidth v0.0.16 // indirect
53 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
54 | github.com/rivo/uniseg v0.4.7 // indirect
55 | github.com/spf13/pflag v1.0.6 // indirect
56 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect
57 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
58 | go.opentelemetry.io/otel v1.35.0 // indirect
59 | go.opentelemetry.io/otel/metric v1.35.0 // indirect
60 | go.opentelemetry.io/otel/trace v1.35.0 // indirect
61 | golang.org/x/crypto v0.36.0 // indirect
62 | golang.org/x/net v0.38.0 // indirect
63 | golang.org/x/oauth2 v0.28.0 // indirect
64 | golang.org/x/sys v0.31.0 // indirect
65 | golang.org/x/text v0.23.0 // indirect
66 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250313205543-e70fdf4c4cb4 // indirect
67 | google.golang.org/grpc v1.71.0 // indirect
68 | google.golang.org/protobuf v1.36.5 // indirect
69 | gopkg.in/yaml.v2 v2.2.8 // indirect
70 | )
71 |
--------------------------------------------------------------------------------
/infra/aks-master:
--------------------------------------------------------------------------------
1 | cat <<EOF >/etc/yum.repos.d/azure-cli.repo
2 | [azure-cli]
3 | name=Azure CLI
4 | baseurl=https://packages.microsoft.com/yumrepos/azure-cli
5 | enabled=1
6 | gpgcheck=1
7 | gpgkey=https://packages.microsoft.com/keys/microsoft.asc
8 | EOF
9 |
10 | while ! dnf install -y docker azure-cli; do
11 | sleep 2
12 | done
13 |
14 | curl -sL -o /usr/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
15 | chmod 755 /usr/bin/kubectl
16 |
17 | az login --service-principal -u $azure_client_id -p=$azure_client_secret --tenant $azure_tenant_id
18 | az aks get-credentials --resource-group $azure__group --name px-deploy-$name-$cluster
19 |
--------------------------------------------------------------------------------
/infra/all-common:
--------------------------------------------------------------------------------
1 | sysctl -w net.ipv6.conf.all.disable_ipv6=1 >>/etc/sysctl.conf
2 | sysctl -w net.ipv6.conf.default.disable_ipv6=1 >>/etc/sysctl.conf
3 | sysctl -w net.ipv4.ip_forward=1 >>/etc/sysctl.conf
4 |
5 | sed -i '/^127.0.0.1.*\(master\|node\)/d' /etc/hosts
6 | while ! yum install -y dnf; do sleep 1; done
7 | while ! dnf install -y epel-release; do sleep 1; done
8 | while ! dnf install -y jq; do sleep 1; done
9 | if [ -f /etc/selinux/config ]; then
10 | setenforce 0
11 | sed -i s/SELINUX=enforcing/SELINUX=disabled/g /etc/selinux/config
12 | fi
13 | swapoff -a
14 | sed -i /swap/d /etc/fstab
15 |
16 | mkdir -p /root/.ssh
17 | mv /tmp/id_rsa /root/.ssh
18 | chown root.root /root/.ssh/id_rsa
19 | chmod 600 /root/.ssh/id_rsa
20 | ssh-keygen -y -f /root/.ssh/id_rsa >/root/.ssh/authorized_keys
21 |
22 | if [ ! -z "$ssh_pub_key" ]; then
23 | echo $ssh_pub_key >> /root/.ssh/authorized_keys
24 | fi
25 |
26 | cat <<EOF >/etc/ssh/sshd_config
27 | HostKey /etc/ssh/ssh_host_rsa_key
28 | HostKey /etc/ssh/ssh_host_ecdsa_key
29 | HostKey /etc/ssh/ssh_host_ed25519_key
30 | SyslogFacility AUTHPRIV
31 | AuthorizedKeysFile .ssh/authorized_keys
32 | PasswordAuthentication yes
33 | PermitRootLogin without-password
34 | ChallengeResponseAuthentication no
35 | GSSAPIAuthentication yes
36 | GSSAPICleanupCredentials no
37 | UsePAM yes
38 | UseDNS no
39 | X11Forwarding yes
40 | TCPKeepAlive yes
41 | ClientAliveInterval 30
42 | ClientAliveCountMax 99999
43 | Compression yes
44 | AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
45 | AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
46 | AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
47 | AcceptEnv XMODIFIERS
48 | Subsystem sftp /usr/libexec/openssh/sftp-server
49 | EOF
50 |
51 | cat <<EOF >/root/.ssh/config
52 | StrictHostKeyChecking no
53 | LogLevel ERROR
54 | EOF
55 |
56 | systemctl restart sshd 2>/dev/null
57 |
58 | if [ $cloud = "aws" -o $cloud = "gcp" -o $cloud = "azure" ]; then
59 | echo 127.0.0.1 localhost >/etc/hosts
60 | for i in $(seq 1 $clusters); do
61 | echo 192.168.$[100+$i].90 master-$i >>/etc/hosts
62 | for j in $(seq 1 ${clusternodes[$i]}); do
63 | echo 192.168.$[100+$i].$[100+$j] node-$i-$j >>/etc/hosts
64 | done
65 | done
66 | elif [ $cloud = "vsphere" ]; then
67 | curl -Ls https://github.com/vmware/govmomi/releases/download/v0.36.1/govc_Linux_x86_64.tar.gz | tar -xzf - -C /usr/bin/
68 | chmod 755 /usr/bin/govc
69 | export GOVC_URL=$vsphere_host
70 | export GOVC_USERNAME=$vsphere_user
71 | export GOVC_PASSWORD=$vsphere_password
72 | export GOVC_DATACENTER=$vsphere_datacenter
73 | export GOVC_INSECURE=1
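# poll vCenter until every powered-on master/node VM of this deployment reports an IP and a pxd.hostname, then build /etc/hosts from the collected entries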
74 | while :; do
75 | sleep 1
76 | rm -f /tmp/hosts
77 | IFS=$'\n'
78 | for i in $(govc find / -type m -runtime.powerState poweredOn | egrep "$name-(master|node)"); do
79 | ip=$(govc vm.info -json "$i" | jq -r '.virtualMachines[0].guest.ipAddress')
80 | name=$(govc vm.info -json "$i" | jq -r '.virtualMachines[0].config.extraConfig[] | select(.key==("pxd.hostname")).value')
81 | echo $ip $name >>/tmp/hosts
82 | done
83 | IFS=$' \t\n'
84 | [ $(cat /tmp/hosts | wc -w) -eq $[($nodes+1)*$clusters*2] ] && break
85 | done
86 | echo 127.0.0.1 localhost >/etc/hosts
87 | cat /tmp/hosts >>/etc/hosts
88 | fi
89 |
90 | hostnamectl set-hostname $(getent hosts $(hostname -I) | awk '{print$2}')
91 | dnf -y update openssh
92 | passwd --stdin root <<<portworx
93 | systemctl stop firewalld 2>/dev/null
94 | systemctl disable firewalld 2>/dev/null
95 | while ! yum update -y glib2; do
96 | sleep 1
97 | done
98 | while ! dnf install -y at bash-completion nfs-utils chrony unzip ca-certificates; do
99 | sleep 1
100 | done
101 | systemctl enable --now atd chronyd
102 | [ "$stop_after" != 0 ] && echo /usr/sbin/poweroff | at now + $stop_after hours
103 | modprobe br_netfilter
104 | sysctl -w net.bridge.bridge-nf-call-iptables=1 >>/etc/sysctl.conf
105 | rm -f /etc/motd.d/cockpit
106 | [ "$run_everywhere" ] && eval $run_everywhere || true
107 |
--------------------------------------------------------------------------------
/infra/all-master:
--------------------------------------------------------------------------------
1 | [ -f /tmp/credentials ] && mkdir /root/.aws && mv /tmp/credentials /root/.aws
2 | [ -f /tmp/gcp.json ] && mv /tmp/gcp.json /root/
3 | mv /tmp/assets /assets
4 | echo 'source <(kubectl completion bash 2>/dev/null)' >>/etc/bash_completion.d/kubectl
5 | echo 'source <(oc completion bash 2>/dev/null)' >>/etc/bash_completion.d/oc
6 | {
7 | echo 'alias k=kubectl'
8 | echo "alias watch='watch --color '"
9 | echo 'complete -F __start_kubectl k'
10 | } >>/root/.bashrc
11 | cat <<EOF >/etc/exports
12 | / 10.0.0.0/8(rw,no_root_squash)
13 | / 192.168.0.0/16(rw,no_root_squash)
14 | / 172.16.0.0/12(rw,no_root_squash)
15 | EOF
16 | systemctl enable --now nfs-server
17 |
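# install the etcdctl and k9s CLI tools into /usr/bin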
18 | ETCD_VER=v3.4.14
19 | curl -L https://storage.googleapis.com/etcd/${ETCD_VER}/etcd-${ETCD_VER}-linux-amd64.tar.gz -o /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz
20 | cd /usr/bin
21 | tar xzvf /tmp/etcd-${ETCD_VER}-linux-amd64.tar.gz etcd-v3.4.14-linux-amd64/etcdctl --strip-components=1
22 | curl -sL https://github.com/derailed/k9s/releases/download/v0.24.8/k9s_Linux_x86_64.tar.gz | tar xz k9s
23 |
24 | while ! dnf install -y vim-enhanced git; do
25 | sleep 1
26 | done
27 | git config --global color.ui auto true
28 |
29 | curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
30 | chmod 700 get_helm.sh
31 | HELM_INSTALL_DIR=/usr/bin ./get_helm.sh
32 |
--------------------------------------------------------------------------------
/infra/eks-master:
--------------------------------------------------------------------------------
1 | curl -sL -o /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v1.23.6/bin/linux/amd64/kubectl
2 | chmod 755 /usr/bin/kubectl
3 |
4 | while ! dnf install -y docker; do
5 | sleep 2
6 | done
7 | systemctl enable --now docker
8 |
9 | # install awscli2
10 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
11 | unzip -o /tmp/awscliv2.zip -d /tmp
12 | /tmp/aws/install
13 |
14 | # install eksctl
15 | curl -sLO "https://github.com/eksctl-io/eksctl/releases/latest/download/eksctl_linux_amd64.tar.gz"
16 | tar -xzf eksctl_linux_amd64.tar.gz -C /tmp && rm eksctl_linux_amd64.tar.gz
17 | sudo mv /tmp/eksctl /usr/local/bin
18 |
19 | aws configure set default.region $aws_region
20 | aws eks wait cluster-active --name px-deploy-$name-$cluster
21 | aws eks update-kubeconfig --region $aws_region --name px-deploy-$name-$cluster
22 |
23 |
24 | # set roles/users in configmap aws-auth which might be needed to show cluster details in AWS UI
25 | if [[ ! -z $AWS_ADD_EKS_IAM_ROLE ]]; then
26 | echo "setting iamidentitymapping for ROLE $AWS_ADD_EKS_IAM_ROLE"
27 | kubectl apply -f https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
28 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --output json | jq -r '.Account')
29 | eksctl create iamidentitymapping --cluster px-deploy-$name-$cluster --region=$aws_region \
30 | --arn arn:aws:iam::$AWS_ACCOUNT_ID:role/$AWS_ADD_EKS_IAM_ROLE --group eks-console-dashboard-full-access-group \
31 | --no-duplicate-arns
32 | fi
33 |
34 | if [[ ! -z $AWS_ADD_EKS_IAM_USER ]]; then
35 | echo "setting iamidentitymapping for USER $AWS_ADD_EKS_IAM_USER"
36 | kubectl apply -f https://s3.us-west-2.amazonaws.com/amazon-eks/docs/eks-console-full-access.yaml
37 | AWS_ACCOUNT_ID=$(aws sts get-caller-identity --output json | jq -r '.Account')
38 | eksctl create iamidentitymapping --cluster px-deploy-$name-$cluster --region=$aws_region \
39 | --arn arn:aws:iam::$AWS_ACCOUNT_ID:user/$AWS_ADD_EKS_IAM_USER --group eks-console-dashboard-full-access-group \
40 | --no-duplicate-arns
41 | fi
42 |
--------------------------------------------------------------------------------
/infra/gke-master:
--------------------------------------------------------------------------------
1 | GKE_CLUSTER_NAME=px-deploy-$name-$cluster
2 |
3 | dnf install -y docker google-cloud-cli-gke-gcloud-auth-plugin
4 | systemctl enable --now docker
5 |
6 | #curl -L https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-amd64.tar.gz | tar Cxzvf /usr/local -
7 | #curl -o /etc/systemd/system/containerd.service https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
8 | #mkdir /etc/containerd
9 | #containerd config default | sed 's/SystemdCgroup = false/SystemdCgroup = true/' >/etc/containerd/config.toml
10 | #curl -Lo /usr/sbin/runc https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
11 | #chmod 755 /usr/sbin/runc
12 | #systemctl daemon-reload
13 | #systemctl enable --now containerd
14 | #systemctl enable --now podman
15 |
16 | curl -sL -o /usr/bin/kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
17 | chmod 755 /usr/bin/kubectl
18 |
19 | echo "export USE_GKE_GCLOUD_AUTH_PLUGIN=True" >> /root/.bashrc
20 | source /root/.bashrc
21 |
22 | gcloud auth activate-service-account --key-file=/root/gcp.json
23 |
24 | # wait for GKE cluster to be ready (might still run in terraform while master node is ready)
25 | STATUS=$(gcloud container clusters describe $GKE_CLUSTER_NAME --zone $gcp_region-$gcp_zone --format=json | jq -r ".status")
26 |
27 | while ! [ $STATUS == "RUNNING" ]; do
28 | echo "Wait for GKE Cluster $GKE_CLUSTER_NAME to get ready. re-check in 20sec"
29 | sleep 20
30 | STATUS=$(gcloud container clusters describe $GKE_CLUSTER_NAME --zone $gcp_region-$gcp_zone --format=json | jq -r ".status")
31 | done
32 |
33 | gcloud container clusters get-credentials $GKE_CLUSTER_NAME --zone $gcp_region-$gcp_zone
34 |
--------------------------------------------------------------------------------
/infra/k8s-common:
--------------------------------------------------------------------------------
1 | repo=$(echo $k8s_version | cut -f 1,2 -d .)
2 | cat <<EOF >/etc/yum.repos.d/kubernetes.repo
3 | [kubernetes]
4 | name=Kubernetes
5 | baseurl=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/
6 | enabled=1
7 | gpgcheck=1
8 | gpgkey=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/repodata/repomd.xml.key
9 | EOF
10 | while ! dnf install -y selinux-policy policycoreutils-python-utils selinux-policy-targeted container-selinux --setopt=tsflags=noscripts; do sleep 1; done
11 | while ! dnf install -y kubelet-$k8s_version docker kubeadm-$k8s_version kubectl-$k8s_version; do sleep 1; done
12 | curl -L https://github.com/containerd/containerd/releases/download/v1.6.15/containerd-1.6.15-linux-amd64.tar.gz | tar Cxzvf /usr/local -
13 | curl -o /etc/systemd/system/containerd.service https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
14 | mkdir /etc/containerd
15 | containerd config default | sed 's/SystemdCgroup = false/SystemdCgroup = true/' >/etc/containerd/config.toml
16 | curl -Lo /usr/sbin/runc https://github.com/opencontainers/runc/releases/download/v1.1.4/runc.amd64
17 | chmod 755 /usr/sbin/runc
18 | systemctl daemon-reload
19 | systemctl enable --now containerd
20 | systemctl enable --now podman
21 | systemctl enable --now kubelet
22 | touch /etc/containers/nodocker
23 |
--------------------------------------------------------------------------------
/infra/k8s-master:
--------------------------------------------------------------------------------
1 | kubeadm config images list --kubernetes-version $k8s_version | xargs -n1 -P0 ctr -n k8s.io images pull
2 | kubeadm init --apiserver-advertise-address=$(hostname -i) --pod-network-cidr=10.244.0.0/16 --kubernetes-version $k8s_version
3 | mkdir /root/.kube
4 | cp /etc/kubernetes/admin.conf /root/.kube/config
5 | #
6 | # wait for API server to come up
7 | sleep 5
8 | echo "Waiting on kube api"
9 | readiness="https://localhost:6443/readyz"
10 | expected_response="ok"
11 |
12 | while true; do
13 | response=$(curl -k -s "$readiness")
14 | if [ "$response" = "$expected_response" ]; then
15 | echo "kube api is ready!"
16 | break
17 | fi
18 | echo "kube api not ready. Waiting 5 seconds..."
19 | sleep 5
20 | done
21 | # label node
22 | kubectl label node $(hostname) node-role.kubernetes.io/master=master
23 |
24 | echo "Applying Flannel"
25 | kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/v0.25.1/Documentation/kube-flannel.yml
26 | kubectl config set-context --current --namespace=default
--------------------------------------------------------------------------------
/infra/k8s-node:
--------------------------------------------------------------------------------
1 | dnf install -y kernel-devel sg3_utils device-mapper-multipath iscsi-initiator-utils &
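# pre-pull the Portworx, Stork and kubeadm images in parallel so the later install and join steps start faster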
2 | (echo docker.io/portworx/{px-enterprise,oci-monitor}:$px_version ; echo docker.io/$(curl -sk "https://install.portworx.com/$px_version?kb_ver=$k8s_version&comp=stork" | awk '/image:/{print$2}') ; kubeadm config images list --kubernetes-version $k8s_version 2>/dev/null) | xargs -n1 -P0 ctr -n k8s.io images pull
3 | while : ; do
4 | command=$(ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-$cluster kubeadm token create --print-join-command)
5 | echo $command | grep -qE '[0-9a-f]{64}'
6 | [ $? -eq 0 ] && break
7 | sleep 5
8 | done
9 | echo "Executing '$command'"
10 | eval $command
11 | wait
12 | # set role for this node
13 | ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-$cluster kubectl label node $(hostname) node-role.kubernetes.io/worker=worker
14 |
--------------------------------------------------------------------------------
/infra/ocp4-master:
--------------------------------------------------------------------------------
1 | dnf install -y docker wget
2 | systemctl enable --now docker
3 |
4 | # install awscli2
5 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
6 | unzip -o /tmp/awscliv2.zip -d /tmp >/dev/null
7 | /tmp/aws/install
8 |
9 | ln -s /usr/local/bin/aws /usr/bin/aws
10 | eval $(ssh-agent)
11 | cd /tmp
12 | wget -q https://mirror.openshift.com/pub/openshift-v4/clients/ocp/$ocp4_version/openshift-install-linux-$ocp4_version.tar.gz
13 | wget -q https://mirror.openshift.com/pub/openshift-v4/clients/ocp/4.15.38/openshift-client-linux-4.15.38.tar.gz
14 | tar xzf openshift-install-linux-$ocp4_version.tar.gz
15 | tar xzf openshift-client-linux-4.15.38.tar.gz
16 | mv openshift-install oc kubectl /usr/bin
17 | rm -f openshift-install-linux-$ocp4_version.tar.gz openshift-client-linux-4.15.38.tar.gz
18 |
19 | mkdir /root/ocp4
20 | cd /root/ocp4
21 |
22 | while [ ! -f "/tmp/ocp4-install-config.yaml" ]; do sleep 5; done
23 | cp /tmp/ocp4-install-config.yaml /root/ocp4/install-config.yaml
24 |
25 | echo "cd /root/ocp4 ; openshift-install destroy cluster" >> /px-deploy/platform-delete/ocp4.sh
26 |
27 | openshift-install create cluster --log-level=debug
28 | if [ $? -ne 0 ]; then
29 | echo Failed to deploy Openshift
30 | exit 1
31 | fi
32 | mkdir /root/.kube
33 | cp /root/ocp4/auth/kubeconfig /root/.kube/config
34 | chmod 600 /root/.kube/config
35 | aws configure set default.region $aws_region
36 | URL=$(grep 'Access the OpenShift web-console' /root/ocp4/.openshift_install.log |cut -d\" -f4 | cut -d: -f2-)
37 | echo "url $URL" >> /var/log/px-deploy/completed/tracking
38 |
39 | CRED=$(grep 'Login to the console' /root/ocp4/.openshift_install.log | cut -d\\ -f4 | cut -d\" -f2)
40 | echo "cred $CRED" >> /var/log/px-deploy/completed/tracking
41 |
42 | cat <<EOF >> /etc/motd
43 | +================================================+
44 | OCP4 Web UI: $URL
45 | Admin User Name: kubeadmin
46 | Password: $CRED
47 | +================================================+
48 | EOF
49 |
--------------------------------------------------------------------------------
/infra/rancher-master:
--------------------------------------------------------------------------------
1 | repo=$(echo $rancher_k8s_version | cut -f 1,2 -d .)
2 | cat <<EOF >/etc/yum.repos.d/kubernetes.repo
3 | [kubernetes]
4 | name=Kubernetes
5 | baseurl=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/
6 | enabled=1
7 | gpgcheck=1
8 | gpgkey=https://pkgs.k8s.io/core:/stable:/v$repo/rpm/repodata/repomd.xml.key
9 | EOF
10 |
11 | k8sversion=$(echo $rancher_k8s_version | grep -o "^[0-9]*\.[0-9]*\.[0-9]*")
12 | while ! dnf install -y kubectl-$k8sversion; do sleep 1; done
13 |
14 | # install awscli2
15 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
16 | unzip -o /tmp/awscliv2.zip -d /tmp >/dev/null
17 | /tmp/aws/install
18 | ln -s /usr/local/bin/aws /usr/bin/aws
19 |
20 | mkdir /root/.kube
21 | echo "waiting for /root/.kube/config to be created"
22 | while [ ! -f "/root/.kube/config" ]; do sleep 5; done
23 | echo "/root/.kube/config found"
24 |
25 | # remove k3s implementation of kubectl
26 | rm /usr/local/bin/kubectl
27 |
28 | echo "waiting for rancher guest cluster readiness"
29 | while ! kubectl wait pod -n cattle-system --timeout=300s --for=condition=ready -l app=cattle-cluster-agent; do
30 | sleep 2
31 | done
32 |
33 | echo "url https://rancher.$name.$ocp4_domain" >> /var/log/px-deploy/completed/tracking
34 | echo "cred portworx1!portworx1!" >> /var/log/px-deploy/completed/tracking
35 |
36 |
37 | cat <<EOF >> /etc/motd
38 | +================================================+
39 | Rancher Web UI: https://rancher.$name.$ocp4_domain
40 | Admin User Name: admin
41 | Password: portworx1!portworx1!
42 | +================================================+
43 | EOF
44 |
--------------------------------------------------------------------------------
/install.sh:
--------------------------------------------------------------------------------
1 | RED='\033[0;31m'
2 | GREEN='\033[0;32m'
3 | YELLOW='\033[0;33m'
4 | BLUE='\033[1;34m'
5 | WHITE='\033[0;37m'
6 | NC='\033[0m'
7 | ver=$1
8 |
9 | if [ $USER != root -a -d $HOME/.px-deploy ]; then
10 | if [ $(find $HOME/.px-deploy -uid 0 | wc -l) != 0 ]; then
11 | echo -e "${RED}Found root-owned files in $HOME/.px-deploy - run this command before rerunning install.sh:$NC"
12 | echo "sudo chown -R $USER $HOME/.px-deploy"
13 | exit 1
14 | fi
15 | fi
16 |
17 | rm -rf /tmp/px-deploy.build
18 | mkdir /tmp/px-deploy.build
19 | cd /tmp/px-deploy.build
20 | echo Cloning repo
21 | git clone https://github.com/PureStorage-OpenConnect/px-deploy
22 | cd px-deploy
23 | if [ -z "$ver" ]; then
24 | ver=$(cat VERSION)
25 | git checkout v$ver
26 | fi
27 | echo "Pulling image (version $ver)"
28 | docker pull ghcr.io/purestorage-openconnect/px-deploy:$ver
29 | docker tag ghcr.io/purestorage-openconnect/px-deploy:$ver px-deploy
30 |
31 | #echo Building container
32 | #docker build $PLATFORM --network host -t px-deploy . >&/dev/null
33 | #if [ $? -ne 0 ]; then
34 | # echo -e ${RED}Image build failed${NC}
35 | # exit
36 | #fi
37 | mkdir -p $HOME/.px-deploy/{keys,deployments,kubeconfig,tf-deployments,docs,logs}
38 |
39 | # backup existing directories and force copy from current branch
40 | time=$(date +%s)
41 | for i in infra scripts templates assets docs; do
42 | [ -e $HOME/.px-deploy/$i ] && echo Backing up $HOME/.px-deploy/$i to $HOME/.px-deploy/$i.$time && cp -r $HOME/.px-deploy/$i $HOME/.px-deploy/$i.$time
43 | cp -rf $i $HOME/.px-deploy
44 | done
45 |
46 | # existing defaults.yml found. Dont replace, but ask for updating versions
47 | if [ -e $HOME/.px-deploy/defaults.yml ]; then
48 | echo -e "${YELLOW}Existing defaults.yml found. Please consider updating k8s_version and px_version to release settings (check $HOME/px-deploy/versions.yml)."
49 | else
50 | cp defaults.yml $HOME/.px-deploy/defaults.yml
51 | fi
52 | cp defaults.yml $HOME/.px-deploy/versions.yml
53 |
54 | echo
55 | echo -e ${YELLOW}If you are using zsh, append this to your .zshrc:
56 | echo -e ${WHITE}'px-deploy() { [ "$DEFAULTS" ] && params="-v $DEFAULTS:/px-deploy/.px-deploy/defaults.yml" ; docker run --network host -it -e PXDUSER=$USER --rm --name px-deploy.$$ $=params -v $HOME/.px-deploy:/px-deploy/.px-deploy px-deploy /root/go/bin/px-deploy $* ; }'
57 | echo -e ${YELLOW}If you are using bash, append this to your .bash_profile:
58 | echo -e ${WHITE}'px-deploy() { [ "$DEFAULTS" ] && params="-v $DEFAULTS:/px-deploy/.px-deploy/defaults.yml" ; docker run --network host -it -e PXDUSER=$USER --rm --name px-deploy.$$ $params -v $HOME/.px-deploy:/px-deploy/.px-deploy px-deploy /root/go/bin/px-deploy "$@" ; }'
59 | echo
60 | echo -e ${GREEN}When your px-deploy function is set, create a deployment with:
61 | echo -e "${WHITE}px-deploy create --name myDeployment --template px$NC"
62 | echo
63 | echo -e ${YELLOW}If using bash completion, execute:
64 | echo -e ${WHITE}'px-deploy completion | tr -d "\\r" >$HOME/.px-deploy/bash-completion'
65 | echo -e ${YELLOW}and append this to your .bash_profile:
66 | echo -e "${WHITE}[ -n \$BASH_COMPLETION ] && . \$HOME/.px-deploy/bash-completion"
67 |
--------------------------------------------------------------------------------
/scripts/aws-elb:
--------------------------------------------------------------------------------
1 | if [ $platform != eks ] && [ $platform != gke ] && [ $platform != ocp4 ] && [ $platform != rancher ] && [ $platform != aks ]; then
2 | region=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/.$//')
3 | aws configure set default.region $region
4 | instance_id=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
5 | vpc=$(aws ec2 describe-instances --instance-ids $instance_id --query Reservations[0].Instances[0].VpcId --output text)
6 | subnet=$(aws ec2 describe-instances --instance-ids $instance_id --query Reservations[0].Instances[0].SubnetId --output text)
7 |
8 | #sg=$(aws ec2 describe-security-groups --filters Name=group-name,Values=px-deploy Name=vpc-id,Values=$vpc --query SecurityGroups[].GroupId --output text)
9 | sg=$(aws ec2 create-security-group --group-name px-deploy-$name-elb --vpc-id $vpc --description "px-deploy aws elb" --query "GroupId" --output text)
10 | aws ec2 authorize-security-group-ingress --group-id $sg --protocol tcp --port 80 --cidr 0.0.0.0/0
11 |
12 | instances=$(aws ec2 describe-instances --filters "Name=network-interface.vpc-id,Values=$vpc" --query "Reservations[*].Instances[*].InstanceId" --output text)
13 | for i in $instances; do
14 | aws ec2 describe-instances --instance-id $i --query Reservations[].Instances[].Tags --output text | grep -q Name.*node
15 | [ $? -eq 0 ] && elb_instances="$elb_instances $i"
16 | done
17 | aws elb create-load-balancer --load-balancer-name px-deploy-$name --listeners Protocol=http,LoadBalancerPort=80,InstanceProtocol=http,InstancePort=30333 --security-groups $sg --subnets $subnet
18 | aws elb configure-health-check --load-balancer-name px-deploy-$name --health-check Target=HTTP:30333/,Interval=10,UnhealthyThreshold=2,HealthyThreshold=2,Timeout=5
19 | aws elb register-instances-with-load-balancer --load-balancer-name px-deploy-$name --instances $elb_instances
20 |
21 | elb_dnsname=$(aws elb describe-load-balancers --query "LoadBalancerDescriptions[].{a:VPCId,b:DNSName}" --output text | awk /$vpc/'{print$2}')
22 |
23 | echo "AWS ELB URL for app is: $elb_dnsname" >> /etc/motd
24 | else
25 | echo "Cluster platform is $platform. Skipping creation of AWS ELB"
26 | fi
--------------------------------------------------------------------------------
/scripts/backup-restore:
--------------------------------------------------------------------------------
1 | # Apply Petclinic and Minio in separate namespaces
2 | kubectl apply -f /assets/minio/minio-deployment.yml
3 | kubectl apply -f /assets/petclinic/petclinic.yml
4 |
5 | # Setup Minio Endpoint
6 | ip=`curl -s https://ipinfo.io/ip`
7 | sed -i -e 's/xxxx/'"$ip"'/g' /assets/backup-restore/backupLocation.yml
8 |
9 | # Setup Minio Bucket
10 | kubectl wait --for=condition=ready pod -l app=minio -n minio --timeout 30m
11 | docker run --rm -v /etc/hosts:/etc/hosts -e AWS_ACCESS_KEY_ID=minio -e AWS_SECRET_ACCESS_KEY=minio123 amazon/aws-cli --endpoint-url http://node-$cluster-1:30221 s3 mb s3://portworx
12 |
--------------------------------------------------------------------------------
/scripts/cat:
--------------------------------------------------------------------------------
1 | [ ! "$cat" ] && echo '$cat' not defined && exit
2 | [ ! -f "$cat" ] && echo File $cat does not exist && exit
3 | echo Contents of $cat:
4 | cat $cat
5 |
--------------------------------------------------------------------------------
/scripts/clusterpair:
--------------------------------------------------------------------------------
1 | # Configures cluster pairs between cluster 1 and cluster 2
2 | # this script is expected to run on master-2
3 |
4 | AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
5 | AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
6 |
7 | echo "Creating bucket '$DR_BUCKET' in region 'us-east-1', if it does not exist"
8 | aws s3 mb s3://$DR_BUCKET --region us-east-1
9 | BUCKET_REGION=$(aws s3api get-bucket-location --bucket $DR_BUCKET --output text)
10 | # Region us-east-1 returns "None" instead of the region name
11 | if [ "$BUCKET_REGION" = "None" ]; then
12 | BUCKET_REGION="us-east-1"
13 | fi
14 | echo "Bucket region: $BUCKET_REGION"
15 |
16 | scp root@master-1:/root/.kube/config /tmp/dest_kubeconfig
17 |
18 | if [ "$platform" = eks ] || [ "$platform" = ocp4 ]; then
19 | # expose local px api on svc type LB
20 | kubectl patch stc $(kubectl get stc -n portworx -o jsonpath='{.items[].metadata.name}') -n portworx --type='json' -p '[{"op":"add","path":"/metadata/annotations/portworx.io~1service-type","value":"portworx-api:LoadBalancer"}]'
21 | # expose remote px api on svc type LB
22 | kubectl --kubeconfig /tmp/dest_kubeconfig patch stc $(kubectl --kubeconfig /tmp/dest_kubeconfig get stc -n portworx -o jsonpath='{.items[].metadata.name}') -n portworx --type='json' -p '[{"op":"add","path":"/metadata/annotations/portworx.io~1service-type","value":"portworx-api:LoadBalancer"}]'
23 |
24 | echo "waiting for local px api lb service creation"
25 | while ! kubectl wait -n portworx --for=jsonpath=.status.loadBalancer.ingress[].hostname services/portworx-api; do
26 | echo "waiting for local px api lb service creation"
27 | done
28 |
29 | echo "waiting for remote px api lb service creation"
30 | while ! kubectl --kubeconfig /tmp/dest_kubeconfig wait -n portworx --for=jsonpath=.status.loadBalancer.ingress[].hostname services/portworx-api; do
31 | echo "waiting for remote px api lb service creation"
32 | done
33 |
34 | LOCAL_EP=$(kubectl get svc portworx-api -n portworx -ojson | jq -r ".status.loadBalancer.ingress[0].hostname")
35 | REMOTE_EP=$(kubectl --kubeconfig /tmp/dest_kubeconfig get svc portworx-api -n portworx -ojson | jq -r ".status.loadBalancer.ingress[0].hostname")
36 |
37 | echo "waiting for local portworx api lb service to respond http 200 ELB $LOCAL_EP"
38 | while [ $(curl -s -o /dev/null -w "%{http_code}" $LOCAL_EP:9021/v1/identities/version) != "200" ]; do
39 | echo "waiting for local portworx api lb service to respond http 200 ELB $LOCAL_EP"
40 | sleep 5
41 | done
42 |
43 | echo "waiting for remote portworx api lb service to respond http 200. ELB $REMOTE_EP"
44 | while [ $(curl -s -o /dev/null -w "%{http_code}" $REMOTE_EP:9021/v1/identities/version) != "200" ]; do
45 | echo "waiting for remote portworx api lb service to respond http 200. ELB $REMOTE_EP"
46 | sleep 5
47 | done
48 | #elif [ "$platform" = rancher ]; then
49 | #else
50 | # host=$(kubectl get nodes -l 'node-role.kubernetes.io/worker in (worker,true)' -o json |jq -r '.items[0].status.addresses[] | select(.type=="InternalIP") | .address')
51 | fi
52 |
53 | #improvement: we may try to detect if DR licenses are enabled on both sides -> px api?
54 | kubectl pxc pxctl license list | grep PX-DR | grep -q yes && MODE=async-dr || MODE=migration
55 | echo Mode is $MODE
56 |
57 | if [ "$platform" = eks ]; then
58 | kubectl create secret generic --from-file=$HOME/.aws/credentials -n portworx aws-creds
59 | kubectl --kubeconfig /tmp/dest_kubeconfig create secret generic --from-file=$HOME/.aws/credentials -n portworx aws-creds
60 |
61 | kubectl patch stc $(kubectl get stc -n portworx -o jsonpath='{.items[].metadata.name}') -n portworx --type='json' -p '[{"op":"add","path":"/spec/stork/volumes","value":[{"mountPath":"/root/.aws","name":"aws-creds","secret":{"secretName":"aws-creds"}}]}]'
62 | kubectl patch stc $(kubectl --kubeconfig /tmp/dest_kubeconfig get stc -n portworx -o jsonpath='{.items[].metadata.name}') -n portworx --type='json' -p '[{"op":"add","path":"/spec/stork/volumes","value":[{"mountPath":"/root/.aws","name":"aws-creds","secret":{"secretName":"aws-creds"}}]}]'
63 | fi
64 |
65 | while : ; do
66 | storkctl create clusterpair remotecluster --namespace kube-system --dest-kube-file /tmp/dest_kubeconfig --src-kube-file /root/.kube/config --provider s3 --s3-endpoint s3.amazonaws.com --s3-access-key $AWS_ACCESS_KEY --s3-secret-key $AWS_SECRET_KEY --s3-region $BUCKET_REGION --bucket $DR_BUCKET --mode $MODE && break
67 | sleep 5
68 | done
69 |
--------------------------------------------------------------------------------
/scripts/clusterpair-metro:
--------------------------------------------------------------------------------
1 | # Configures a clusterpair from cluster 1 to this cluster
2 | while : ; do
3 | POD=$(kubectl get pods -n portworx -lname=portworx --field-selector=status.phase=Running | tail -1 | cut -f 1 -d " ")
4 | if [ "$security" = true ]; then
5 | ADMIN_TOKEN=$(kubectl -n portworx get secret px-admin-token -o json | jq -r '.data."auth-token"' | base64 -d)
6 | kubectl -n portworx exec -ti $POD -c portworx -- /opt/pwx/bin/pxctl context create admin --token=$ADMIN_TOKEN
7 | fi
8 | token=$(kubectl exec -n portworx -it $POD -- /opt/pwx/bin/pxctl cluster token show 2>/dev/null | cut -f 3 -d " ")
9 | echo $token | grep -Eq '\w{128}'
10 | [ $? -eq 0 ] && break
11 | sleep 5
12 | echo waiting for portworx
13 | done
14 | storkctl generate clusterpair -n kube-system remotecluster-$cluster | sed '/insert_storage_options_here/c\' >/var/tmp/cp.yaml
15 | while : ; do
16 | cat /var/tmp/cp.yaml | ssh -oConnectTimeout=1 -oStrictHostKeyChecking=no master-1 kubectl apply -f -
17 | [ $? -eq 0 ] && break
18 | sleep 5
19 | done
20 |
--------------------------------------------------------------------------------
/scripts/eks-multicloud-target:
--------------------------------------------------------------------------------
1 |
2 | kubectl create secret generic --from-file=/root/.aws/credentials -n portworx aws-creds
3 | kubectl patch stc/px-deploy-$cluster --type merge -n portworx -p '{"spec": {
4 | "stork": {
5 | "volumes": [
6 | {
7 | "mountPath": "/root/.aws",
8 | "name": "aws-creds",
9 | "readOnly": true,
10 | "secret": {
11 | "secretName": "aws-creds"
12 | }
13 | }
14 | ]
15 | }
16 | }}'
17 |
18 | kubectl annotate stc px-deploy-$cluster -n portworx portworx.io/service-type="LoadBalancer" --overwrite
19 |
20 | while : ; do
21 | token=$(kubectl exec -n portworx -it $(kubectl get pods -n portworx -lname=portworx --field-selector=status.phase=Running | tail -1 | cut -f 1 -d " ") -- /opt/pwx/bin/pxctl cluster token show 2>/dev/null | cut -f 3 -d " ")
22 | echo $token | grep -Eq '\w{128}'
23 | [ $? -eq 0 ] && break
24 | sleep 5
25 | echo waiting for portworx
26 | done
27 |
28 | UUID=$(kubectl get stc -n portworx -o jsonpath='{.items[].status.clusterUid}')
29 | S3_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
30 | S3_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null | head -1)
31 |
32 | S3_BUCKET_REGION=$(aws s3api get-bucket-location --bucket $DR_BUCKET --output text)
33 | # Region us-east-1 returns "None" instead of the region name
34 | if [ "$S3_BUCKET_REGION" = "None" ]; then
35 | S3_BUCKET_REGION="us-east-1"
36 | fi
37 | echo "Bucket region: $S3_BUCKET_REGION"
38 |
39 |
40 | while : ;do
41 | host=$(kubectl get svc -n portworx portworx-service -o jsonpath='{.status.loadBalancer.ingress[].hostname}')
42 | [ "$host" ] && break
43 | sleep 1
44 | done
45 |
46 | mkdir /root/drscripts
47 |
48 | PX_POD=$(kubectl get pods -l name=portworx -n portworx -o jsonpath='{.items[0].metadata.name}')
49 |
50 | # run dummy "pxctl credentials list" to get driver ready
51 | CRED_CMD="pxctl credentials list"
52 | kubectl exec $PX_POD -n portworx -- /opt/pwx/bin/$CRED_CMD
53 |
54 | CRED_CMD="pxctl credentials create --provider s3 --s3-access-key $S3_ACCESS_KEY --s3-secret-key $S3_SECRET_KEY --s3-region $S3_BUCKET_REGION --s3-endpoint s3.$S3_BUCKET_REGION.amazonaws.com --s3-storage-class STANDARD --bucket $DR_BUCKET clusterPair_$UUID"
55 |
56 | PX_POD=$(kubectl get pods -l name=portworx -n portworx -o jsonpath='{.items[0].metadata.name}')
57 | kubectl exec $PX_POD -n portworx -- /opt/pwx/bin/$CRED_CMD
58 |
59 |
60 | storkctl generate clusterpair -n kube-system remotecluster | sed "/insert_storage_options_here/c\ ip: $host\n token: $token\n" >/root/drscripts/cp.yaml
61 |
62 | # create preparation script for source cluster
63 | cat <<EOF >>/root/drscripts/prepare_migrate_dr_source.sh
64 |
65 | echo "[default]" > ./credentials
66 | echo "aws_access_key_id = $S3_ACCESS_KEY" >> ./credentials
67 | echo "aws_secret_access_key = $S3_SECRET_KEY" >> ./credentials
68 |
69 | kubectl pxc $CRED_CMD
70 | kubectl create secret generic --from-file=./credentials -n portworx aws-creds
71 | kubectl patch stc/px-deploy-1 --type merge -n portworx -p '{"spec": {
72 | "stork": {
73 | "volumes": [
74 | {
75 | "mountPath": "/root/.aws/",
76 | "name": "aws-creds",
77 | "readOnly": true,
78 | "secret": {
79 | "secretName": "aws-creds"
80 | }
81 | }
82 | ]
83 | }
84 | }}'
85 |
86 | kubectl apply -f ./cp.yaml
87 | EOF
88 |
89 | chmod +x /root/drscripts/prepare_migrate_dr_source.sh
90 |
91 |
92 | cat <<EOF >> /etc/motd
93 | +================================================+
94 | Howto setup multi cloud Migrate/Async DR Source
95 | +================================================+
96 | - Copy content of /root/drscripts to source master and execute
97 | - on target system you could use /assets/app-migration.yml to setup migration
98 | +================================================+
99 | EOF
100 |
101 |
102 |
--------------------------------------------------------------------------------
/scripts/etcd:
--------------------------------------------------------------------------------
1 | # Run an etcd container
2 | mkdir /etcd
3 | # :latest tag is currently missing on quay, setting version
4 | docker run -d --restart unless-stopped -v /etcd:/etcd -p 2382:2382 \
5 | --name etcd quay.io/coreos/etcd:v3.5.21 \
6 | /usr/local/bin/etcd \
7 | -name etcd0 -data-dir /etcd \
8 | -auto-compaction-retention=3 -quota-backend-bytes=8589934592 \
9 | -advertise-client-urls http://$(hostname -i):2382 \
10 | -listen-client-urls http://0.0.0.0:2382
11 |
--------------------------------------------------------------------------------
/scripts/helm-backup:
--------------------------------------------------------------------------------
1 | NAMESPACE=central
2 | VERSION=2.8.4
3 |
4 | curl -O https://raw.githubusercontent.com/portworx/helm/master/stable/px-central-$VERSION.tgz
5 | helm install px-central px-central-$VERSION.tgz --namespace $NAMESPACE --create-namespace --version $VERSION --set persistentStorage.enabled=true,persistentStorage.storageClassName="px-csi-db",pxbackup.enabled=true,oidc.centralOIDC.updateAdminProfile=false,installCRDs=true
6 | #kubectl scale sts -n $NAMESPACE pxc-backup-mongodb --replicas 1
7 |
8 | until (kubectl get po -n $NAMESPACE -ljob-name=pxcentral-post-install-hook -o wide | awk '{print $1, $2, $3}' |grep "Completed"); do echo "Waiting for post install hook";sleep 3; done
9 | until (kubectl get po -n $NAMESPACE -lapp=px-backup -o wide | awk '{print $1, $2, $3}' | grep "Running" | grep "1/1"); do echo "Waiting for backup service";sleep 3; done
10 |
11 | # sometimes mongodb pods do not start. apply workaround if detected
12 | echo "checking for statefulset pxc-backup-mongodb readiness"
13 | while ! kubectl wait --for=jsonpath='{.status.readyReplicas}'=3 sts/pxc-backup-mongodb -n central --timeout 180s; do
14 | echo "statefulset mongodb not ready"
15 | POD=$(kubectl get pods -n central -l app.kubernetes.io/component=pxc-backup-mongodb -ojson | jq -r '.items[] | select(.status.containerStatuses[].ready==false) | .metadata.name' | head -n1)
16 | echo "deleting data dir in failed pod $POD"
17 | kubectl exec $POD -n central -- rm -rf /bitnami/mongodb/data/db
18 | echo "waiting for $POD to restart"
19 | done
20 |
21 | # enable pxmonitor & grafana (needs a running px-backup-ui IP/Port)
22 | pubIP=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
23 | backupPort=$(kubectl get svc px-backup-ui -n $NAMESPACE -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
24 | kubectl delete job pxcentral-post-install-hook --namespace $NAMESPACE
25 | helm upgrade px-central px-central-$VERSION.tgz --namespace $NAMESPACE --version $VERSION --reuse-values --set pxmonitor.enabled=true --set pxmonitor.pxCentralEndpoint=$pubIP:$backupPort
26 | #kubectl scale sts -n $NAMESPACE pxc-backup-mongodb --replicas 1
27 | #kubectl scale sts -n $NAMESPACE pxcentral-cortex-cassandra --replicas 1
28 | until (kubectl get po -n $NAMESPACE -ljob-name=pxcentral-post-install-hook -o wide | awk '{print $1, $2, $3}' |grep "Completed"); do echo "Waiting for post install hook";sleep 3; done
29 |
30 | BACKUP_POD_NAME=$(kubectl get pods -n $NAMESPACE -l app=px-backup -o jsonpath='{.items[0].metadata.name}' 2>/dev/null)
31 | kubectl cp -n $NAMESPACE $BACKUP_POD_NAME:pxbackupctl/linux/pxbackupctl /usr/bin/pxbackupctl
32 | chmod +x /usr/bin/pxbackupctl
33 |
--------------------------------------------------------------------------------
/scripts/helm-backup-apps:
--------------------------------------------------------------------------------
1 | BACKUP_POD_IP=$(kubectl get pods -n central -l app=px-backup -o jsonpath='{.items[*].status.podIP}' 2>/dev/null)
2 | AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
3 | AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
4 | pubIP=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
5 | backupPort=$(kubectl get svc px-backup-ui -n central -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
6 | client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath={.data.OIDC_CLIENT_SECRET} | base64 --decode)
7 |
8 | # Configures backup with clusters and locations
9 | pxbackupctl login -s http://$pubIP:$backupPort -u admin -p admin
10 | pxbackupctl create cloudcredential --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY -e $BACKUP_POD_IP:10002 --orgID default -n s3 -p aws
11 | sleep 5
12 | cloud_credential_uid=$(pxbackupctl get cloudcredential -e $BACKUP_POD_IP:10002 --orgID default -o json | jq -cr '.[0].metadata.uid')
13 | pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid $cloud_credential_uid -n aws -p s3 --s3-endpoint https://s3.$aws_region.amazonaws.com --path $BACKUP_BUCKET --s3-region $aws_region -e $BACKUP_POD_IP:10002 --orgID default
14 | pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name 15min-schedule -e $BACKUP_POD_IP:10002 --orgID default
15 | sleep 5
16 | ssh master-2 cat /root/.kube/config > /cluster-2-kube-config
17 | ssh master-3 cat /root/.kube/config > /cluster-3-kube-config
18 | pxbackupctl create cluster --name cluster-1 -k /root/.kube/config -e $BACKUP_POD_IP:10002 --orgID default
19 | pxbackupctl create cluster --name cluster-2 -k /cluster-2-kube-config -e $BACKUP_POD_IP:10002 --orgID default
20 | pxbackupctl create cluster --name cluster-3 -k /cluster-3-kube-config -e $BACKUP_POD_IP:10002 --orgID default
21 |
22 | # Patches Prometheus operator to allow multiple instances to run
23 | kubectl patch deployment prometheus-operator -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "-namespaces=kube-system" }]'
24 | ssh master-2 <<EOF
25 | kubectl patch deployment prometheus-operator -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "-namespaces=kube-system" }]'
26 | EOF
27 | ssh master-3 <<EOF
28 | kubectl patch deployment prometheus-operator -n kube-system --type=json -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "-namespaces=kube-system" }]'
29 | EOF
30 |
31 | cat <<EOF >> /etc/motd
32 | +================================================+
33 | SAVE THE FOLLOWING DETAILS FOR FUTURE REFERENCES
34 | +================================================+
35 | PX-Central User Interface Access URL : http://$pubIP:$backupPort
36 | PX-Central admin user name: admin
37 | PX-Central admin user password: admin
38 | +================================================+
39 | EOF
40 |
41 |
--------------------------------------------------------------------------------
/scripts/helm-backup-ocp4-kubevirt:
--------------------------------------------------------------------------------
1 | # create ocp route for backup UI
2 | cat </dev/null
35 | #res=$?
36 | #while [ "$res" != "23" ]; do
37 | # echo "Waiting for grpc to accept connections. Ret: $res "
38 | # sleep 2
39 | # curl --connect-timeout 2 $BACKUP_POD_IP:10002 2>/dev/null
40 | # res=$?
41 | #done
42 | sleep 20
43 | # TODO: find a reliable way to detect if grpc is responding (a possible sketch is commented out at the end of this script)
44 |
45 | # get external px-backup route hostname
46 | pubIP=$(kubectl get route px-backup-ui -n central -o json |jq -r ".status.ingress[0].host")
47 | AWS_ACCESS_KEY=$(sed -n 's/aws_access_key_id[ =]*//p' /root/.aws/credentials 2>/dev/null)
48 | AWS_SECRET_KEY=$(sed -n 's/aws_secret_access_key[ =]*//p' /root/.aws/credentials 2>/dev/null)
49 | backupPort=80
50 | client_secret=$(kubectl get secret --namespace central pxc-backup-secret -o jsonpath={.data.OIDC_CLIENT_SECRET} | base64 --decode)
51 |
52 | # Configures backup with clusters and locations
53 | pxbackupctl login -s http://$pubIP:$backupPort -u admin -p admin
54 | pxbackupctl version -e $BACKUP_POD_IP:10002
55 | pxbackupctl create cloudcredential --aws-access-key $AWS_ACCESS_KEY --aws-secret-key $AWS_SECRET_KEY -e $BACKUP_POD_IP:10002 --orgID default -n s3 -p aws
56 | sleep 5
57 | cloud_credential_uid=$(pxbackupctl get cloudcredential -e $BACKUP_POD_IP:10002 --orgID default -o json | jq -cr '.[0].metadata.uid')
58 | pxbackupctl create backuplocation --cloud-credential-name s3 --cloud-credential-Uid $cloud_credential_uid -n $BACKUP_BUCKET -p s3 --s3-endpoint https://s3.$aws_region.amazonaws.com --path $BACKUP_BUCKET --s3-region $aws_region -e $BACKUP_POD_IP:10002 --orgID default
59 | pxbackupctl create schedulepolicy --interval-minutes 15 --interval-retain 12 --name 15min-schedule -e $BACKUP_POD_IP:10002 --orgID default
60 | sleep 5
61 |
62 | pxbackupctl create cluster --name cluster-1 -k /root/.kube/config -e $BACKUP_POD_IP:10002 --orgID default
63 |
64 | cat <<EOF >> /etc/motd
65 | +================================================+
66 | How to access PX-BACKUP UI
67 | +================================================+
68 | PX-Central User Interface Access URL : http://$pubIP:$backupPort
69 | PX-Central admin user name: admin
70 | PX-Central admin user password: admin
71 | +================================================+
72 | EOF
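73 |
74 | # A possible approach for the grpc-readiness TODO above (untested sketch): wait until the backend
75 | # port accepts TCP connections before talking to it (this does not verify grpc itself):
76 | #   until timeout 2 bash -c "</dev/tcp/$BACKUP_POD_IP/10002" 2>/dev/null; do
77 | #     echo "waiting for $BACKUP_POD_IP:10002"; sleep 2
78 | #   done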
--------------------------------------------------------------------------------
/scripts/install-awscli:
--------------------------------------------------------------------------------
1 | # install awscli2
2 | curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "/tmp/awscliv2.zip"
3 | unzip -o /tmp/awscliv2.zip -d /tmp >/dev/null
4 | /tmp/aws/install
--------------------------------------------------------------------------------
/scripts/install-ceph:
--------------------------------------------------------------------------------
1 | helm repo add rook-release https://charts.rook.io/release
2 | helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph
3 | kubectl wait --for=condition=ready pod -l app=rook-ceph-operator -n rook-ceph --timeout 5m
4 | kubectl label ns rook-ceph pod-security.kubernetes.io/enforce=privileged
5 | helm upgrade --install --create-namespace --namespace rook-ceph rook-ceph-cluster --set operatorNamespace=rook-ceph rook-release/rook-ceph-cluster -f /assets/ceph/values-override.yaml
6 | (
7 | set -x; cd "$(mktemp -d)" &&
8 | OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
9 | ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
10 | KREW="krew-${OS}_${ARCH}" &&
11 | curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
12 | tar zxvf "${KREW}.tar.gz" &&
13 | ./"${KREW}" install krew
14 | )
15 | echo 'export PATH=$PATH:$HOME/.krew/bin' >>$HOME/.bash_profile
16 | source $HOME/.bash_profile
17 | kubectl krew install rook-ceph
18 | kubectl wait --for=jsonpath='{.status.phase}'=Ready cephclusters/rook-ceph -n rook-ceph --timeout 30m
19 |
--------------------------------------------------------------------------------
/scripts/kubevirt:
--------------------------------------------------------------------------------
1 | export KUBEVIRTVERSION="v1.1.1"
2 | export CDIVERSION="v1.58.1"
3 |
4 | #export KUBEVIRTVERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases | grep tag_name | grep -v -- '-rc' | sort -r | head -1 | awk -F': ' '{print $2}' | sed 's/,//' | xargs)
5 | kubectl apply -f /assets/kubevirt/px-rwx-kubevirt.yml
6 |
7 | echo "downloading kubevirt $KUBEVIRTVERSION"
8 | kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRTVERSION}/kubevirt-operator.yaml
9 |
10 | echo "waiting for kubevirt operator readiness"
11 | while ! kubectl wait --for=condition=ready pod --timeout=300s -lname=virt-operator -n kubevirt; do
12 | sleep 2
13 | done
14 |
15 | kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRTVERSION}/kubevirt-cr.yaml
16 |
17 | echo "waiting for kubevirt CRD readiness"
18 | while ! kubectl wait kubevirt.kubevirt.io/kubevirt -n kubevirt --timeout=300s --for=jsonpath='{.status.phase}'='Deployed'; do
19 | sleep 2
20 | done
21 |
22 | # check node-1-1 to see if emulation (slow!!!) is needed
23 | echo "checking for virtualization capabilities on node-1-1 cpu"
24 | ssh node-1-1 egrep '^flags.*\(vmx\|svm\)' /proc/cpuinfo >/dev/null
25 | if [ $? = 1 ]; then
26 | echo "No virtualizazion capabilities found on node-1-1 cpu"
27 | echo "enabling software emulation (slow)"
28 | kubectl -n kubevirt patch kubevirts kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true}}}}'
29 | fi
30 |
31 | echo "installing virtctl"
32 | ARCH=$(uname -s | tr A-Z a-z)-$(uname -m | sed 's/x86_64/amd64/')
33 | echo ${ARCH}
34 | curl -L -o /tmp/virtctl https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRTVERSION}/virtctl-${KUBEVIRTVERSION}-${ARCH}
35 | chmod +x /tmp/virtctl
36 | sudo install /tmp/virtctl /usr/local/bin
37 | ln -s /usr/local/bin/virtctl /usr/local/bin/kubectl-virt
38 |
39 | kubectl apply -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDIVERSION}/cdi-operator.yaml
40 | kubectl apply -f https://github.com/kubevirt/containerized-data-importer/releases/download/${CDIVERSION}/cdi-cr.yaml
41 |
42 | echo "waiting for cdi operator readiness"
43 | while ! kubectl wait cdis.cdi.kubevirt.io cdi -n cdi --for=jsonpath='{.status.phase}'='Deployed'; do
44 | sleep 2
45 | done
46 |
47 |
48 | #kubectl apply -f /assets/kubevirt/px-virt-sc.yml
49 |
50 | #while ! kubectl patch storageprofile --type merge px-virtualization -p '{ "spec": { "claimPropertySets": [ { "accessModes": [ "ReadWriteMany" ], "volumeMode": "Filesystem" } ], "cloneStrategy": "csi-clone" } }'; do
51 | # echo waiting for storageprofile
52 | # sleep 2
53 | #done
54 |
--------------------------------------------------------------------------------
/scripts/kubevirt-apps:
--------------------------------------------------------------------------------
1 | kubectl apply -f /assets/kubevirt/pxbbq-ns.yml
2 | kubectl apply -f /assets/kubevirt/dv-ubuntu.yml
3 | kubectl apply -f /assets/kubevirt/pxbbq-ubuntu.yml
4 |
5 | if [ "$platform" = ocp4 ]; then
6 | kubectl apply -f /assets/kubevirt/ocp/pxbbq-route.yml
7 | else
8 | kubectl patch svc pxbbq-svc -n pxbbq --type='json' -p '[{"op":"replace","path":"/spec/type","value":"NodePort"}]'
9 | pubIP=$(curl http://169.254.169.254/latest/meta-data/public-ipv4)
10 | appPort=$(kubectl get svc pxbbq-svc -n pxbbq -o=jsonpath='{.spec.ports[?(@.port==80)].nodePort}')
11 | echo "Access PXBBQ on http://$pubIP:$appPort" >> /etc/motd
12 | fi
13 |
14 |
15 |
--------------------------------------------------------------------------------
/scripts/licenses:
--------------------------------------------------------------------------------
1 | if [ -z "$licenses" ]; then
2 | echo '$licenses is empty'
3 | exit
4 | fi
5 |
6 | for i in $licenses; do
7 | echo $i | grep -Eq '^[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}$'
8 | if [ $? -ne 0 ]; then
9 | echo "Licence '$i' is invalid"
10 | exit
11 | fi
12 | done
13 |
14 | for i in $licenses; do
15 | if [ "$security" = true ]; then
16 | ADMIN_TOKEN=$(kubectl -n portworx get secret px-admin-token -o json | jq -r '.data."auth-token"' | base64 -d)
17 | fi
18 | while ! kubectl exec -n portworx -c portworx -it $(kubectl get pods -n portworx -lname=portworx --field-selector=status.phase=Running | tail -1 | cut -f 1 -d " ") -- bash < /tmp/metro
--------------------------------------------------------------------------------
/scripts/ocp-kubevirt:
--------------------------------------------------------------------------------
1 | OPVERSION=$(kubectl get packagemanifests.packages.operators.coreos.com kubevirt-hyperconverged -o json | jq -r '.status.channels[] | select(.name=="stable") | .currentCSV')
2 |
3 | kubectl patch storageclass gp3-csi -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'
4 | kubectl apply -f /assets/kubevirt/px-rwx-kubevirt.yml
5 |
6 | echo "current kubevirt-hyperconverged operator version is $OPVERSION"
7 |
8 | cat <&/tmp/output
8 | echo Disk test:
9 | docker run --rm -v /mnt:/mnt antonipx/fio --blocksize=64k --filename=/mnt/fio.dat --ioengine=libaio --readwrite=write --size=10G --name=test --direct=1 --iodepth=128 --end_fsync=1
10 | umount /mnt
11 | mount /dev/pxd/pxd[0-9]* /mnt
12 | echo Portworx test:
13 | docker run --rm -v /mnt:/mnt antonipx/fio --blocksize=64k --filename=/mnt/fio.dat --ioengine=libaio --readwrite=write --size=10G --name=test --direct=1 --iodepth=128 --end_fsync=1
14 | EOF
15 |
--------------------------------------------------------------------------------
/scripts/px-wait:
--------------------------------------------------------------------------------
1 | # Wait for Portworx to be running on every node in the cluster
2 | while : ; do
3 | n=$(kubectl exec -n portworx -it $(kubectl get pods -n portworx -lname=portworx --field-selector=status.phase=Running | tail -1 | cut -f 1 -d " ") -- /opt/pwx/bin/pxctl status 2>/dev/null | grep "Yes.*Online.*Up" | wc -l)
4 | [ $n -eq $nodes ] && break
5 | sleep 1
6 | done
7 | kubectl rollout status deployment stork -n portworx
8 |
--------------------------------------------------------------------------------
/scripts/show-ip:
--------------------------------------------------------------------------------
1 | # post_script
2 | # Output external IP on master-1
3 | [ $cluster -eq 1 ] && echo SUCCESS - IP is $(curl -s https://ipinfo.io/ip)
4 | [ $platform = ocp4 -a -s /root/ocp4/auth/kubeadmin-password ] && echo OCP4 console on https://$(oc get routes console -n openshift-console -o json | jq -r .status.ingress[0].host), username kubeadmin, password $(cat /root/ocp4/auth/kubeadmin-password) || true
5 |
--------------------------------------------------------------------------------
/scripts/sock-shop:
--------------------------------------------------------------------------------
1 | # Install sock-shop
2 | kubectl apply -f /assets/sock-shop/sock-shop.yaml
3 |
--------------------------------------------------------------------------------
/scripts/training:
--------------------------------------------------------------------------------
1 | if [ $cluster -eq 1 ]; then
2 | dnf install -y vim-enhanced nano
3 | dnf install -y https://dl.fedoraproject.org/pub/epel/7/x86_64/Packages/s/shellinabox-2.20-5.el7.x86_64.rpm
4 | rm -f /etc/securetty
5 | sed -i s/4200/443/ /etc/sysconfig/shellinaboxd
6 | systemctl enable shellinaboxd
7 | systemctl restart shellinaboxd sshd
8 | sh /assets/training/Disaster_Recovery/etcd.sh
9 | mkdir /etc/skel/.kube
10 | mv /assets/training /etc/skel
11 | cat <<EOF >>/etc/skel/.bashrc
12 | alias k=kubectl
13 | complete -F __start_kubectl k
14 | PS1='\e[0;33m[\u@px-training \W]\$ \e[m'
15 | EOF
16 | for i in $(seq 1 $clusters); do
17 | useradd training$i
18 | passwd --stdin training$i <<.kube/config' && exit 22"
26 | [ $? -eq 22 ] && break
27 | sleep 2
28 | done
29 |
30 | for i in $(seq 1 $nodes); do
31 | while :; do
32 | ssh node-$cluster-$i "useradd training$cluster && passwd --stdin training$cluster <<"
16 |
--------------------------------------------------------------------------------
/templates/backup-restore.yml:
--------------------------------------------------------------------------------
1 | description: Deploys a Kubernetes cluster, Minio S3 storage, Petclinic Application and Backup/Restore config
2 | clusters: 1
3 | scripts: ["install-px", "backup-restore"]
4 |
--------------------------------------------------------------------------------
/templates/ceph.yml:
--------------------------------------------------------------------------------
1 | description: Install and run Ceph on each cluster
2 | scripts: ["install-ceph"]
3 | aws_type: "t3.xlarge"
4 |
--------------------------------------------------------------------------------
/templates/dude.yml:
--------------------------------------------------------------------------------
1 | description: Deploys clusters with Portworx, sets up and configures a cluster pairing from odd to even clusters, configures an async DR schedule, installs PX-Backup on the last cluster
2 | scripts: ["install-awscli", "install-px", "licenses", "dude"]
3 |
--------------------------------------------------------------------------------
/templates/eks-multicloud-target.yml:
--------------------------------------------------------------------------------
1 | # do not edit this file as it gets overwritten during updates
2 | # if you need to change settings create your own copy
3 | #
4 | # Requires:
5 | # working settings for the AWS cloud
6 | # env variable DR_BUCKET in defaults.yml pointing to an existing AWS S3 bucket (see the example at the end of this file)
7 | #
8 | # Creates the directory /root/drscripts containing the clusterpair spec and a setup script for the source cluster.
9 | # When 'px-deploy status' reports ready, copy /root/drscripts from the master node to the source PX cluster and execute the setup script.
10 | # The clusterpair should then be ready on the source cluster.
11 | #
12 | # Maintainer: Daniel Paul
13 | #
14 | description: Set up an EKS cluster with PX as a clusterpair migration target
15 | scripts: ["install-px", "eks-multicloud-target"]
16 | cloud: aws
17 | platform: eks
18 | env:
19 | cloud_drive: "type%3Dgp2%2Csize%3D150"
20 |
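21 | # Example defaults.yml entry (bucket name is illustrative):
22 | #   env:
23 | #     DR_BUCKET: "px-async-dr-bucket"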
--------------------------------------------------------------------------------
/templates/kubevirt.yml:
--------------------------------------------------------------------------------
1 | description: Install kubevirt on a bare-metal cluster
2 | scripts: ["install-px","kubevirt","kubevirt-apps-freebsd"]
3 | aws_type: "c5n.metal"
--------------------------------------------------------------------------------
/templates/metro.yml:
--------------------------------------------------------------------------------
1 | description: Deploys 2 K8s clusters in AWS with a stretched Portworx cluster. It configures Metro, a GUI and Petclinic, ready for a manual failover demo
2 | cloud: aws
3 | clusters: 2
4 | nodes: 3
5 | scripts: ["install-awscli"]
6 | env:
7 | #licenses: "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX"
8 | px_suffix: "c=metro-cluster&b=false"
9 | etcd: "EXTERNAL"
10 | cluster:
11 | - id: 1
12 | scripts: ["etcd", "metro-pre", "install-px", "metro-post", "aws-elb", "licenses", "petclinic"]
13 | - id: 2
14 | scripts: ["metro-pre", "install-px", "metro-post", "clusterpair-metro"]
15 |
16 | # NOTE: edit env.licenses above, or define in defaults.yml
17 | # Verify all is working with `storkctl get clusterdomainsstatus` (takes around 7 minutes)
18 | # Stop the nodes on cluster 1
19 | # Mark cluster 1 as down with `storkctl deactivate clusterdomain cluster-1` on master-2
20 | # Scale up on cluster 2 with `storkctl activate migration` on master-2
21 |
--------------------------------------------------------------------------------
/templates/migration.yml:
--------------------------------------------------------------------------------
1 | # DO NOT EDIT THIS FILE. CHANGES WILL BE OVERRIDDEN
2 | # mandatory env settings are shown below (commented out)
3 | # these env settings need to be set in your .px-deploy/defaults.yml
4 | description: Deploys 2 clusters with Portworx, sets up and configures a cluster pairing, and deploys a set of apps and a migration template.
5 | clusters: 2
6 | cloud: aws
7 | scripts: ["install-awscli", "install-px", "px-wait"]
8 | cluster:
9 | - id: 1
10 | scripts: []
11 | - id: 2
12 | scripts: ["clusterpair"]
13 | env:
14 | #DR_BUCKET: ""
15 |
16 | # NOTE - the cluster pair and migration template are created in the kube-system namespace
17 | # Verify all is installed correctly with `storkctl get clusterpair -n kube-system` You should see Ready for storage and scheduler
18 | # Deploy your choice of application(s) from the /assets directory - create multiple namespaces if you wish
19 | # Add some sample data either using a CLI or web interface
20 | # Edit the migration template to add the namespaces you created, if you did so
21 | # Apply the migration template `/assets/app-migration.yml`
22 | # Verify that the resources, including the data you added, are cloned to the second cluster (see the example commands below)
23 |
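24 | # Example walkthrough (namespace, app and edit are illustrative; assumes the postgres asset):
25 | #   storkctl get clusterpair -n kube-system
26 | #   kubectl create namespace demo
27 | #   kubectl apply -n demo -f /assets/postgres/postgres.yml
28 | #   (add "demo" to the namespaces list in /assets/app-migration.yml)
29 | #   kubectl apply -f /assets/app-migration.yml
30 | #   storkctl get migrations -n kube-system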
--------------------------------------------------------------------------------
/templates/ocp-kubevirt.yml:
--------------------------------------------------------------------------------
1 | # DO NOT EDIT THIS FILE. CHANGES WILL BE OVERRIDDEN
2 | # mandatory env settings are shown below (commented out)
3 | # these env settings need to be set in your .px-deploy/defaults.yml
4 | description: Two-cluster OCP Virtualization demo with Backup & AsyncDR on AWS bare-metal
5 | scripts: ["install-awscli","install-px", "licenses", "ocp-kubevirt"]
6 | aws_type: "c5n.metal"
7 | platform: "ocp4"
8 | cloud: "aws"
9 | clusters: 2
10 | cluster:
11 | - id: 1
12 | scripts: [ "kubevirt-apps", "helm-backup", "helm-backup-ocp4-kubevirt"]
13 | - id: 2
14 | scripts: ["clusterpair"]
15 | env:
16 | cloud_drive: "type%3Dgp2%2Csize%3D150"
17 | #licenses: "XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX-XXXX"
18 | #DR_BUCKET: ""
19 |
20 |
--------------------------------------------------------------------------------
/templates/pds-petclinic.yml:
--------------------------------------------------------------------------------
1 | # do not edit this file as it gets overwritten during updates
2 | # if you need to change settings create your own copy
3 | description: Deploy single k8s/px cluster, register at PDS controlplane, deploy Postgres using API, install petclinic demo app
4 | clusters: 1
5 | scripts: ["install-px", "pds-petclinic"]
6 | run_predelete: true
7 |
8 | env:
9 | # PDS_TOKEN: "[your pds user api token]" -> set this as env variable in your defaults.yml
10 | PDS_ACCOUNT: "Sales"
11 | PDS_TENANT: "Default"
12 | PDS_PROJECT: "Default"
13 | PDS_ENDPOINT: "https://prod.pds.portworx.com/api"
14 | PDS_NAMESPACE: "pds-petclinic"
15 | PDS_DATASERVICE: "pg"
16 | PDS_DATASERVICE_VERSION: "13.13"
17 | PDS_APP_CONFIG_TEMPLATE: "Default"
18 | PDS_APP_RESOURCE_TEMPLATE: "Tiny"
19 | PDS_APP_STORAGE_TEMPLATE: "Volume%20replication%20(best-effort%20spread)"
20 |
--------------------------------------------------------------------------------
/templates/px-backup.yml:
--------------------------------------------------------------------------------
1 | description: Three Kubernetes clusters with Portworx; PX-Backup is installed via Helm on the first cluster
2 | clusters: 3
3 | cluster:
4 | - id: 1
5 | scripts: ["install-px", "helm-backup", "helm-backup-apps"]
6 | nodes: 4
7 | - id: 2
8 | scripts: ["install-px", "sock-shop", "petclinic"]
9 | - id: 3
10 | scripts: ["install-px"]
11 |
12 | # Brings up PX-Backup stand-alone via Helm on the first
13 | # PX cluster, where you can log in and connect the other clusters.
14 | # Login details are printed in /etc/motd when you connect to the master node.
15 |
16 | env:
17 | #BACKUP_BUCKET: ""
18 |
19 | # IMPORTANT: set the BACKUP_BUCKET variable to match the name
20 | # of a bucket that already exists in the region you're deploying into
21 |
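22 | # Example defaults.yml entry (bucket name is illustrative):
23 | #   env:
24 | #     BACKUP_BUCKET: "px-backup-demo-bucket"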
--------------------------------------------------------------------------------
/templates/px-fio-example.yml:
--------------------------------------------------------------------------------
1 | description: An example fio benchmark on a gp2 disk and a Portworx volume on a gp2 disk
2 | scripts: ["install-px", "px-wait", "px-fio-example"]
3 | clusters: 1
4 | nodes: 3
5 | cloud: aws
6 | aws_ebs: "gp2:150 gp2:150"
7 | post_script: cat
8 | auto_destroy: true
9 | env:
10 | px_suffix: "s=/dev/nvme1n1"
11 | cat: "/tmp/output"
12 |
--------------------------------------------------------------------------------
/templates/px.yml:
--------------------------------------------------------------------------------
1 | description: Install and run Portworx on each cluster
2 | scripts: ["install-px"]
3 |
--------------------------------------------------------------------------------
/templates/training.yml:
--------------------------------------------------------------------------------
1 | description: Deploys training clusters
2 | scripts: ["training"]
3 |
--------------------------------------------------------------------------------
/terraform/aws/aws-returns.tpl:
--------------------------------------------------------------------------------
1 |
2 | aws__vpc: ${tpl_vpc}
3 | aws__sg: ${tpl_sg}
4 | aws__gw: ${tpl_gw}
5 | aws__routetable: ${tpl_routetable}
6 | aws__ami: ${tpl_ami}
7 |
--------------------------------------------------------------------------------
/terraform/aws/cloud-init.tpl:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | write_files:
4 | - encoding: b64
5 | content: ${tpl_priv_key}
6 | path: /tmp/id_rsa
7 | permissions: '0600'
8 | - content: |
9 | [default]
10 | aws_access_key_id = ${tpl_aws_access_key_id}
11 | aws_secret_access_key = ${tpl_aws_secret_access_key}
12 | path: /tmp/credentials
13 | permissions: '0600'
14 |
15 | runcmd:
16 | - while [ ! -f "/tmp/env.sh" ]; do sleep 5; done
17 | - sleep 5
18 | - source /tmp/env.sh
19 | - export aws__vpc="${tpl_vpc}"
20 | - export aws__sg="${tpl_sg}"
21 | - export aws__subnet="${tpl_subnet}"
22 | - export aws__gw="${tpl_gw}"
23 | - export aws__routetable="${tpl_routetable}"
24 | - export aws__ami="${tpl_ami}"
25 | - export cloud="aws"
26 | - export cluster="${tpl_cluster}"
27 | - export KUBECONFIG=/root/.kube/config
28 | - export HOME=/root
29 | - while [ ! -f "/tmp/${tpl_name}_scripts.sh" ]; do sleep 5; done
30 | - sleep 5
31 | - chmod +x /tmp/${tpl_name}_scripts.sh
32 | - /tmp/${tpl_name}_scripts.sh
33 |
--------------------------------------------------------------------------------
/terraform/aws/eks/eks_run_everywhere.tpl:
--------------------------------------------------------------------------------
1 | MIME-Version: 1.0
2 | Content-Type: multipart/mixed; boundary="==BOUNDARY=="
3 |
4 | --==BOUNDARY==
5 | Content-Type: text/cloud-config; charset="us-ascii"
6 |
7 | runcmd:
8 | - '${tpl_cmd}'
9 |
10 | --==BOUNDARY==--
11 |
12 |
--------------------------------------------------------------------------------
/terraform/aws/ocp4/ocp4-install-config.tpl:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | baseDomain: ${tpl_ocp4domain}
3 | compute:
4 | - hyperthreading: Enabled
5 | name: worker
6 | platform:
7 | aws:
8 | additionalSecurityGroupIDs:
9 | - ${tpl_ocp_sg}
10 | type: ${tpl_awstype}
11 | iamRole: ${tpl_aws_iamrole}
12 | replicas: ${tpl_nodes}
13 | controlPlane:
14 | hyperthreading: Enabled
15 | name: master
16 | platform: {}
17 | replicas: 3
18 | metadata:
19 | creationTimestamp: null
20 | name: pxd-${tpl_configname}-${tpl_cluster}
21 | networking:
22 | clusterNetwork:
23 | - cidr: 10.128.0.0/14
24 | hostPrefix: 23
25 | machineCIDR: ${tpl_cidr}
26 | networkType: OVNKubernetes
27 | serviceNetwork:
28 | - 172.30.0.0/16
29 | platform:
30 | aws:
31 | region: ${tpl_aws_region}
32 | userTags:
33 | %{ for tag_key, tag_value in tpl_aws_tag ~}
34 | ${tag_key} : ${tag_value}
35 | %{ endfor ~}
36 | subnets:
37 | - ${tpl_privsubnet}
38 | - ${tpl_pubsubnet}
39 | pullSecret: '${tpl_ocp4pullsecret}'
40 | sshKey: '${tpl_sshkey}'
41 |
42 |
--------------------------------------------------------------------------------
/terraform/aws/rancher/TODO.txt:
--------------------------------------------------------------------------------
1 | large-scale test needed as the clouddrive destroy function has been changed (now running after terraform destroy) (all platforms/clouds)
2 |
3 | OK use data source for rancher ami id
4 | -> test on other regions
5 |
6 | ? create option for rancher_domain or merge with ocp4_domain
7 |
8 | multiple clusters, handle exceptions for clusters (nodes,types...)
9 |
10 | ? route53 for workload clusters
11 | ? aws elb for l4
12 | ? no external IP for cluster nodes
13 |
14 | TODO:
15 | implement run_everywhere
16 | secure rancher cluster PW handling
17 | test AWS key change during runtime (new key on deletion)
18 |
19 | KNOWN ISSUES:
20 | cloud-init check sometimes shows errors
21 | creation of downstream clusters sometimes fails because the amazonec2 node driver is not yet cloudInitReady (unknown schema error)
22 |
23 | terraform destroy fails on helm releases because they throw errors.
24 | current workaround: remove helm releases from terraform state
25 | terraform -chdir=/px-deploy/.px-deploy/tf-deployments/dpaul-rancher/ state rm helm_release.cert_manager
26 | terraform -chdir=/px-deploy/.px-deploy/tf-deployments/dpaul-rancher/ state rm helm_release.rancher_server
27 |
--------------------------------------------------------------------------------
/terraform/aws/rancher/rancher-variables.tf:
--------------------------------------------------------------------------------
1 | variable "rancher_k3s_version" {
2 | type = string
3 | description = "Kubernetes version to use for Rancher server cluster"
4 | }
5 |
6 | variable "rancher_helm_repository" {
7 | type = string
8 | description = "The helm repository, where the Rancher helm chart is installed from"
9 | default = "https://releases.rancher.com/server-charts/latest"
10 | }
11 |
12 | variable "rancher_nodes" {
13 | description = "number of worker nodes"
14 | type = number
15 | }
16 |
17 | variable "cert_manager_version" {
18 | type = string
19 | description = "Version of cert-manager to install alongside Rancher (format: 0.0.0)"
20 | default = "1.16.2"
21 | }
22 |
23 | variable "rancher_version" {
24 | type = string
25 | description = "Rancher server version (format v0.0.0)"
26 | }
27 |
28 | variable "rancher_domain" {
29 | type = string
30 | description = "delegated route53 domain for clusters"
31 | }
32 |
33 | variable "admin_password" {
34 | type = string
35 | description = "Admin password to use for Rancher server bootstrap, min. 12 characters"
36 | default = "Rancher1!Rancher1!"
37 | }
38 |
39 | variable "rancher_k8s_version" {
40 | type = string
41 | description = "rancher workload k8s version"
42 | }
43 |
44 | variable "rancherclusters" {
45 | description = "map of clusternumber & aws_type"
46 | type = map
47 | }
48 |
49 | # will be injected by TF_VAR_AWS_ACCESS_KEY_ID during runtime
50 | variable "AWS_ACCESS_KEY_ID" {
51 | type = string
52 | default = ""
53 | }
54 |
55 | # will be injected by TF_VAR_AWS_SECRET_ACCESS_KEY during runtime
56 | variable "AWS_SECRET_ACCESS_KEY" {
57 | type = string
58 | default = ""
59 | }
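60 |
61 | # Example (illustrative; values are placeholders):
62 | #   export TF_VAR_AWS_ACCESS_KEY_ID="AKIA..."
63 | #   export TF_VAR_AWS_SECRET_ACCESS_KEY="..."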
--------------------------------------------------------------------------------
/terraform/aws/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name_prefix" {
2 |   description = "prefix to apply to names of resources"
3 | type = string
4 | default = "px-deploy"
5 | }
6 |
7 | variable "aws_tags" {
8 | description = "user-defined custom aws tags"
9 | type = map(string)
10 | }
11 |
12 | variable "config_name" {
13 | description = "px-deploy config name"
14 | type = string
15 | }
16 |
17 | variable "clusters" {
18 | description = "number of clusters to create"
19 | type = number
20 | }
21 |
22 | variable "nodeconfig" {
23 | description = "list / config of all ec2 instances"
24 | default = [{}]
25 | }
26 |
27 | variable "ip_base" {
28 |   description = "default first two IP octets"
29 | default = "192.168"
30 | }
31 |
32 | variable "aws_cidr_vpc" {
33 | description ="CIDR block for VPC"
34 | type = string
35 | default = "192.168.0.0/16"
36 | }
37 |
38 | variable "aws_cidr_sn" {
39 | description ="CIDR block for Subnet"
40 | type = string
41 | default = "192.168.0.0/16"
42 | }
43 |
44 | variable "aws_region" {
45 | description ="AWS Region"
46 | type = string
47 | }
48 |
49 | variable "aws_access_key_id" {
50 | description ="AWS Access Key"
51 | type = string
52 | }
53 |
54 | variable "aws_secret_access_key" {
55 | description ="AWS Secret Access Key"
56 | type = string
57 | }
58 |
59 | data "local_file" "env_script" {
60 | filename = "${path.module}/env.sh"
61 | }
62 |
--------------------------------------------------------------------------------
/terraform/azure/aks/aks.tf:
--------------------------------------------------------------------------------
1 | variable "aks_nodes" {
2 | description = "number of worker nodes"
3 | type = number
4 | }
5 |
6 | variable "aks_version" {
7 | description ="AKS K8S Version"
8 | type = string
9 | }
10 |
11 | variable "run_everywhere" {
12 | description = "content of run_everywhere"
13 | type = string
14 | default = "echo \"no run_everywhere set\""
15 | }
16 |
17 | variable "aksclusters" {
18 | description = "map of clusternumber & machine_type"
19 | type = map
20 | }
21 |
22 | data "azurerm_kubernetes_service_versions" "current" {
23 | location = azurerm_resource_group.rg.location
24 | version_prefix = var.aks_version
25 | }
26 |
27 | resource "azurerm_kubernetes_cluster" "aks" {
28 | for_each = var.aksclusters
29 | name = format("%s-%s-%s",var.name_prefix,var.config_name, each.key)
30 | location = azurerm_resource_group.rg.location
31 | resource_group_name = azurerm_resource_group.rg.name
32 | dns_prefix = format("aks-%s",each.key)
33 | kubernetes_version = data.azurerm_kubernetes_service_versions.current.latest_version
34 |
35 | default_node_pool {
36 | name = "default"
37 | node_count = var.aks_nodes
38 | vm_size = each.value
39 | tags = var.azure_tags
40 | }
41 |
42 | identity {
43 | type = "SystemAssigned"
44 | }
45 |
46 | tags = var.azure_tags
47 | }
48 |
49 | //output "client_certificate" {
50 | // value = azurerm_kubernetes_cluster.example.kube_config.0.client_certificate
51 | // sensitive = true
52 | //}
53 |
54 | /*
55 | output "kube_config" {
56 | for_each = var.aksclusters
57 | value = azurerm_kubernetes_cluster.aks[each.key].kube_config_raw
58 | //sensitive = true
59 | }
60 | */
61 |
--------------------------------------------------------------------------------
/terraform/azure/cloud-init.tpl:
--------------------------------------------------------------------------------
1 | #cloud-config
2 |
3 | write_files:
4 | - encoding: b64
5 | content: ${tpl_priv_key}
6 | path: /tmp/id_rsa
7 | permissions: '0600'
8 |
9 | runcmd:
10 | - while [ ! -f "/tmp/env.sh" ]; do sleep 5; done
11 | - sleep 5
12 | - source /tmp/env.sh
13 | - export azure_client_id="${tpl_azure_client}"
14 | - export azure_client_secret="${tpl_azure_secret}"
15 | - export azure_tentant_id="${tpl_azure_tenant}"
16 | - export azure__group="${tpl_azure_group}"
17 | - export cloud="azure"
18 | - export cluster="${tpl_cluster}"
19 | - export KUBECONFIG=/root/.kube/config
20 | - export HOME=/root
21 | - while [ ! -f "/tmp/${tpl_name}_scripts.sh" ]; do sleep 5; done
22 | - sleep 5
23 | - chmod +x /tmp/${tpl_name}_scripts.sh
24 | - /tmp/${tpl_name}_scripts.sh
25 |
--------------------------------------------------------------------------------
/terraform/azure/variables.tf:
--------------------------------------------------------------------------------
1 | variable "azure_region" {
2 | description ="Azure Region"
3 | type = string
4 | }
5 |
6 | variable "azure_client_id" {
7 | description ="Azure Client ID"
8 | type = string
9 | }
10 |
11 | variable "azure_tenant_id" {
12 | description ="Azure Tenant ID"
13 | type = string
14 | }
15 |
16 | variable "azure_client_secret" {
17 | description ="Azure client Secret"
18 | type = string
19 | }
20 |
21 | variable "azure_subscription_id" {
22 | description ="Azure Subscription ID"
23 | type = string
24 | }
25 |
26 | variable "config_name" {
27 | description = "px-deploy config name"
28 | type = string
29 | }
30 |
31 | variable "name_prefix" {
32 |   description = "prefix to apply to names of resources"
33 | type = string
34 | default = "px-deploy"
35 | }
36 |
37 | variable "azure_cidr_vnet" {
38 | description ="CIDR block for vnet"
39 | type = string
40 | default = "192.168.0.0/16"
41 | }
42 |
43 | variable "clusters" {
44 | description = "number of clusters to create"
45 | type = number
46 | }
47 |
48 | variable "nodeconfig" {
49 | description = "list / config of all vm instances"
50 | default = [{}]
51 | }
52 |
53 | variable "ip_base" {
54 |   description = "default first two IP octets"
55 | default = "192.168"
56 | }
57 |
58 | variable "azure_tags" {
59 | description = "user-defined custom azure tags"
60 | type = map(string)
61 | }
--------------------------------------------------------------------------------
/terraform/gcp/gcp-returns.tpl:
--------------------------------------------------------------------------------
1 |
2 | gcp__vpc: ${tpl_vpc}
3 |
4 |
--------------------------------------------------------------------------------
/terraform/gcp/gke/gke.tf:
--------------------------------------------------------------------------------
1 | variable "gkeclusters" {
2 |   description = "map of cluster number to machine type"
3 | type = map
4 | }
5 |
6 | variable "gke_version" {
7 | description = "GKE Version"
8 | type = string
9 | }
10 |
11 | variable "gke_nodes" {
12 | description = "GKE Nodes"
13 | type = number
14 | }
15 |
16 | data "google_container_engine_versions" "gkeversion" {
17 | location = format("%s-%s",var.gcp_region,var.gcp_zone)
18 | version_prefix = var.gke_version
19 | }
20 |
21 | resource "google_container_cluster" "gke" {
22 | for_each = var.gkeclusters
23 | // do not change naming scheme of cluster as this is referenced in destroy functions
24 | name = format("%s-%s-%s",var.name_prefix,var.config_name,each.key)
25 | location = format("%s-%s",var.gcp_region,var.gcp_zone)
26 | network = google_compute_network.vpc.id
27 | subnetwork = google_compute_subnetwork.subnet[each.key - 1].id
28 | initial_node_count = var.gke_nodes
29 | //node_version = data.google_container_engine_versions.gkeversion.release_channel_default_version["STABLE"]
30 | //min_master_version = data.google_container_engine_versions.gkeversion.release_channel_default_version["STABLE"]
31 | node_version = data.google_container_engine_versions.gkeversion.latest_node_version
32 | min_master_version = data.google_container_engine_versions.gkeversion.latest_master_version
33 | deletion_protection = false
34 |
35 | release_channel {
36 | channel = "UNSPECIFIED"
37 | }
38 |
39 | node_config {
40 | machine_type = each.value
41 | image_type = "UBUNTU_CONTAINERD"
42 | disk_type = "pd-standard"
43 | disk_size_gb = 50
44 | oauth_scopes = [ "compute-rw" ,"storage-ro"]
45 | }
46 |
47 | cluster_autoscaling {
48 | auto_provisioning_defaults {
49 | management {
50 | auto_upgrade = false
51 | }
52 | }
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/terraform/gcp/startup-script.tpl:
--------------------------------------------------------------------------------
1 | #! /bin/bash
2 | echo ${tpl_priv_key} | base64 -d > /tmp/id_rsa
3 | while [ ! -f "/tmp/env.sh" ]; do sleep 5; done
4 | sleep 5
5 | source /tmp/env.sh
6 | export cloud="gcp"
7 | export cluster="${tpl_cluster}"
8 | export KUBECONFIG=/root/.kube/config
9 | export HOME=/root
10 | while [ ! -f "/tmp/${tpl_name}_scripts.sh" ]; do sleep 5; done
11 | sleep 5
12 | chmod +x /tmp/${tpl_name}_scripts.sh
13 | /tmp/${tpl_name}_scripts.sh
14 |
--------------------------------------------------------------------------------
/terraform/gcp/variables.tf:
--------------------------------------------------------------------------------
1 | variable "gcp_region" {
2 | description ="GCP Region"
3 | type = string
4 | }
5 |
6 | variable "gcp_zone" {
7 | description ="GCP Zone"
8 | type = string
9 | }
10 |
11 | variable "gcp_project" {
12 | description ="GCP Project"
13 | type = string
14 | }
15 |
16 | variable "gcp_auth_json" {
17 | description ="GCP Authentication json"
18 | type = string
19 | default = "/px-deploy/.px-deploy/gcp.json"
20 | }
21 |
22 | variable "config_name" {
23 | description = "px-deploy config name"
24 | type = string
25 | }
26 |
27 | variable "name_prefix" {
28 |   description = "prefix to apply to names of resources"
29 | type = string
30 | default = "px-deploy"
31 | }
32 |
33 | variable "clusters" {
34 | description = "number of clusters to create"
35 | type = number
36 | }
37 |
38 | variable "nodeconfig" {
39 | description = "list / config of all gcp instances"
40 | default = [{}]
41 | }
42 |
43 | variable "ip_base" {
44 |   description = "default first two IP octets"
45 | default = "192.168"
46 | }
47 |
48 | variable "aws_tags" {
49 | description = "user-defined custom aws tags"
50 | type = map(string)
51 | }
52 |
--------------------------------------------------------------------------------
/terraform/vsphere/cloud-init.tpl:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | users:
3 | - default
4 | - name: rocky
5 | primary_group: rocky
6 | sudo: ALL=(ALL) NOPASSWD:ALL
7 | groups: sudo, wheel
8 | ssh_import_id: None
9 | lock_passwd: true
10 | ssh_authorized_keys: ${tpl_pub_key}
11 |
12 | write_files:
13 | - encoding: b64
14 | content: ${tpl_priv_key}
15 | path: /tmp/id_rsa
16 | permissions: '0600'
17 |
18 | runcmd:
19 | - while [ ! -f "/tmp/env.sh" ]; do sleep 5; done
20 | - sleep 5
21 | - source /tmp/env.sh
22 | - export cloud="vsphere"
23 | - export cluster="${tpl_cluster}"
24 | - export KUBECONFIG=/root/.kube/config
25 | - export HOME=/root
26 | - while [ ! -f "/tmp/${tpl_name}_scripts.sh" ]; do sleep 5; done
27 | - sleep 5
28 | - chmod +x /tmp/${tpl_name}_scripts.sh
29 | - /tmp/${tpl_name}_scripts.sh
30 |
--------------------------------------------------------------------------------
/terraform/vsphere/metadata.tpl:
--------------------------------------------------------------------------------
1 | local-hostname: ${tpl_name}
2 | instance-id: ${tpl_name}
3 | network:
4 | version: 2
5 | ethernets:
6 | ens192:
7 | dhcp4: false
8 | addresses:
9 | - ${tpl_ip}
10 | gateway4: ${tpl_gw}
11 | nameservers:
12 | addresses:
13 | - ${tpl_dns}
--------------------------------------------------------------------------------
/terraform/vsphere/variables.tf:
--------------------------------------------------------------------------------
1 | variable "config_name" {
2 | description = "px-deploy config name"
3 | type = string
4 | }
5 |
6 | variable "name_prefix" {
7 |   description = "prefix to apply to names of resources"
8 | type = string
9 | default = "px-deploy"
10 | }
11 |
12 | variable "clusters" {
13 | description = "number of clusters to create"
14 | type = number
15 | }
16 |
17 | variable "nodeconfig" {
18 |   description = "list / config of all vsphere instances"
19 | default = [{}]
20 | }
21 |
22 | variable "vsphere_host" {
23 | description = "vCenter Server"
24 | type = string
25 | }
26 |
27 | variable "vsphere_compute_resource" {
28 | description = "vSphere Cluster"
29 | type = string
30 | }
31 |
32 | variable "vsphere_resource_pool" {
33 | description = "vCenter resource pool"
34 | type = string
35 | }
36 |
37 | variable "vsphere_datacenter" {
38 | description = "vCenter Datacenter"
39 | type = string
40 | }
41 |
42 | variable "vsphere_template" {
43 | description = "px-deploy template"
44 | type = string
45 | }
46 |
47 | variable "vsphere_folder" {
48 | description = "vCenter Folder"
49 | type = string
50 | }
51 |
52 | variable "vsphere_user" {
53 | description = "vCenter user"
54 | type = string
55 | }
56 |
57 | variable "vsphere_password" {
58 | description = "vCenter password"
59 | type = string
60 | }
61 |
62 | variable "vsphere_datastore" {
63 | description = "vCenter Datastore"
64 | type = string
65 | }
66 |
67 | variable "vsphere_network" {
68 | description = "vCenter Network"
69 | type = string
70 | }
71 |
72 | variable "vsphere_memory" {
73 | description = "vSphere Memory"
74 | type = string
75 | }
76 |
77 | variable "vsphere_cpu" {
78 | description = "vSphere CPU"
79 | type = string
80 | }
81 |
82 | variable "vsphere_ip" {
83 | description = "vSphere VM starting IP"
84 | type = string
85 | default = ""
86 | }
87 |
88 | variable "vsphere_gw" {
89 | description = "vSphere VM Gateway"
90 | type = string
91 | default = ""
92 | }
93 |
94 | variable "vsphere_dns" {
95 | description = "vSphere VM DNS"
96 | type = string
97 | default = ""
98 | }
--------------------------------------------------------------------------------