├── CHANGELOG.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE.md ├── README.md ├── build ├── deploy ├── deploy-flagger ├── destroy ├── destroy-canary ├── manifests ├── other │ ├── autoscale.yaml │ └── deployment.yaml └── web │ ├── deployment-web.yaml │ ├── ingress-web.yaml │ ├── production │ ├── autoscale.yaml │ └── pdb.yaml │ └── service-web.yaml ├── push └── src └── common.bash /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Version 7.1.0 2 | 3 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:7.1.0 4 | 5 | ## Feature 6 | 7 | * Adds a deploy-flagger command for Flagger canary deployments 8 | 9 | ## Bugfixes 10 | 11 | * Update destroy command to include configmaps and certificates 12 | 13 | # Version 7.0.2 14 | 15 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:7.0.2 16 | 17 | ## Bugfixes 18 | 19 | * Herokuish build process couldn't build successfully unless at least one environment variable was set. 20 | 21 | # Version 7.0.1 22 | 23 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:7.0.1 24 | 25 | ## Updates 26 | 27 | * Updated helm to 2.14.1 28 | 29 | # Version 7.0.0 30 | 31 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:7.0.0 32 | 33 | ## Backwards Incompatible Changes 34 | 35 | * Deprecated use of ${DOLLAR} in manifests in favor of telling envsubst to only substitute environment variables that exist in the CI run. If you previously used ${DOLLAR}, you can just remove the {DOLLAR} portion and it should work fine. 36 | 37 | The following command (on macOS) should do the replacement for you: ```find kubernetes -type f -name '*.yaml' | xargs -I {} sed -i '' -e "s/\${DOLLAR}/$/g" "{}"``` 38 | 39 | ## Feature 40 | 41 | * Datadog notification text is more easily searchable. You can now overlay KDH deployment events by doing a string like "KDH Deployment: $NAMESPACE production", substituting $NAMESPACE for your namespace. 
42 | 43 | # Version 6.0.3 44 | 45 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:6.0.3 46 | 47 | ## Updates 48 | 49 | * Updated kubeval to 0.12.0 50 | 51 | ## Bugfixes 52 | 53 | * kubeval will skip missing schemas now 54 | 55 | # Version 6.0.0 56 | 57 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:6.0.0 58 | 59 | ## Updates 60 | 61 | * Updated docker dind image to docker:18.06.3-ce-dind 62 | 63 | * Updated Helm to 2.13.1 64 | 65 | * Updated kubectl to 1.12.6 66 | 67 | * Updated kubeval to 0.8.1 68 | 69 | # Version 5.0.1 70 | 71 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:5.0.1 72 | 73 | ## Bug Fixes 74 | 75 | * Fixed race condition in {{SECRETS}} substitution. 76 | 77 | * Added additional debugging to trap bash errors. 78 | 79 | # Version 5.0.0 80 | 81 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:5.0.0 82 | 83 | ## Feature 84 | 85 | * DEPLOY: KDH will determine the Kubernetes version you are deploying against and run kubeval manifests specifically against that version. 86 | 87 | * DEPLOY: You can now set KDH_SKIP_KUBEVAL variable to true in order to skip kubeval runs. 88 | 89 | # Version 4.0.3 90 | 91 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:4.0.3 92 | 93 | ## Feature 94 | 95 | * BUILD: Docker builds now pass the --pull flag in case you build from an image that pushes different content to the same tag. 96 | 97 | # Version 4.0.2 98 | 99 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:4.0.2 100 | 101 | ## Bug Fixes 102 | 103 | * Version 4.0.1 introduced a bug where deployment rollouts were not checked correctly. This is fixed. 104 | 105 | ## New Features 106 | 107 | * New Relic deployment notifications now use the v2 API 108 | 109 | * Deployment notifications have more contextual information about what was deployed. 
110 | 111 | # Version 4.0.1 - DO NOT USE 112 | 113 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:4.0.1 114 | 115 | ## Bug Fixes 116 | 117 | * Canary stage was deploying manifests without ```track: stable``` label. This shouldn't happen. Fixed. 118 | 119 | * KDH no longer tries to create the namespace if it exists already. This was causing issues because GitLab is now supposed to be creating the namespace when you add the GitLab integration. 120 | 121 | # Version 4.0.0 122 | 123 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:4.0.0 124 | 125 | ## Backwards Incompatible Changes 126 | 127 | * Starting in GitLab 11.5, GitLab decided to start managing service accounts for namespaces automatically. This broke k8s-deploy-helper because KUBE_TOKEN was no longer the token you specified in the cluster. Moving forward, k8s-deploy-helper is not going to generate a Kubernetes configuration itself, it will rely on GitLab to create it for us. 128 | 129 | ## New Features 130 | 131 | * Slack Deploy Events now support separate notifications per stage, allowing you to only send deploy events on specific stages if desired, or to different Slack channels for each stage. 132 | 133 | # Version 3.1.1 134 | 135 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:3.1.1 136 | 137 | ## Bug Fixes 138 | 139 | * BUILDARGS_FROM now works with buildpack builds. 140 | 141 | * BUILDARGS_FROM is less chatty. 142 | 143 | 144 | # Version 3.1.0 145 | 146 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:3.1.0 147 | 148 | **NOTE: Starting with this version, your GitLab runners need to run in privileged mode to allow for Heroku buildpack builds, and container scanning that will come in the next release.** 149 | 150 | ## New Features 151 | 152 | * BUILD - If no Dockerfile is present in the root directory, we will attempt to build an image using Heroku Buildpacks via herokuish. 
153 | 154 | * DEPLOY - If no kubernetes folder is present in the root directory, we will attempt to use our own default manifests. This has Procfile support, allowing you to run workers as well. See documentation for more details. 155 | 156 | ## Bug Fixes 157 | 158 | * 3.0.1 introduced a template evaluation error, fixed. 159 | 160 | * Slack and Datadog will now announce canary deploys as canary, rather than production. 161 | 162 | # Version 3.0.1 - DO NOT USE 163 | 164 | ## Changes 165 | 166 | * Kubeval will not evaluate ```cloud.google.com``` schemas correctly, so we skip these now. 167 | 168 | # Version 3.0.0 169 | 170 | ## Backwards Incompatible Changes 171 | * kubectl - Due to a bug in kubectl, you'll need to delete the gitlab-registry secret in your namespace before you deploy. Make sure to do it right before you do a deploy to make it non-impactful. To do this, run ```kubectl delete secret gitlab-registry -n=yournamespace```. 172 | 173 | * Deploy Token Usage - Previous versions used an actual GitLab username because GitLab didn't have persistent deploy tokens until recently. Now that this feature is in GitLab, we're going to stop using the shared credentials as this is much more secure. Create a deploy token at Settings->Repository->Deploy Tokens and make one named gitlab-deploy-token with read_registry access. As long as it's named gitlab-deploy-token, that's all you should have to do. 174 | 175 | * Canary Usage - Delete your canary manifest templates. We will create them automatically now. Make sure track: stable is present as labels in deployments you want to go out in the canary stage. 176 | 177 | ## New Features 178 | * DEPLOY - Canary manifests are now dynamically created in canary stages. 179 | * DEPLOY - Automatically insert secrets as environment variables into Kubernetes manifest using {{SECRETS}} command in manifest templates. 
Make sure {{SECRETS}} is indented right, so it looks something like: 180 | ``` 181 | env: 182 | {{SECRETS}} 183 | ``` 184 | * DEPLOY - Deploy script now uses [kubeval](https://github.com/garethr/kubeval) to look for manifest errors before manifests are applied. 185 | * DEPLOY - Deploy script now leaves a copy of the post-processed files in /tmp/kubernetes for easier debugging and artifact grabbing 186 | * BUILD - Added BUILDARGS_FROM feature. Set BUILDARGS_FROM=production in your gitlab-ci stage, and it will make your GitLab secrets become build arguments for your Docker container. EXAMPLE: If you set BUILDARGS_FROM=production and have SECRET_VAR1 and PRODUCTION_VAR2 defined, it will automatically create build arguments named var1 and var2. *It also turns on KDH_INSERT_ARGS and will dynamically insert build arguments into your Dockerfile before build.* This is for applications that require all the environment variables to exist at buildtime as a sanity check. 187 | * BUILD - Set KDH_INSERT_ARGS=true as a variable in your gitlab ci build stage and k8s-deploy-helper will automatically insert the ARG statements into your Dockerfile. 188 | 189 | ## Bug Fixes 190 | * BUILD - BUILDARGS_ can now handle spaces correctly 191 | 192 | ## Changes 193 | * Deployment events will not fire until after a deployment has been registered as rolled out successfully. 194 | 195 | # Version 2.0.3 196 | 197 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:2.0.3 198 | 199 | ## What's New? 200 | * Add ability to pass an variable to destroy in order to use destroy for namespaces with multiple apps 201 | 202 | # Version 2.0.2 203 | 204 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:2.0.2 205 | 206 | ## What's New? 207 | * Add DataDog [deployment event](https://github.com/lifechurch/k8s-deploy-helper#deploy-events) support 208 | 209 | # Version 2.0.1 210 | 211 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:2.0.1 212 | 213 | ## What's New? 
214 | * Add KDH_SKIP_LATEST flag to skip pulling the latest docker image and using it as cache-from. 215 | 216 | # Version 2.0.0 217 | 218 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:2.0.0 219 | 220 | ## What's New? 221 | * Canary Deploys 222 | 223 | # Version 1.0.0 224 | 225 | Docker Image: quay.io/lifechurch/k8s-deploy-helper:1.0.0 226 | 227 | ## What's New? 228 | * Initial release 229 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish to make via issue, email, or any other method with the owners of this repository before making a change. 4 | 5 | Please note we have a code of conduct, please follow it in all your interactions with the project. 6 | 7 | ## Pull Request Process 8 | 9 | 1. Ensure any install or build dependencies are removed before the end of the layer when doing a build. 10 | 2. Update the README.md with details of changes to the interface, this includes new environment variables, exposed ports, useful file locations and container parameters. 11 | 3. Increase the version numbers in the CHANGELOG.md to the new version that this Pull Request would represent. The versioning scheme we use is [SemVer](http://semver.org/). 12 | 4. You may merge the Pull Request in once you have the sign-off of two other developers, or if you do not have permission to do that, you may request the second reviewer to merge it for you. 
13 | 14 | ## Code of Conduct 15 | 16 | ### Our Pledge 17 | 18 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 19 | 20 | ### Our Standards 21 | 22 | Examples of behavior that contributes to creating a positive environment 23 | include: 24 | 25 | * Using welcoming and inclusive language 26 | * Being respectful of differing viewpoints and experiences 27 | * Gracefully accepting constructive criticism 28 | * Focusing on what is best for the community 29 | * Showing empathy towards other community members 30 | 31 | Examples of unacceptable behavior by participants include: 32 | 33 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 34 | * Trolling, insulting/derogatory comments, and personal or political attacks 35 | * Public or private harassment 36 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 37 | * Other conduct which could reasonably be considered inappropriate in a professional setting 38 | 39 | ### Our Responsibilities 40 | 41 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 42 | 43 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 
44 | 45 | ### Scope 46 | 47 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 48 | 49 | ### Enforcement 50 | 51 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at [developers@youversion.com](developers@youversion.com). All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 52 | 53 | Further details of specific enforcement policies may be posted separately. 54 | 55 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 56 | 57 | ### Attribution 58 | 59 | This Code of Conduct is adapted from the [Contributor Covenant, version 1.4](http://contributor-covenant.org/version/1/4). 
-------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker:18.06.3-ce-dind 2 | 3 | ENV HELM_VERSION="2.14.1" \ 4 | KUBECTL_VERSION="1.12.6" \ 5 | YQ_VERSION="2.1.1" \ 6 | KUBEVAL_VERSION="0.12.0" \ 7 | GLIBC_VERSION="2.28-r0" \ 8 | PATH=/opt/kubernetes-deploy:$PATH 9 | 10 | # Install pre-req 11 | RUN apk add -U openssl curl tar gzip bash ca-certificates git wget jq libintl coreutils \ 12 | && apk add --virtual build_deps gettext \ 13 | && mv /usr/bin/envsubst /usr/local/bin/envsubst \ 14 | && apk del build_deps 15 | 16 | # Install deploy scripts 17 | COPY / /opt/kubernetes-deploy/ 18 | 19 | # Install glibc for Alpine 20 | RUN wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub \ 21 | && wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/$GLIBC_VERSION/glibc-$GLIBC_VERSION.apk \ 22 | && apk add glibc-$GLIBC_VERSION.apk \ 23 | && rm glibc-$GLIBC_VERSION.apk 24 | 25 | # Install yq 26 | RUN wget -q -O /usr/local/bin/yq https://github.com/mikefarah/yq/releases/download/$YQ_VERSION/yq_linux_amd64 && chmod +x /usr/local/bin/yq 27 | 28 | # Install kubeval 29 | RUN wget https://github.com/garethr/kubeval/releases/download/$KUBEVAL_VERSION/kubeval-linux-amd64.tar.gz \ 30 | && tar xvfzmp kubeval-linux-amd64.tar.gz \ 31 | && mv kubeval /usr/local/bin \ 32 | && chmod +x /usr/local/bin/kubeval 33 | 34 | # Install kubectl 35 | RUN curl -L -o /usr/bin/kubectl https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl \ 36 | && chmod +x /usr/bin/kubectl \ 37 | && kubectl version --client 38 | 39 | # Install Helm 40 | RUN set -x \ 41 | && curl -fSL https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz -o helm.tar.gz \ 42 | && tar -xzvf helm.tar.gz \ 43 | && mv linux-amd64/helm /usr/local/bin/ \ 44 | && rm -rf linux-amd64 \ 
45 | && rm helm.tar.gz \ 46 | && helm help 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Life.Church 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Docker Repository on Quay](https://quay.io/repository/lifechurch/k8s-deploy-helper/status "Docker Repository on Quay")](https://quay.io/repository/lifechurch/k8s-deploy-helper) 2 | 3 | # Description 4 | k8s-deploy-helper (KDH) is a tool to help build and deploy containerized applications into Kubernetes using GitLab CI along with templated manifest files. 
Major features include: 5 | 6 | * Automated Kubernetes Secret Management using GitLab's UI. Version 3.0.0 can automatically insert secrets into Kubernetes manifests and Dockerfiles to lessen manual work. 7 | * Build via Heroku buildpacks or via Dockerfiles starting in 3.1.0. 8 | * Automated canary deployments. Dynamic creation is included in version 3.0.0. 9 | * Automated review app deployments. 10 | * Automated deployment of applications using Heroku Procfile in 3.1.0. 11 | * Deployment notifications to New Relic, Datadog and Slack. 12 | * Templated manifest deployments to Kubernetes living in the same repo as the code, giving developers more control. 13 | * Uses [kubeval](https://github.com/garethr/kubeval) to evaluate manifest yaml before any are deployed. 14 | * Easy, standardized image creation with build arguments and multiple Dockerfile support. 15 | * Standardized building conventions to allow for easy rollbacks through GitLab UI and better caching. 16 | 17 | 18 | This project is not endorsed or affiliated with GitLab in any way. 19 | 20 | # Examples 21 | In addition to this documentation, the best way to get started is to look at our [example repository](https://github.com/lifechurch/example-go). 22 | 23 | Need some help getting started? Feel free to join us on [Open Digerati Slack](https://join.slack.com/t/opendigerati/shared_invite/enQtMjU4MTcwOTIxMzMwLTcyYjQ4NWEwMzBlOGIzNDgyM2U5NzExYTY3NmI0MDE4MTRmMTQ5NjNhZWEyNDY3N2IyOWZjMDIxM2MwYjEwMmQ) in #k8s and we'll be more than happy to assist. 24 | 25 | # Why? 26 | GitLab's Auto DevOps initiative is amazing for getting simple apps running quickly, but for slightly more complex and production workloads, you need more control in the process. For instance, what if you have a pod with sidecar containers? What if you want a deployment of worker pods using something like celery for async work? You'll need to interact with Kubernetes at a deeper level to do stuff like this, and that's where our project comes in. 
27 | 28 | At Life.Church, we wanted to create a standardized tool along with corresponding conventions that our developers could use with GitLab CI to allow us to get up and running with manifest-based deployments as quickly and easily as possible. So, we took the work that GitLab started, and used it as the base of a new project that would meet our needs. 29 | 30 | This tool was built akin to an airplane that was built while we were flying it. Our process is constantly maturing as we learn more about how to deploy into k8s. Our goal isn't to say 'this is the best way of deploying', but simply to share how we're doing it now, knowing that it will at least be a helpful starting place for others who are embarking on their Kubernetes journey. 31 | 32 | # Prerequisites 33 | 34 | * GitLab w/customizable runners 35 | * Kubernetes 36 | 37 | ## Configuring GitLab Runner 38 | 39 | There is a lot of discussion around the best way to build docker images from within Docker. We ended up going the route of sharing the Docker socket. Here is a sample GitLab Runner configuration. Of particular note, is the volumes section, to share the socket the way we expect. Additionally, we use dind for some stages, so privileged needs to be turned on as well. 40 | 41 | ``` 42 | [[runners]] 43 | name = "runner01" 44 | limit = 16 45 | url = "https://runneriurl/ci" 46 | token = "token" 47 | executor = "docker" 48 | [runners.docker] 49 | tls_verify = false 50 | image = "docker:latest" 51 | privileged = true 52 | disable_cache = false 53 | volumes = ["/var/run/docker.sock:/var/run/docker.sock", "/cache"] 54 | shm_size = 0 55 | [runners.cache] 56 | ``` 57 | 58 | ## Integrate Kubernetes into your Project 59 | 60 | In your GitLab Project, go to Operations->Kubernetes to give GitLab the ability to talk to your Kubernetes cluster. See GitLab's documentation on how to do this properly. 
61 | 62 | ## GitLab Credentials 63 | GitLab has finally introduced a way to have persistent deploy tokens that can fetch things from the Docker registry. k8s-deploy-helper 3.0 now uses this more secure token. You can create a deploy token at Settings->Repository->Deploy Tokens and make one named ```gitlab-deploy-token``` with read_registry access. It HAS to be called ```gitlab-deploy-token``` due to GitLab limitations. Once this token is created, k8s-deploy-helper will pick it up automatically. 64 | 65 | # Building Docker Images 66 | 67 | Our goal was to make sure Docker containers could be built as quickly as possible without the developers having to micromanage each docker build command on a per-project basis. 68 | 69 | Here is a quick example from the .gitlab-ci.yml: 70 | 71 | ``` 72 | build_container: 73 | stage: build 74 | script: 75 | - command build 76 | only: 77 | - branches 78 | ``` 79 | 80 | Notice the script only has one command: ```command build``` - k8s-deploy-helper takes it from there, building the container, tagging it with a unique id (commit hash), and pushing it into the GitLab docker registry. 81 | 82 | ## Caching Docker FS Layers 83 | 84 | By default, k8s-deploy-helper and the accompanying examples use a convention where the build that is deployed to production is given the ```latest``` tag after successful deployment. When building Docker containers, we use --cache-from your docker image's ```latest``` tag. This will allow for more optimized caching when using multiple GitLab runners or runners without persistent storage. 85 | 86 | If you're managing your own runners, and you only have one, then you may want to think about setting a variable named ```KDH_SKIP_LATEST``` to ```true``` in your build stages or in the GitLab variables UI. When k8s-deploy-helper finds this variable set to true, we don't use --cache-from, and will just build the Docker container normally, which will try and make use of the cache that is already present in the runner. 
87 | 88 | ## Build Arguments 89 | 90 | Sometimes you need to pass in arguments to containers at build time to do things like putting a token in place to pull from a private npm registry. To pass in build arguments, simply go to your GitLab project and go to Settings->CI/CD->Variables and create a variable in this form: 91 | 92 | ```BUILDARG_npmtoken=1111``` 93 | 94 | When we build the Docker container, we look for all environment variables that start with ```BUILDARG_```, strip that prefix out, and pass it into docker via --build-arg. In the example above, this will create a build argument ```npmtoken``` with a value of ```1111``` 95 | 96 | In the example above, you would have to put the following line into your Dockerfile in order to use it at build time: 97 | 98 | ```ARG npmtoken``` 99 | 100 | ## Automatic ARG Insertion 101 | Starting in 3.0, you can set a variable named ```KDH_INSERT_ARGS``` to ```true```, and k8s-deploy-helper will automatically take all your build arguments and insert corresponding ARG commands into your Dockerfile at build time, immediately after FROM lines. This makes GitLab the source of truth and you as a developer will no longer have to worry about setting things in multiple places. 102 | 103 | Inserting things into your Dockerfile at runtime is definitely considered a bit magic though, so we make this a feature you have to opt into. 104 | 105 | ## Secrets as Buildargs 106 | Sometimes you may have an application that has logic that looks to make sure all environment variables are present before you can run any command (like asset generation). k8s-deploy-helper has the ability to automatically create secrets and use them in your Kubernetes manifests, which we will go into later on. If you set the variable ```BUILDARGS_FROM``` to ```production```, it will take all the secrets that would be created to run in the ```production``` stage and automatically use them as build arguments when creating your Docker container. 
This will also turn on the ```KDH_INSERT_ARGS``` feature and will insert ```ARG``` statements into your Dockerfile automatically. 107 | 108 | ## Build Multiple Dockerfiles 109 | 110 | If your project needs to build multiple Dockerfiles, the helper will automatically handle all the naming convention management to avoid collisions. All you need to do is pass in the file name of the Dockerfile that is in the root of your repository. For example, if you have two Dockerfiles, Dockerfile-app, and Dockerfile-worker, this is what your .gitlab-ci.yml would look like: 111 | 112 | ``` 113 | build_app: 114 | stage: build 115 | script: 116 | - command build Dockerfile-app 117 | only: 118 | - branches 119 | 120 | build_worker: 121 | stage: build 122 | script: 123 | - command build Dockerfile-worker 124 | only: 125 | - branches 126 | ``` 127 | 128 | ## Buildpack Builds 129 | Starting in 3.1.0, KDH can build applications using [Heroku Buildpacks](https://devcenter.heroku.com/articles/buildpacks) via [herokuish](https://github.com/gliderlabs/herokuish). To do this, we run the latest herokuish docker container to make sure you have access to the latest buildpacks. Because we mount your code into the herokuish docker container, we need to use dind, so you'll need to make sure your GitLab runner has privileged access. All you need to do is not have a Dockerfile in your root, and we'll use the buildpack method. In the gitlab-ci, you'll need to expose docker:stable-dind as a service like so: 130 | 131 | ``` 132 | build: 133 | stage: build 134 | services: 135 | - docker:stable-dind 136 | script: 137 | - command build 138 | only: 139 | - branches 140 | ``` 141 | 142 | You can use BUILDARG_ syntax from above to pass in build arguments, such as npm tokens, etc... 143 | 144 | # Kubernetes Deployment 145 | 146 | The key to success here is being able to use variables in your manifests. 
By using the right variables in the right places, you can have one single deployment manifest to maintain that can create deployments for review apps, staging, canaries, and production. See our [example repository](https://github.com/lifechurch/example-go) for more information on how to properly set up your manifests. 147 | 148 | ## Directory Structure 149 | To deploy applications into Kubernetes, you need to place your templated manifest files into a ```kubernetes``` directory at the root of your repository. 150 | 151 | ``` 152 | kubernetes 153 | |-->deployment.yaml 154 | |-->service.yaml 155 | ``` 156 | 157 | ## Per-Stage Directory Structure 158 | 159 | Sometimes you have manifests that you only want to run in particular stages. For instance, you may want horizontal pod autoscalers only for production, but not for staging or review apps. All you have to do is create a directory name that corresponds to your build stage. 160 | 161 | ``` 162 | kubernetes 163 | |production 164 | ||-->hpa.yaml 165 | |-->deployment.yaml 166 | |-->service.yaml 167 | ``` 168 | 169 | ## Debugging 170 | Starting in 3.0, k8s-deploy-helper renders all templates before trying to apply them. All rendered manifests are displayed in the runner for easy debugging. The rendered templates are put in ```/tmp/kubernetes``` if you want to grab them using GitLab artifacts for some reason. In addition, we now use kubeval. 171 | 172 | ## kubeval 173 | KDH will try to figure out the version of Kubernetes you are deploying to, and then target kubeval specifically for that version. 174 | 175 | Optionally, you can set the KDH_SKIP_KUBEVAL variable to true in order to skip the use of kubeval. 176 | 177 | ## Canary Deploys 178 | As of 3.0, k8s-deploy-helper will automatically support canary deployments via rewriting your deployment manifests. 
To use this functionality, you just need to be in a GitLab CI stage named canary, and k8s-deploy-helper will search for manifests where the ```track``` label is set to ```stable```. 179 | 180 | The canary stage operates as a production deployment. 181 | 182 | Check out our [example repo](https://www.github.com/lifechurch/example-go) to see how to set up your manifests to support this automation. 183 | 184 | ## Environment Variable Substitution 185 | As of 7.0, k8s-deploy-helper added some logic that will only substitute environment variables that exist into your manifest files. 186 | 187 | # Secret Management 188 | 189 | For people just getting started with deploying apps to Kubernetes, one of the first questions is 'how do I keep secrets out of my repositories?' k8s-deploy-helper has built-in secret management that allows you to securely use GitLab as the source of truth for all secrets. 190 | 191 | How k8s-deploy-helper handles secrets is probably the hardest part to wrap your mind around initially, so read these documents carefully. 192 | 193 | 194 | ## Secret Creation 195 | To create a secret, go to your GitLab project and go to Settings->CI/CD->Variables and create a variable with this name pattern: 196 | 197 | ```SECRET_mykeyname``` 198 | 199 | During deployment, our scripts will look for all environment variables that start with the prefix ```SECRET_```, strip out the prefix and stick the key and value into a kubernetes secret named ```$KUBE_NAMESPACE-secrets-$STAGE```, which translates to something like ```yournamespacename-secrets-production``` or ```yournamespacename-secrets-staging``` 200 | 201 | In the example above, there would be an entry in the secret file named ```mykeyname``` with the corresponding value you put in GitLab. You can then access these secrets in your manifest files. The below will create an environment variable in your pod called mykeyname. 
202 | 203 | ``` 204 | env: 205 | - name: mykeyname 206 | valueFrom: 207 | secretKeyRef: 208 | name: $KUBE_NAMESPACE-secrets-$STAGE 209 | key: mykeyname 210 | ``` 211 | **The important thing to note is that k8s-deploy-helper does the stripping of the SECRET_ prefix during secret creation TO RUN IN KUBERNETES. When dealing with stages outside of k8s-deploy-helper, like for instance, a testing stage or a stage that does database migrations, your variables are sent as is to your GitLab Runners, prefixes and all.** 212 | 213 | ## Per-Stage Secret Creation 214 | 215 | Sometimes you have secrets that have different values depending on if you're running in production or staging. Our helper allows you to do this by prefixing your secret with the uppercased version of your GitLab CI stage name. 216 | 217 | For example, let's say you have a secret called ```api_env```, that needs to have different values depending on if you're deploying to one of three stages: review, staging, or production. 218 | 219 | Instead of creating a variable in GitLab called ```SECRET_api_env```, you would create three: 220 | 221 | ``` 222 | REVIEW_api_env 223 | STAGING_api_env 224 | PRODUCTION_api_env 225 | ``` 226 | 227 | Combined with a templated section like below, this would pull in the secret from wherever. 228 | 229 | ``` 230 | env: 231 | - name: api_env 232 | valueFrom: 233 | secretKeyRef: 234 | name: $KUBE_NAMESPACE-secrets-$STAGE 235 | key: api_env 236 | imagePullSecrets: 237 | ``` 238 | ## Automated Secret Management in manifests 239 | 240 | New in version 3.0 is the {{SECRETS}} command. In the examples above, we gave the code that you would insert into manifests to make the secrets that k8s-deploy-helper creates in Kubernetes usable from within your deployments. This meant for adding a new secret, you would have to set the value of the secret in GitLab, and then add some code to the manifest to make it accessible. 
241 | 242 | Wanting to make developers' lives easier and make GitLab the source of truth, we introduced the {{SECRETS}} command that you can insert into your templates at the appropriate place, and when we render your manifest templates, we will loop through all the secrets that k8s-deploy-helper created on your behalf, and insert the appropriate code into the manifest for you! 243 | 244 | To use it, just stick {{SECRETS}} in the right place underneath your env: section. Make sure it's placed correctly. 245 | 246 | ``` 247 | env: 248 | {{SECRETS}} 249 | ``` 250 | 251 | # Deploy Events 252 | 253 | Currently NewRelic, Slack, Datadog, Instana, & Microsoft Teams deploy events are supported. 254 | 255 | In Gitlab for NewRelic, you'll need to add a secret variable with the NewRelic API key and App Ids 256 | for each stage you want a deployment event for. Like: 257 | 258 | ``` 259 | NEWRELIC_API_KEY=xxx 260 | NEWRELIC_STAGING_APP_ID=xxx 261 | NEWRELIC_PRODUCTION_APP_ID=xxx 262 | ``` 263 | 264 | For Slack, you can simply set a Gitlab secret variable with the [Slack webhook URL](https://api.slack.com/incoming-webhooks) if you want notifications for every stage. 265 | 266 | ``` 267 | SLACK_WEBHOOK=xxx 268 | ``` 269 | 270 | If you want notifications for specific stages use the following format. 271 | 272 | ``` 273 | SLACK_{{STAGE}}_WEBHOOK 274 | ``` 275 | 276 | E.g. 277 | 278 | ``` 279 | SLACK_STAGING_WEBHOOK=xxx 280 | SLACK_PRODUCTION_WEBHOOK=xxx 281 | ``` 282 | 283 | For Datadog, you *must* set your Datadog API key with: 284 | 285 | ``` 286 | DATADOG_API_KEY=xxx 287 | ``` 288 | 289 | Optionally, you may set an [app key, message text, and tags to send to Datadog](https://docs.datadoghq.com/api/?lang=bash#post-an-event). 290 | 291 | The text attribute supports markdown. [This help article](https://help.datadoghq.com/hc/en-us/articles/204778779) 292 | best explains how to add markdown text to the deploy event. 
293 | 294 | The DATADOG_TAGS variable can be used to send one or more tags with the event. Because this is an 295 | array in the POST, you *must* include quotes around each value. Multiple tags should then be 296 | separated by commas. 297 | 298 | ``` 299 | DATADOG_APP_KEY=xxx 300 | DATADOG_TAGS="deploys:api","foo:bar" 301 | DATADOG_TEXT=\n%%%\n### Success\n%%% 302 | ``` 303 | 304 | For Teams, you can simply set a Gitlab secret variable with a [Teams Incoming Webhook](https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook#add-an-incoming-webhook-to-a-teams-channel) if you want notifications for every stage. 305 | 306 | ``` 307 | TEAMS_WEBHOOK=xxx 308 | ``` 309 | 310 | If you want notifications for specific stages use the following format. 311 | 312 | ``` 313 | TEAMS_{{STAGE}}_WEBHOOK=xxx 314 | ``` 315 | 316 | E.g. 317 | 318 | ``` 319 | TEAMS_STAGING_WEBHOOK=xxx 320 | TEAMS_PRODUCTION_WEBHOOK=xxx 321 | ``` 322 | 323 | For Instana, you *must* set your Instana API Token & Instana Base URL with: 324 | 325 | ``` 326 | INSTANA_API_TOKEN=xxx 327 | INSTANA_BASE_URL=https:// 328 | ``` 329 | 330 | If you want notifications for specific stages use the following format. 331 | 332 | ``` 333 | INSTANA_{{STAGE}}_API_TOKEN 334 | ``` 335 | 336 | E.g. 337 | 338 | ``` 339 | INSTANA_STAGING_API_TOKEN=xxx 340 | INSTANA_PRODUCTION_API_TOKEN=xxx 341 | ``` 342 | 343 | Per Instana's docs it's important to note: 344 | 345 | The used API Token requires the permission “Configuration of releases”. 346 | A release has no scope and is therefore globally applied to the whole monitored system. 347 | 348 | Based on that last note you can set these variables at a group level and not have to manage them at the project level. 349 | 350 | # Manifest-less Deploys 351 | Starting in 3.1.0, we added an option for manifest-less deploys to help us migrate away from Deis Workflow. 
In order for this to work, we had to make some very opinionated decisions regarding our manifests. These may not work for your organization. If this is the case, we encourage you to fork our project and make your own default manifests. They can be found in the manifests directory. 352 | 353 | ## Manifest-less Requirements 354 | 355 | * nginx Ingress Controller 356 | 357 | * cert-manager or kube-lego that can issue "Let's Encrypt" certificates via the ```kubernetes.io/tls-acme: 'true'``` annotation. 358 | 359 | ## Conventions 360 | 361 | * We will obey Procfiles and every line will get its own deployment. Web will get an ingress, service, pod disruption budget, and autoscaling. Every other line will be treated as worker, and will just get autoscaling. 362 | 363 | ## Variables & Defaults 364 | 365 | ### Web 366 | 367 | * LIMIT_CPU: 1 - CPU Resource Limit 368 | 369 | * LIMIT_MEMORY: 512Mi - Memory Resource Limit 370 | 371 | * SCALE_REPLICAS (Production Only): Not Set - If SCALE_REPLICAS is set, SCALE_MIN and SCALE_MAX will be set to the value of SCALE_REPLICAS. 372 | 373 | * SCALE_MIN (Production Only): 2 - Minimum amount of running pods set in the HPA 374 | 375 | * SCALE_MAX (Production Only): 4 - Maximum amount of running pods set in the HPA 376 | 377 | * SCALE_CPU (Production Only): 60 - CPU usage at which autoscaling occurs 378 | 379 | * PDB_MIN (Production Only): 50% - Minimum available percentage 380 | 381 | * PORT: 5000 - The port your app listens on 382 | 383 | * PROBE_URL: / - The URL that will get hit for readiness probe 384 | 385 | * LIVENESS_PROBE: /bin/true - The command used for the liveness probe 386 | 387 | ### Other (workers) 388 | 389 | To set variables for your other runtimes specified in the Procfile, you can create variables with this pattern. For example, let's say you have a worker that's named ```worker``` in your Procfile and you want to assign 2 CPU to each pod, you would set a variable named ```worker_LIMIT_CPU``` to ```2```. 
390 | 391 | Variables you can set to control your worker stages are listed below, along with their default values. We'll refer to the name of your stage as ${1}. 392 | 393 | ``` 394 | ${1}_LIMIT_CPU: 1 395 | ${1}_LIMIT_MEMORY: 512Mi 396 | ${1}_LIVENESS_PROBE: /bin/true 397 | ${1}_REPLICAS: Not Set 398 | ${1}_SCALE_MIN: 1 399 | ${1}_SCALE_MAX: 1 400 | ${1}_SCALE_CPU : 60% 401 | ``` 402 | 403 | 404 | # Contributing 405 | 406 | Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct, and the process for submitting pull requests. 407 | 408 | # Versioning 409 | To make sure the community can use this project with their sanity intact, we will be committing to incrementing major versions when we introduce breaking changes. We anticipate this happening frequently, as this tool is still under heavy development. 410 | 411 | We use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/lifechurch/k8s-deploy-helper/tags). 412 | 413 | # License 414 | 415 | This project is licensed under the MIT License - see the [LICENSE.md](LICENSE.md) file for details. 
416 | -------------------------------------------------------------------------------- /build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOURCE="${BASH_SOURCE[0]}" 4 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 5 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 6 | SOURCE="$(readlink "$SOURCE")" 7 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 8 | done 9 | 10 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 11 | 12 | source "$DEPLOY_ROOT_DIR/src/common.bash" 13 | 14 | if [ "x$1" != "x" ]; then 15 | export DOCKERFILE=$1 16 | else 17 | export DOCKERFILE=Dockerfile 18 | fi 19 | 20 | # If BUILDARGS_FROM exists, change SECRET/STAGE_ prefixes to BUILDARG_ for later consumption 21 | buildargs_from 22 | 23 | # Set buildargs 24 | set_buildargs 25 | 26 | if [[ -n "$CI_REGISTRY_USER" ]]; then 27 | echo "Logging to GitLab Container Registry with CI credentials..." 28 | docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY" 29 | echo "" 30 | fi 31 | 32 | echo "Building application..." 33 | 34 | if [ "x$1" != "x" ]; then 35 | echo "Building Dockerfile-based application using the Dockerfile at $1" 36 | # Look for KDH_INSERT_ARGS and edit Dockerfile 37 | insert_args 38 | export SHORT=$(echo "$1" | cut -d'-' -f2-) 39 | if [ "x$KDH_SKIP_LATEST" == "xtrue" ]; then 40 | echo "KDH_SKIP_LATEST is true, not building from latest" 41 | dockerbuild="docker build --pull -t \"$CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG\" $buildargs -f $1 ." 42 | eval $dockerbuild 43 | else 44 | dockerbuild="docker build --pull --cache-from \"$CI_REGISTRY_IMAGE:$SHORT-latest\" -t \"$CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG\" $buildargs -f $1 ." 45 | eval $dockerbuild 46 | fi 47 | echo "Pushing to GitLab Container Registry..." 
48 | docker push "$CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG" 49 | 50 | elif [[ -f Dockerfile ]]; then 51 | echo "Building Dockerfile-based application..." 52 | # Look for KDH_INSERT_ARGS and edit Dockerfile 53 | insert_args 54 | if [ "x$KDH_SKIP_LATEST" == "xtrue" ]; then 55 | echo "KDH_SKIP_LATEST is true, not building from latest" 56 | dockerbuild="docker build --pull -t \"$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG\" $buildargs ." 57 | eval $dockerbuild 58 | else 59 | docker pull $CI_REGISTRY_IMAGE:latest || true 60 | dockerbuild="docker build --pull --cache-from \"$CI_REGISTRY_IMAGE:latest\" -t \"$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG\" $buildargs ." 61 | eval $dockerbuild 62 | fi 63 | echo "Pushing to GitLab Container Registry..." 64 | docker push "$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG" 65 | else 66 | export DOCKER_HOST='tcp://docker:2375' 67 | echo "No Dockerfile at root of repo. Using herokuish build packs." 68 | echo "Building env file" 69 | build_env 70 | echo "Starting herokuish" 71 | dockerrun="docker run -i --name=\"$CI_CONTAINER_NAME\" -v \"$(pwd):/tmp/app:ro\" gliderlabs/herokuish /bin/bash -c \"/bin/herokuish buildpack build && (rm /app/.env || true)\"" 72 | eval $dockerrun 73 | docker commit "$CI_CONTAINER_NAME" "$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG" 74 | docker rm "$CI_CONTAINER_NAME" 75 | echo "Configuring $CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG docker image..." 
76 | docker create --expose 5000 --env PORT=5000 --name="$CI_CONTAINER_NAME" "$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG" /bin/herokuish procfile start web 77 | docker commit "$CI_CONTAINER_NAME" "$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG" 78 | docker rm "$CI_CONTAINER_NAME" >/dev/null 79 | 80 | echo "Pushing to GitLab Container Registry" 81 | docker push "$CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG" 82 | fi 83 | -------------------------------------------------------------------------------- /deploy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SOURCE="${BASH_SOURCE[0]}" 3 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 4 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 5 | SOURCE="$(readlink "$SOURCE")" 6 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 7 | done 8 | 9 | trap 'last_command=$current_command; current_command=$BASH_COMMAND' DEBUG 10 | # echo an error message before exiting 11 | trap 'echo "\"${last_command}\" command filed with exit code $?."' EXIT 12 | 13 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 14 | 15 | source "$DEPLOY_ROOT_DIR/src/common.bash" 16 | 17 | ensure_deploy_variables 18 | 19 | CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_URL}" 20 | CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_HOSTNAME/http:\/\//}" 21 | CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_HOSTNAME/https:\/\//}" 22 | 23 | export CI_ENVIRONMENT_HOSTNAME=$CI_ENVIRONMENT_HOSTNAME 24 | 25 | echo "Creating namespace if it doesn't exist" 26 | kubectl get namespace $KUBE_NAMESPACE || cat < /dev/null; then 80 | ALL_STAGES=$(cat Procfile | grep -v "^web:" | grep -v "^#" | grep ":" | cut -d':' -f1) 81 | for line in $ALL_STAGES; do 82 | mkdir -p $CI_PROJECT_DIR/kubernetes/production 83 | cp /opt/kubernetes-deploy/manifests/other/deployment.yaml 
$CI_PROJECT_DIR/kubernetes/deployment-$line.yaml 84 | cp /opt/kubernetes-deploy/manifests/other/autoscale.yaml $CI_PROJECT_DIR/kubernetes/production/autoscale-$line.yaml 85 | sed -i -e "s/{{PREFIX}}/$line/g" $CI_PROJECT_DIR/kubernetes/deployment-$line.yaml 86 | sed -i -e "s/{{PREFIX}}/$line/g" $CI_PROJECT_DIR/kubernetes/production/autoscale-$line.yaml 87 | set_prefix_defaults $line 88 | done 89 | fi 90 | fi 91 | 92 | echo "Rendering your manifest templates." 93 | if [ -d "$CI_PROJECT_DIR/kubernetes" ]; then 94 | # Move any stage specific manifests into the main yaml 95 | echo "Moving stage specific yamls into job. Don't worry about any errors printed next" 96 | cp $CI_PROJECT_DIR/kubernetes/$CI_JOB_STAGE/*.yaml $CI_PROJECT_DIR/kubernetes || true 97 | 98 | # Render every template file using envsubst and look for {{SECRETS}} command 99 | for filename in $CI_PROJECT_DIR/kubernetes/*.yaml; do 100 | echo "Rendering $filename" 101 | basefile=$(basename $filename) 102 | if grep -q $'\t' "$filename"; then # -q: status-only check; the previous $(grep ...) form executed grep's output as a command 103 | echo "Found a tab in $filename. Tabs are not allowed in manifests. Quitting." 
104 | exit 1 105 | fi 106 | envsubst "`env | awk -F = '{printf \" \$%s\", \$1}'`" < $filename > /tmp/kubernetes/$basefile 107 | while grep "{{SECRETS}}" /tmp/kubernetes/$basefile | grep -v "#"; do 108 | line=$(grep -n -m 1 "{{SECRETS}}" /tmp/kubernetes/$basefile | grep -v "#") 109 | lineno=$(echo $line | cut -d':' -f1) 110 | spaces=$(sed "${lineno}!d" /tmp/kubernetes/$basefile | awk -F'[^ \t]' '{print length($1)}') 111 | spaces=$((spaces-1)) 112 | # Delete line that had {{SECRETS}} 113 | sed -i -e "${lineno}d" /tmp/kubernetes/$basefile 114 | while IFS='' read -r secretline || [[ -n "$secretline" ]]; do 115 | newline=$(printf "%*s%s" $spaces "" "$secretline") 116 | sed -i "${lineno}i\ ${newline}" /tmp/kubernetes/$basefile 117 | lineno=$((lineno+1)) 118 | done < "/tmp/secrets.yaml" 119 | done 120 | echo "Rendered manifest template to /tmp/kubernetes/$basefile" 121 | done 122 | echo "Done rendering templates." 123 | 124 | echo "Evaluating rendered manifests." 125 | # Dump the templated manifests to the log for easier debugging and run kubeval against each 126 | for filename in /tmp/kubernetes/*.yaml; do 127 | echo "Checking for GKE native manifests" 128 | if grep -q 'apiVersion: cloud.google.com' $filename; then 129 | echo "Found GKE K8s manifest - skipping - $filename" 130 | else 131 | echo 132 | cat "$filename" 133 | echo 134 | if [ "x$KDH_SKIP_KUBEVAL" != "xtrue" ]; then 135 | echo "Running kubeval against $filename" 136 | kubeval -v $kube_version --ignore-missing-schemas $filename 137 | fi 138 | fi 139 | done 140 | echo "Evaluation finished." 141 | echo 142 | echo "Applying rendered templates" 143 | # Start applying templates to Kubernetes 144 | for filename in /tmp/kubernetes/*.yaml; do 145 | # Grab the kind of manifest we're dealing with (ie, deployment, statefulset, etc..) 
146 | manifest_kind=$(yq read $filename kind) 147 | # Grab the track label, which GitLab uses to tell if something is a canary or not 148 | track=$(yq read $filename metadata.labels.track) 149 | shopt -s nocasematch 150 | # Build a list of application deployments we need to monitor and apply 151 | if [[ "$manifest_kind" == "deployment" ]] || [[ "$manifest_kind" == "statefulset" ]] || [[ "$manifest_kind" == "daemonset" ]]; then 152 | # If we are in a canary deploymentt 153 | if [[ "$ISCANARY" == "true" && "$track" == "stable" ]]; then 154 | echo "We are in a canary deployment and this is a stable manifest. Beginning manifest transformation." 155 | sed -i -e 's/track: stable/track: canary/g' $filename 156 | appname=$(yq r $filename metadata.name) 157 | sed -i -e "s/name: $appname/name: canary-$appname/g" $filename 158 | echo "Done transforming manifest, going to cat it out so you can see what it's doing" 159 | cat $filename 160 | fi 161 | 162 | if [[ "$ISCANARY" == "true" && "$track" != "stable" ]]; then 163 | echo "We are in a canary deployment and this is not a stable manifest, so we aren't going to deploy it." 164 | continue 165 | fi 166 | 167 | # Add deployment to rollout 168 | manifest_name=$(yq read $filename metadata.name) 169 | rollout+=("$manifest_kind/$manifest_name") 170 | fi 171 | 172 | # If we get here and are still in this loop, it means it should be safe to apply the manifest 173 | echo "Applying manifest: $filename" 174 | kubectl apply -f $filename 175 | done 176 | 177 | echo "Kubernetes manifests deployed" 178 | else 179 | echo "No kubernetes directory was found. Exiting." 180 | exit 1 181 | fi 182 | 183 | echo 184 | echo "Checking rollout status" 185 | for item in ${rollout[@]}; do 186 | echo "Checking rollout status for $item" 187 | kubectl rollout status -n "$KUBE_NAMESPACE" -w "$item" 188 | done 189 | 190 | get_deploy_events 191 | if [ "$NEWRELIC_API_KEY" ] && [ "$NEWRELIC_APP_ID" ]; then 192 | echo "Sending deploy event to NewRelic..." 
193 | curl --silent -X POST "https://api.newrelic.com/v2/applications/$NEWRELIC_APP_ID/deployments.json" \ 194 | -H "X-Api-Key:$NEWRELIC_API_KEY" -i \ 195 | -H "Content-Type: application/json" \ 196 | -d \ 197 | "{ 198 | \"deployment\": { 199 | \"revision\": \"$CI_COMMIT_SHA\", 200 | \"user\": \"$GITLAB_USER_LOGIN\" 201 | } 202 | }" > /dev/null || true 203 | fi 204 | 205 | if [ "$SLACK_WEBHOOK_URL" ]; then 206 | echo "Sending deploy event to Slack channel..." 207 | curl --silent -X POST -H 'Content-type: application/json' \ 208 | -d \ 209 | "{ 210 | \"text\": \"Kubernetes Deployment to $KUBE_NAMESPACE by $GITLAB_USER_LOGIN\", 211 | \"attachments\": [ 212 | {\"color\": \"good\", \"text\": \"Successfully deployed to $REAL_JOB_STAGE\"} 213 | ] 214 | }" $SLACK_WEBHOOK_URL > /dev/null || true 215 | fi 216 | 217 | if [ "$DATADOG_API_KEY" ]; then 218 | echo "Sending deploy event to Datadog..." 219 | URL="https://app.datadoghq.com/api/v1/events?api_key=$DATADOG_API_KEY" 220 | if [ "$DATADOG_APP_KEY" ]; then 221 | URL="$URL&app_key=$DATADOG_APP_KEY" 222 | fi 223 | if [[ -z "$DATADOG_TEXT" ]]; then 224 | DATADOG_TEXT="KDH Deployment: $KUBE_NAMESPACE $REAL_JOB_STAGE by $GITLAB_USER_LOGIN Tag: $CI_REGISTRY_TAG" 225 | fi 226 | curl -X POST -H "Content-type: application/json" \ 227 | -d \ 228 | "{ 229 | \"title\": \"KDH Deployment: $KUBE_NAMESPACE\", 230 | \"text\": \"$DATADOG_TEXT\", 231 | \"priority\": \"normal\", 232 | \"tags\": [\"$DATADOG_TAGS\"], 233 | \"alert_type\": \"info\", 234 | \"source_type_name\": \"KDH\" 235 | }" "$URL" > /dev/null || true 236 | fi 237 | 238 | if [ "$TEAMS_WEBHOOK_URL" ]; then 239 | echo "Sending deploy event to Teams channel..." 
240 | curl --silent -X POST -H 'Content-type: application/json' \ 241 | -d \ 242 | "{ 243 | \"text\": \"Kubernetes Deployment to $KUBE_NAMESPACE by $GITLAB_USER_LOGIN\n\n 244 | Successfully deployed to $REAL_JOB_STAGE\", 245 | }" $TEAMS_WEBHOOK_URL > /dev/null || true 246 | fi 247 | 248 | if [ "$INSTANA_API_TOKEN" ] && [ "$INSTANA_BASE_URL" ]; then 249 | echo "Sending deploy event to Instana..." 250 | URL="$INSTANA_BASE_URL/api/releases" 251 | # this gets the epoch in milliseconds (does not work on macOS) 252 | start_time=$(date +%s%3N) 253 | curl --silent -X POST \ 254 | -H 'Content-type: application/json' \ 255 | -H "Authorization: apiToken $INSTANA_API_TOKEN" \ 256 | -d \ 257 | "{ 258 | \"name\": \"$KUBE_NAMESPACE env:$REAL_JOB_STAGE\", 259 | \"start\": $start_time 260 | }" "$URL" > /dev/null || true 261 | fi 262 | 263 | echo "Application is accessible at: ${CI_ENVIRONMENT_URL}" 264 | echo "" 265 | -------------------------------------------------------------------------------- /deploy-flagger: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | SOURCE="${BASH_SOURCE[0]}" 3 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 4 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 5 | SOURCE="$(readlink "$SOURCE")" 6 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 7 | done 8 | 9 | trap 'last_command=$current_command; current_command=$BASH_COMMAND' DEBUG 10 | # echo an error message before exiting 11 | trap 'echo "\"${last_command}\" command filed with exit code $?."' EXIT 12 | 13 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 14 | 15 | source "$DEPLOY_ROOT_DIR/src/common.bash" 16 | 17 | ensure_deploy_variables 18 | 19 | CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_URL}" 20 | CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_HOSTNAME/http:\/\//}" 21 | 
CI_ENVIRONMENT_HOSTNAME="${CI_ENVIRONMENT_HOSTNAME/https:\/\//}" 22 | 23 | export CI_ENVIRONMENT_HOSTNAME=$CI_ENVIRONMENT_HOSTNAME 24 | 25 | echo "Creating namespace if it doesn't exist" 26 | kubectl get namespace $KUBE_NAMESPACE || cat < /dev/null; then 80 | ALL_STAGES=$(cat Procfile | grep -v "^web:" | grep -v "^#" | grep ":" | cut -d':' -f1) 81 | for line in $ALL_STAGES; do 82 | mkdir -p $CI_PROJECT_DIR/kubernetes/production 83 | cp /opt/kubernetes-deploy/manifests/other/deployment.yaml $CI_PROJECT_DIR/kubernetes/deployment-$line.yaml 84 | cp /opt/kubernetes-deploy/manifests/other/autoscale.yaml $CI_PROJECT_DIR/kubernetes/production/autoscale-$line.yaml 85 | sed -i -e "s/{{PREFIX}}/$line/g" $CI_PROJECT_DIR/kubernetes/deployment-$line.yaml 86 | sed -i -e "s/{{PREFIX}}/$line/g" $CI_PROJECT_DIR/kubernetes/production/autoscale-$line.yaml 87 | set_prefix_defaults $line 88 | done 89 | fi 90 | fi 91 | 92 | echo "Rendering your manifest templates." 93 | if [ -d "$CI_PROJECT_DIR/kubernetes" ]; then 94 | # Move any stage specific manifests into the main yaml 95 | echo "Moving stage specific yamls into job. Don't worry about any errors printed next" 96 | cp $CI_PROJECT_DIR/kubernetes/$CI_JOB_STAGE/*.yaml $CI_PROJECT_DIR/kubernetes || true 97 | 98 | # Render every template file using envsubst and look for {{SECRETS}} command 99 | for filename in $CI_PROJECT_DIR/kubernetes/*.yaml; do 100 | echo "Rendering $filename" 101 | basefile=$(basename $filename) 102 | if grep -q $'\t' "$filename"; then # -q: status-only check; the previous $(grep ...) form executed grep's output as a command 103 | echo "Found a tab in $filename. Tabs are not allowed in manifests. Quitting." 
104 | exit 1 105 | fi 106 | envsubst "`env | awk -F = '{printf \" \$%s\", \$1}'`" < $filename > /tmp/kubernetes/$basefile 107 | while grep "{{SECRETS}}" /tmp/kubernetes/$basefile | grep -v "#"; do 108 | line=$(grep -n -m 1 "{{SECRETS}}" /tmp/kubernetes/$basefile | grep -v "#") 109 | lineno=$(echo $line | cut -d':' -f1) 110 | spaces=$(sed "${lineno}!d" /tmp/kubernetes/$basefile | awk -F'[^ \t]' '{print length($1)}') 111 | spaces=$((spaces-1)) 112 | # Delete line that had {{SECRETS}} 113 | sed -i -e "${lineno}d" /tmp/kubernetes/$basefile 114 | while IFS='' read -r secretline || [[ -n "$secretline" ]]; do 115 | newline=$(printf "%*s%s" $spaces "" "$secretline") 116 | sed -i "${lineno}i\ ${newline}" /tmp/kubernetes/$basefile 117 | lineno=$((lineno+1)) 118 | done < "/tmp/secrets.yaml" 119 | done 120 | echo "Rendered manifest template to /tmp/kubernetes/$basefile" 121 | done 122 | echo "Done rendering templates." 123 | 124 | echo "Evaluating rendered manifests." 125 | # Dump the templated manifests to the log for easier debugging and run kubeval against each 126 | for filename in /tmp/kubernetes/*.yaml; do 127 | echo "Checking for GKE native manifests" 128 | if grep -q 'apiVersion: cloud.google.com' $filename; then 129 | echo "Found GKE K8s manifest - skipping - $filename" 130 | else 131 | echo 132 | cat "$filename" 133 | echo 134 | if [ "x$KDH_SKIP_KUBEVAL" != "xtrue" ]; then 135 | echo "Running kubeval against $filename" 136 | kubeval -v $kube_version --ignore-missing-schemas $filename 137 | fi 138 | fi 139 | done 140 | echo "Evaluation finished." 141 | echo 142 | echo "Applying rendered templates" 143 | # Start applying templates to Kubernetes 144 | for filename in /tmp/kubernetes/*.yaml; do 145 | # Grab the kind of manifest we're dealing with (ie, deployment, statefulset, etc..) 
146 | manifest_kind=$(yq read $filename kind) 147 | # Grab the track label, which GitLab uses to tell if something is a canary or not 148 | track=$(yq read $filename metadata.labels.track) 149 | shopt -s nocasematch 150 | # Build a list of application deployments we need to monitor and apply 151 | if [[ "$manifest_kind" == "deployment" ]] || [[ "$manifest_kind" == "statefulset" ]] || [[ "$manifest_kind" == "daemonset" ]] || [[ "$manifest_kind" == "canary" ]]; then 152 | # If we are in a canary deployment 153 | if [[ "$ISCANARY" == "true" && "$track" == "stable" ]]; then 154 | echo "We are in a canary deployment and this is a stable manifest. Beginning manifest transformation." 155 | sed -i -e 's/track: stable/track: canary/g' $filename 156 | appname=$(yq r $filename metadata.name) 157 | sed -i -e "s/name: $appname/name: canary-$appname/g" $filename 158 | echo "Done transforming manifest, going to cat it out so you can see what it's doing" 159 | cat $filename 160 | fi 161 | 162 | if [[ "$ISCANARY" == "true" && "$track" != "stable" ]]; then 163 | echo "We are in a canary deployment and this is not a stable manifest, so we aren't going to deploy it." 164 | continue 165 | fi 166 | 167 | # Add deployment to rollout 168 | manifest_name=$(yq read $filename metadata.name) 169 | rollout+=("$manifest_kind/$manifest_name") 170 | fi 171 | 172 | # If we get here and are still in this loop, it means it should be safe to apply the manifest 173 | echo "Applying manifest: $filename" 174 | kubectl apply -f $filename 175 | done 176 | 177 | echo "Kubernetes manifests deployed" 178 | else 179 | echo "No kubernetes directory was found. Exiting." 
180 | exit 1 181 | fi 182 | 183 | echo 184 | echo "Checking rollout status" 185 | for item in ${rollout[@]}; do 186 | echo "Checking rollout status for $item" 187 | 188 | if [[ $item =~ "canary/" ]]; then 189 | # wait for Flagger to detect the change 190 | ok=false 191 | timeout=false 192 | thefail=false 193 | 194 | until ${ok}; do 195 | kubectl get $item -n "$KUBE_NAMESPACE" | grep 'Progressing' && ok=true || ok=false 196 | sleep 5 197 | done 198 | 199 | # wait for the canary analysis to finish 200 | echo "Your release is getting canaried. Waiting for it to succeed." 201 | kubectl wait $item -n "$KUBE_NAMESPACE" --for=condition=promoted --timeout=5m || timeout=true 202 | 203 | if ${timeout}; then 204 | echo "There was an issue promoting $item" 205 | kubectl describe $item 206 | exit 1 207 | fi 208 | 209 | # check if the deployment was successful 210 | kubectl get $item -n "$KUBE_NAMESPACE" | grep Succeeded || thefail=true 211 | 212 | if ${thefail}; then 213 | echo "There was an issue canarying $item" 214 | kubectl describe $item 215 | exit 1 216 | fi 217 | else 218 | kubectl rollout status -n "$KUBE_NAMESPACE" -w "$item" 219 | fi 220 | 221 | done 222 | 223 | get_deploy_events 224 | if [ "$NEWRELIC_API_KEY" ] && [ "$NEWRELIC_APP_ID" ]; then 225 | echo "Sending deploy event to NewRelic..." 226 | curl --silent -X POST "https://api.newrelic.com/v2/applications/$NEWRELIC_APP_ID/deployments.json" \ 227 | -H "X-Api-Key:$NEWRELIC_API_KEY" -i \ 228 | -H "Content-Type: application/json" \ 229 | -d \ 230 | "{ 231 | \"deployment\": { 232 | \"revision\": \"$CI_COMMIT_SHA\", 233 | \"user\": \"$GITLAB_USER_LOGIN\" 234 | } 235 | }" > /dev/null || true 236 | fi 237 | 238 | if [ "$SLACK_WEBHOOK_URL" ]; then 239 | echo "Sending deploy event to Slack channel..." 
240 | curl --silent -X POST -H 'Content-type: application/json' \ 241 | -d \ 242 | "{ 243 | \"text\": \"Kubernetes Deployment to $KUBE_NAMESPACE by $GITLAB_USER_LOGIN\", 244 | \"attachments\": [ 245 | {\"color\": \"good\", \"text\": \"Successfully deployed to $REAL_JOB_STAGE\"} 246 | ] 247 | }" $SLACK_WEBHOOK_URL > /dev/null || true 248 | fi 249 | 250 | if [ "$DATADOG_API_KEY" ]; then 251 | echo "Sending deploy event to Datadog..." 252 | URL="https://app.datadoghq.com/api/v1/events?api_key=$DATADOG_API_KEY" 253 | if [ "$DATADOG_APP_KEY" ]; then 254 | URL="$URL&app_key=$DATADOG_APP_KEY" 255 | fi 256 | if [[ -z "$DATADOG_TEXT" ]]; then 257 | DATADOG_TEXT="KDH Deployment: $KUBE_NAMESPACE $REAL_JOB_STAGE by $GITLAB_USER_LOGIN Tag: $CI_REGISTRY_TAG" 258 | fi 259 | curl -X POST -H "Content-type: application/json" \ 260 | -d \ 261 | "{ 262 | \"title\": \"KDH Deployment: $KUBE_NAMESPACE\", 263 | \"text\": \"$DATADOG_TEXT\", 264 | \"priority\": \"normal\", 265 | \"tags\": [\"$DATADOG_TAGS\"], 266 | \"alert_type\": \"info\", 267 | \"source_type_name\": \"KDH\" 268 | }" "$URL" > /dev/null || true 269 | fi 270 | 271 | echo "Application is accessible at: ${CI_ENVIRONMENT_URL}" 272 | echo "" 273 | -------------------------------------------------------------------------------- /destroy: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOURCE="${BASH_SOURCE[0]}" 4 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 5 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 6 | SOURCE="$(readlink "$SOURCE")" 7 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 8 | done 9 | 10 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 11 | 12 | source "$DEPLOY_ROOT_DIR/src/common.bash" 13 | 14 | if [ "x$1" != "x" ]; then 15 | echo "Removing all deployment objects for $1" 16 | 
kubectl delete deploy,pod,rs,svc,ing,ds -l "$1" -n "$KUBE_NAMESPACE" 17 | else 18 | echo "Removing all deployment objects" 19 | kubectl delete deploy,pod,rs,svc,ing,ds,configmap -l "app=$CI_ENVIRONMENT_SLUG" -n "$KUBE_NAMESPACE" 20 | echo "We are deleting on the environment=$CI_ENVIRONMENT_SLUG label for review apps with workers. You should be able to ignore errors below." 21 | kubectl delete deploy,pod,rs,svc,ing,ds -l "environment=$CI_ENVIRONMENT_SLUG" -n "$KUBE_NAMESPACE" || true 22 | fi 23 | 24 | echo "Removing auto-generated secrets..." 25 | kubectl delete secret $KUBE_NAMESPACE-secrets-$STAGE -n "$KUBE_NAMESPACE" || true 26 | 27 | echo "Removing certificates..." 28 | kubectl delete certificate -l "app=$CI_ENVIRONMENT_SLUG" -n "$KUBE_NAMESPACE" || true -------------------------------------------------------------------------------- /destroy-canary: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOURCE="${BASH_SOURCE[0]}" 4 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 5 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 6 | SOURCE="$(readlink "$SOURCE")" 7 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 8 | done 9 | 10 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 11 | 12 | source "$DEPLOY_ROOT_DIR/src/common.bash" 13 | 14 | echo "Removing all deployment objects for the canary" 15 | kubectl delete deploy,pod,rs,svc,ing,ds -l "track=canary" -n "$KUBE_NAMESPACE" 16 | -------------------------------------------------------------------------------- /manifests/other/autoscale.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: autoscaling/v2beta1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | labels: 6 | app: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 7 | environment: 
$CI_ENVIRONMENT_SLUG 8 | name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 9 | namespace: $KUBE_NAMESPACE 10 | spec: 11 | scaleTargetRef: 12 | apiVersion: apps/v1beta1 13 | kind: Deployment 14 | name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 15 | minReplicas: ${{PREFIX}}_SCALE_MIN 16 | maxReplicas: ${{PREFIX}}_SCALE_MAX 17 | metrics: 18 | - type: Resource 19 | resource: 20 | name: cpu 21 | targetAverageUtilization: ${{PREFIX}}_SCALE_CPU -------------------------------------------------------------------------------- /manifests/other/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 6 | namespace: $KUBE_NAMESPACE 7 | labels: 8 | app: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 9 | environment: $CI_ENVIRONMENT_SLUG 10 | space: $KUBE_NAMESPACE 11 | track: stable 12 | spec: 13 | selector: 14 | matchLabels: 15 | app: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 16 | name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 17 | environment: $CI_ENVIRONMENT_SLUG 18 | track: stable 19 | template: 20 | metadata: 21 | labels: 22 | name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 23 | app: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 24 | track: stable 25 | environment: $CI_ENVIRONMENT_SLUG 26 | space: $KUBE_NAMESPACE 27 | annotations: 28 | build_id: "$CI_JOB_ID" 29 | spec: 30 | terminationGracePeriodSeconds: 60 31 | containers: 32 | - name: {{PREFIX}}-$CI_ENVIRONMENT_SLUG 33 | image: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA 34 | command: ["/bin/herokuish"] 35 | args: ["procfile", "start", "{{PREFIX}}"] 36 | imagePullPolicy: IfNotPresent 37 | ports: 38 | - containerPort: $PORT 39 | resources: 40 | limits: 41 | cpu: "${{PREFIX}}_LIMIT_CPU" 42 | memory: "${{PREFIX}}_LIMIT_MEMORY" 43 | livenessProbe: 44 | exec: 45 | command: 46 | - /bin/bash 47 | - -c 48 | - ${{PREFIX}}_LIVENESS_PROBE 49 | env: 50 | {{SECRETS}} 51 | imagePullSecrets: 52 | - name: gitlab-registry 
# ================= /manifests/web/deployment-web.yaml =================
# Stable-track Deployment for the "web" Procfile process; $VARS by envsubst.
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: web-$CI_ENVIRONMENT_SLUG
  namespace: $KUBE_NAMESPACE
  labels:
    app: $CI_ENVIRONMENT_SLUG
    environment: $CI_ENVIRONMENT_SLUG
    space: $KUBE_NAMESPACE
    track: stable
spec:
  selector:
    matchLabels:
      app: $CI_ENVIRONMENT_SLUG
      environment: $CI_ENVIRONMENT_SLUG
      name: web-$CI_ENVIRONMENT_SLUG
      track: stable
  template:
    metadata:
      labels:
        name: web-$CI_ENVIRONMENT_SLUG
        environment: $CI_ENVIRONMENT_SLUG
        app: $CI_ENVIRONMENT_SLUG
        track: stable
        space: $KUBE_NAMESPACE
      annotations:
        build_id: "$CI_JOB_ID"
    spec:
      terminationGracePeriodSeconds: 60
      containers:
      - name: web-$CI_ENVIRONMENT_SLUG
        image: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: $PORT
        lifecycle:
          preStop:
            exec:
              # Give the LB time to drain connections before SIGTERM.
              command: ["bash", "-c", "sleep 20"]
        resources:
          limits:
            cpu: "$LIMIT_CPU"
            memory: "$LIMIT_MEMORY"
        livenessProbe:
          exec:
            command:
            - /bin/bash
            - -c
            - $LIVENESS_PROBE
        readinessProbe:
          httpGet:
            path: $PROBE_URL
            port: $PORT
            scheme: HTTP
            httpHeaders:
            - name: Host
              value: 127.0.0.1
          initialDelaySeconds: 5
          timeoutSeconds: 2
          periodSeconds: 3
          failureThreshold: 10
        env:
        # {{SECRETS}} is replaced with generated env entries from /tmp/secrets.yaml;
        # indentation of the generated block was lost in this dump — TODO confirm.
        {{SECRETS}}
      imagePullSecrets:
      - name: gitlab-registry
# ================= /manifests/web/ingress-web.yaml =================
# TLS-terminated Ingress for the web Service.
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web-$CI_ENVIRONMENT_SLUG
  namespace: $KUBE_NAMESPACE
  labels:
    app: $CI_ENVIRONMENT_SLUG
    pipeline_id: "$CI_PIPELINE_ID"
    build_id: "$CI_JOB_ID"
    environment: $CI_ENVIRONMENT_SLUG
  annotations:
    kubernetes.io/tls-acme: 'true'
    # BUGFIX: the annotation key is "kubernetes.io/ingress.class" (dot, not dash);
    # the hyphenated form is not recognized by ingress controllers.
    kubernetes.io/ingress.class: 'nginx'
spec:
  tls:
  - hosts:
    - $CI_ENVIRONMENT_HOSTNAME
    secretName: $CI_ENVIRONMENT_SLUG-tls
  rules:
  - host: $CI_ENVIRONMENT_HOSTNAME
    http:
      paths:
      - path: /
        backend:
          serviceName: web-$CI_ENVIRONMENT_SLUG
          servicePort: $PORT
# ================= /manifests/web/production/autoscale.yaml =================
# Production HPA for the web Deployment (defaults set in src/common.bash).
---
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  labels:
    app: $CI_ENVIRONMENT_SLUG
    environment: $CI_ENVIRONMENT_SLUG
  name: web-$CI_ENVIRONMENT_SLUG
  namespace: $KUBE_NAMESPACE
spec:
  scaleTargetRef:
    apiVersion: apps/v1beta1
    kind: Deployment
    name: web-$CI_ENVIRONMENT_SLUG
  minReplicas: $SCALE_MIN
  maxReplicas: $SCALE_MAX
  metrics:
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: $SCALE_CPU
# ================= /manifests/web/production/pdb.yaml =================
# PodDisruptionBudget keeping $PDB_MIN of web pods up during voluntary disruption.
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: web-$CI_ENVIRONMENT_SLUG
  namespace: $KUBE_NAMESPACE
  labels:
    app: $CI_ENVIRONMENT_SLUG
    environment: $CI_ENVIRONMENT_SLUG
    pipeline_id: "$CI_PIPELINE_ID"
    build_id: "$CI_JOB_ID"
spec:
  minAvailable: $PDB_MIN
  selector:
    matchLabels:
      app: $CI_ENVIRONMENT_SLUG
# ================= /manifests/web/service-web.yaml =================
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: web-$CI_ENVIRONMENT_SLUG 6 | namespace: $KUBE_NAMESPACE 7 | labels: 8 | app: $CI_ENVIRONMENT_SLUG 9 | environment: $CI_ENVIRONMENT_SLUG 10 | spec: 11 | ports: 12 | - port: 80 13 | targetPort: $PORT 14 | protocol: TCP 15 | name: http 16 | selector: 17 | app: $CI_ENVIRONMENT_SLUG -------------------------------------------------------------------------------- /push: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SOURCE="${BASH_SOURCE[0]}" 4 | while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink 5 | DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 6 | SOURCE="$(readlink "$SOURCE")" 7 | [[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located 8 | done 9 | 10 | export DEPLOY_ROOT_DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" 11 | 12 | source "$DEPLOY_ROOT_DIR/src/common.bash" 13 | 14 | if [[ -n "$CI_REGISTRY_USER" ]]; then 15 | echo "Logging to GitLab Container Registry with CI credentials..." 16 | docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY" 17 | echo "" 18 | fi 19 | 20 | echo "Tagging application as latest..." 21 | 22 | if [ "x$1" != "x" ]; then 23 | echo "Tagging $CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG as $CI_REGISTRY_IMAGE:$SHORT-latest" 24 | export SHORT=$(echo "$1" | cut -d'-' -f2-) 25 | docker pull $CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG 26 | docker tag $CI_REGISTRY_IMAGE:$SHORT-$CI_REGISTRY_TAG $CI_REGISTRY_IMAGE:$SHORT-latest 27 | echo "Pushing to GitLab Container Registry..." 
28 | docker push $CI_REGISTRY_IMAGE:$SHORT-latest 29 | else 30 | echo "Tagging $CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG as $CI_REGISTRY_IMAGE:latest" 31 | docker pull $CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG 32 | docker tag $CI_REGISTRY_IMAGE:$CI_REGISTRY_TAG $CI_REGISTRY_IMAGE:latest 33 | echo "Pushing to GitLab Container Registry..." 34 | docker push "$CI_REGISTRY_IMAGE:latest" 35 | fi 36 | -------------------------------------------------------------------------------- /src/common.bash: -------------------------------------------------------------------------------- 1 | set -eo pipefail 2 | 3 | [[ "$TRACE" ]] && set -x 4 | 5 | export CI_CONTAINER_NAME="ci_job_build_$CI_JOB_ID" 6 | export CI_REGISTRY_TAG="$CI_COMMIT_SHA" 7 | 8 | if [[ "$CI_JOB_STAGE" == "review" ]]; then 9 | export STAGE="$CI_ENVIRONMENT_SLUG" 10 | else 11 | export STAGE="$CI_JOB_STAGE" 12 | fi 13 | 14 | if [[ "$CI_JOB_STAGE" == "canary" ]]; then 15 | export STAGE="production" 16 | export CI_JOB_STAGE="production" 17 | export ISCANARY="true" 18 | export REAL_JOB_STAGE="canary" 19 | echo "We are in a canary deploy, behaving like production but looking for track:" 20 | else 21 | export ISCANARY="false" 22 | export REAL_JOB_STAGE=$CI_JOB_STAGE 23 | fi 24 | 25 | ensure_deploy_variables() { 26 | if [[ -z "$KUBE_URL" ]]; then 27 | echo "ERROR: Missing KUBE_URL. Make sure to configure the Kubernetes Cluster in Operations->Kubernetes" 28 | exit 1 29 | fi 30 | if [[ -z "$KUBE_TOKEN" ]]; then 31 | echo "ERROR: Missing KUBE_TOKEN. Make sure to configure the Kubernetes Cluster in Operations->Kubernetes" 32 | exit 1 33 | fi 34 | if [[ -z "$KUBE_NAMESPACE" ]]; then 35 | echo "ERROR: Missing KUBE_NAMESPACE. Make sure to configure the Kubernetes Cluster in Operations->Kubernetes" 36 | exit 1 37 | fi 38 | if [[ -z "$CI_ENVIRONMENT_SLUG" ]]; then 39 | echo "ERROR: Missing CI_ENVIRONMENT_SLUG. 
Make sure to configure the Kubernetes Cluster in Operations->Kubernetes" 40 | exit 1 41 | fi 42 | if [[ -z "$CI_ENVIRONMENT_URL" ]]; then 43 | echo "ERROR: Missing CI_ENVIRONMENT_URL. Make sure to configure the Kubernetes Cluster in Operations->Kubernetes" 44 | exit 1 45 | fi 46 | if [[ -z "$CI_DEPLOY_USER" ]]; then 47 | echo "ERROR: Missing CI_DEPLOY_USER. Create a deploy token at Settings->Repository->Deploy Tokens and make one named gitlab-deploy-token with read_registry access." 48 | exit 1 49 | fi 50 | if [[ -z "$CI_DEPLOY_PASSWORD" ]]; then 51 | echo "ERROR: Missing CI_DEPLOY_PASSWORD. Create a deploy token at Settings->Repository->Deploy Tokens and make one named gitlab-deploy-token with read_registry access." 52 | exit 1 53 | fi 54 | } 55 | 56 | ping_kube() { 57 | if kubectl version > /dev/null; then 58 | echo "Kubernetes is online!" 59 | echo "" 60 | else 61 | echo "Cannot connect to Kubernetes." 62 | return 1 63 | fi 64 | } 65 | 66 | buildargs_from() { 67 | if [[ -n "$BUILDARGS_FROM" ]]; then 68 | echo "BUILDARGS_FROM is set to $BUILDARGS_FROM, starting clone secret operation." 
69 | echo "Turning on the KDH_INSERT_ARGS flag" 70 | export KDH_INSERT_ARGS=true 71 | if env | grep -i -e '^SECRET_' > /dev/null; then 72 | IFS=$'\n' 73 | SECRETS=$(env | grep -i -e '^SECRET_') 74 | for i in $SECRETS; do 75 | fullkey=$(echo $i | cut -d'=' -f1) 76 | stripped=$(echo $i | cut -d'_' -f2-) 77 | key=$(echo $stripped | cut -d'=' -f1) 78 | value=$(echo -n "${!fullkey}") 79 | echo "Exporting $key as BUILDARG_$key" 80 | export BUILDARG_$stripped 81 | done 82 | fi 83 | if env | grep -i -e "^${BUILDARGS_FROM}_" > /dev/null; then 84 | IFS=$'\n' 85 | STAGE_SECRETS=$(env | grep -i -e "^${BUILDARGS_FROM}_") 86 | for i in $STAGE_SECRETS; do 87 | fullkey=$(echo $i | cut -d'=' -f1) 88 | stripped=$(echo $i | cut -d'_' -f2-) 89 | key=$(echo $stripped | cut -d'=' -f1) 90 | value=$(echo -n "${!fullkey}") 91 | echo "Exporting $key as BUILDARG_$key" 92 | export BUILDARG_$stripped 93 | done 94 | fi 95 | fi 96 | } 97 | 98 | insert_args() { 99 | if [[ -n "$KDH_INSERT_ARGS" ]]; then 100 | echo "KDH_INSERT_ARGS is turned on, so we're going to re-write your Dockerfile and insert ARG commands for every BUILDARG" 101 | IFS=$'\n' 102 | ALL_VARIABLES=$(env | grep -i -e '^BUILDARG_') 103 | for i in $ALL_VARIABLES; do 104 | stripped=$(echo $i | cut -d'_' -f2-) 105 | key=$(echo $stripped | cut -d'=' -f1) 106 | echo "Inserting ARG $key into Dockerfile below the FROM" 107 | sed -i -e "/^FROM/a ARG $key" $DOCKERFILE 108 | done 109 | echo "Dockerfile manipulation complete. Now it looks like:" 110 | echo 111 | cat $DOCKERFILE 112 | echo 113 | fi 114 | } 115 | 116 | set_defaults() { 117 | 118 | if [[ -v SCALE_REPLICAS ]]; then 119 | export SCALE_MIN=$SCALE_REPLICAS 120 | export SCALE_MAX=$SCALE_REPLICAS 121 | fi 122 | 123 | if [[ ! -v SCALE_MIN ]]; then 124 | export SCALE_MIN=2 125 | fi 126 | 127 | if [[ ! -v SCALE_MAX ]]; then 128 | export SCALE_MAX=4 129 | fi 130 | 131 | if [[ ! -v SCALE_CPU ]]; then 132 | export SCALE_CPU=60 133 | fi 134 | 135 | if [[ ! 
-v PDB_MIN ]]; then 136 | export PDB_MIN="50%" 137 | fi 138 | 139 | if [[ ! -v PORT ]]; then 140 | export PORT=5000 141 | fi 142 | 143 | if [[ ! -v PROBE_URL ]]; then 144 | export PROBE_URL="/" 145 | fi 146 | 147 | if [[ ! -v LIMIT_CPU ]]; then 148 | export LIMIT_CPU="1" 149 | fi 150 | 151 | if [[ ! -v LIMIT_MEMORY ]]; then 152 | export LIMIT_MEMORY="512Mi" 153 | fi 154 | 155 | if [[ ! -v LIVENESS_PROBE ]]; then 156 | export LIVENESS_PROBE="/bin/true" 157 | fi 158 | } 159 | 160 | set_prefix_defaults() { 161 | memory=${1}_LIMIT_MEMORY 162 | cpu=${1}_LIMIT_CPU 163 | liveness=${1}_LIVENESS_PROBE 164 | replicas=${1}_REPLICAS 165 | min_replicas=${1}_SCALE_MIN 166 | max_replicas=${1}_SCALE_MAX 167 | scale_cpu=${1}_SCALE_CPU 168 | 169 | if [[ -v ${replicas} ]]; then 170 | export ${min_replicas}=${!replicas} 171 | export ${max_replicas}=${!replicas} 172 | fi 173 | 174 | if [[ ! -v ${min_replicas} ]]; then 175 | export ${min_replicas}="1" 176 | fi 177 | 178 | if [[ ! -v ${max_replicas} ]]; then 179 | export ${max_replicas}="1" 180 | fi 181 | 182 | if [[ ! -v ${scale_cpu} ]]; then 183 | export ${scale_cpu}="60" 184 | fi 185 | 186 | if [[ ! -v ${memory} ]]; then 187 | export ${memory}="512Mi" 188 | fi 189 | 190 | if [[ ! -v ${cpu} ]]; then 191 | export ${cpu}="1" 192 | fi 193 | 194 | if [[ ! 
-v ${liveness} ]]; then 195 | export ${liveness}="/bin/true" 196 | fi 197 | } 198 | 199 | set_buildargs() { 200 | IFS=$'\n' 201 | if env | grep -i -e '^BUILDARG_' > /dev/null; then 202 | ALL_VARIABLES=$(env | grep -i -e '^BUILDARG_') 203 | for i in $ALL_VARIABLES; do 204 | fullkey=$(echo $i | cut -d'=' -f1) 205 | stripped=$(echo $i | cut -d'_' -f2-) 206 | key=$(echo $stripped | cut -d'=' -f1) 207 | value=$(echo -n "${!fullkey}") 208 | buildargs="${buildargs}--build-arg $key='$value' " 209 | done 210 | export buildargs=$buildargs 211 | fi 212 | } 213 | 214 | build_env() { 215 | IFS=$'\n' 216 | echo "Removing .env file" 217 | rm $CI_PROJECT_DIR/.env &> /dev/null || true 218 | if env | grep -i -e '^BUILDARG_' > /dev/null; then 219 | ALL_VARIABLES=$(env | grep -i -e '^BUILDARG_') 220 | for i in $ALL_VARIABLES; do 221 | fullkey=$(echo $i | cut -d'=' -f1) 222 | stripped=$(echo $i | cut -d'_' -f2-) 223 | key=$(echo $stripped | cut -d'=' -f1) 224 | value=$(echo -n "${!fullkey}") 225 | echo "$key=$value" >> $CI_PROJECT_DIR/.env 226 | done 227 | fi 228 | } 229 | 230 | setup_docker() { 231 | if ! 
docker info &>/dev/null; then 232 | if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then 233 | export DOCKER_HOST='tcp://localhost:2375' 234 | fi 235 | fi 236 | } 237 | 238 | get_secrets_for_creation() { 239 | if env | grep -i -e '^SECRET_' > /dev/null; then 240 | IFS=$'\n' 241 | SECRETS=$(env | grep -i -e '^SECRET_') 242 | for i in $SECRETS; do 243 | fullkey=$(echo $i | cut -d'=' -f1) 244 | stripped=$(echo $i | cut -d'_' -f2-) 245 | key=$(echo $stripped | cut -d'=' -f1) 246 | value=$(echo -n "${!fullkey}" | base64 -w 0) 247 | echo " $key: $value" 248 | done 249 | fi 250 | 251 | if env | grep -i -e "^$CI_JOB_STAGE"_ > /dev/null; then 252 | IFS=$'\n' 253 | STAGE_SECRETS=$(env | grep -i -e "^$CI_JOB_STAGE") 254 | for i in $STAGE_SECRETS; do 255 | fullkey=$(echo $i | cut -d'=' -f1) 256 | stripped=$(echo $i | cut -d'_' -f2-) 257 | key=$(echo $stripped | cut -d'=' -f1) 258 | value=$(echo -n "${!fullkey}" | base64 -w 0) 259 | echo " $key: $value" 260 | done 261 | fi 262 | } 263 | 264 | get_secrets_for_usage() { 265 | if env | grep -i -e '^SECRET_' > /dev/null; then 266 | IFS=$'\n' 267 | SECRETS=$(env | grep -i -e '^SECRET_') 268 | for i in $SECRETS; do 269 | fullkey=$(echo $i | cut -d'=' -f1) 270 | stripped=$(echo $i | cut -d'_' -f2-) 271 | key=$(echo $stripped | cut -d'=' -f1) 272 | value=$(echo -n "${!fullkey}" | base64 -w 0) 273 | echo "- name: $key" >> /tmp/secrets.yaml 274 | echo " valueFrom:" >> /tmp/secrets.yaml 275 | echo " secretKeyRef:" >> /tmp/secrets.yaml 276 | echo " name: $KUBE_NAMESPACE-secrets-$STAGE" >> /tmp/secrets.yaml 277 | echo " key: $key" >> /tmp/secrets.yaml 278 | done 279 | fi 280 | 281 | if env | grep -i -e "^$CI_JOB_STAGE"_ > /dev/null; then 282 | IFS=$'\n' 283 | STAGE_SECRETS=$(env | grep -i -e "^$CI_JOB_STAGE") 284 | for i in $STAGE_SECRETS; do 285 | fullkey=$(echo $i | cut -d'=' -f1) 286 | stripped=$(echo $i | cut -d'_' -f2-) 287 | key=$(echo $stripped | cut -d'=' -f1) 288 | value=$(echo -n "${!fullkey}" | base64 -w 0) 289 | echo "- name: 
$key" >> /tmp/secrets.yaml 290 | echo " valueFrom:" >> /tmp/secrets.yaml 291 | echo " secretKeyRef:" >> /tmp/secrets.yaml 292 | echo " name: $KUBE_NAMESPACE-secrets-$STAGE" >> /tmp/secrets.yaml 293 | echo " key: $key" >> /tmp/secrets.yaml 294 | done 295 | fi 296 | } 297 | 298 | get_deploy_events() { 299 | if env | grep -i -e '^NEWRELIC_' > /dev/null; then 300 | NEWRELIC=$(env | grep -i -e '^NEWRELIC_') 301 | for i in $NEWRELIC; do 302 | if echo "$i" | grep -i -e "API_KEY" > /dev/null; then 303 | export NEWRELIC_API_KEY=$(echo $i | cut -d'=' -f2) 304 | fi 305 | if echo "$i" | grep -i -e "$CI_JOB_STAGE" | grep -i -e "APP_ID" > /dev/null; then 306 | export NEWRELIC_APP_ID=$(echo $i | cut -d'=' -f2) 307 | fi 308 | done 309 | fi 310 | 311 | if env | grep -i -e '^SLACK' > /dev/null; then 312 | SLACK=$(env | grep -i -e '^SLACK') 313 | for i in $SLACK; do 314 | if echo "$i" | grep -i -e "$CI_JOB_STAGE" | grep -i -e "WEBHOOK" > /dev/null; then 315 | export SLACK_WEBHOOK_URL=$(echo $i | cut -d'=' -f2) 316 | elif echo "$i" | grep -i -e "^SLACK_WEBHOOK" > /dev/null; then 317 | export SLACK_WEBHOOK_URL=$(echo $i | cut -d'=' -f2) 318 | fi 319 | done 320 | fi 321 | 322 | if env | grep -i -e '^TEAMS' > /dev/null; then 323 | TEAMS=$(env | grep -i -e '^TEAMS') 324 | for i in $TEAMS; do 325 | if echo "$i" | grep -i -e "$CI_JOB_STAGE" | grep -i -e "WEBHOOK" > /dev/null; then 326 | export TEAMS_WEBHOOK_URL=$(echo $i | cut -d'=' -f2) 327 | elif echo "$i" | grep -i -e "^TEAMS_WEBHOOK" > /dev/null; then 328 | export TEAMS_WEBHOOK_URL=$(echo $i | cut -d'=' -f2) 329 | fi 330 | done 331 | fi 332 | 333 | if env | grep -i -e '^INSTANA' > /dev/null; then 334 | INSTANA=$(env | grep -i -e '^INSTANA') 335 | for i in $INSTANA; do 336 | if echo "$i" | grep -i -e "$CI_JOB_STAGE" | grep -i -e "API_TOKEN" > /dev/null; then 337 | export INSTANA_API_TOKEN=$(echo $i | cut -d'=' -f2) 338 | elif echo "$i" | grep -i -e "^INSTANA_API_TOKEN" > /dev/null; then 339 | export INSTANA_API_TOKEN=$(echo $i | cut 
-d'=' -f2) 340 | fi 341 | done 342 | fi 343 | } --------------------------------------------------------------------------------