├── CONTRIBUTING.md ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE ├── OWNERS ├── OWNERS_ALIASES ├── README-old.md ├── README.md ├── bazel ├── README.md └── bazel.yaml ├── buildah ├── Dockerfile ├── README.md └── buildah.yaml ├── buildkit ├── 0-buildkitd.yaml ├── 1-buildtemplate.yaml └── README.md ├── buildpacks ├── README-CF.md ├── README.md ├── cf.yaml └── cnb.yaml ├── ecr_helper ├── README.md └── helper.sh ├── gcr_helper ├── README.md └── helper.sh ├── hack └── update-deps.sh ├── jib ├── README.md ├── jib-gradle.yaml └── jib-maven.yaml ├── kaniko ├── README.md └── kaniko.yaml ├── makisu ├── README.md ├── makisu.yaml └── registry.yaml ├── test ├── OWNERS ├── README.md ├── build-cf.yaml ├── build-cnb.yaml ├── e2e-tests.sh └── presubmit-tests.sh └── vendor └── knative.dev └── test-infra ├── LICENSE └── scripts ├── README.md ├── dummy.go ├── e2e-tests.sh ├── library.sh ├── markdown-link-check-config.rc ├── markdown-lint-config.rc ├── presubmit-tests.sh └── release.sh /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Welcome, we would love to accept your patches and contributions to this project. There are just a few steps to follow: 4 | 5 | ## Sign the Contributor License Agreement (CLA). 6 | 7 | Before you can contribute, you will need to sign the Contributor License Agreement (CLA). 8 | 9 | Contributions to this project must be accompanied by a Contributor License 10 | Agreement. You (or your employer) retain the copyright to your contribution, 11 | this simply gives us permission to use and redistribute your contributions as 12 | part of the project. Head over to [https://cla.developers.google.com/](https://cla.developers.google.com/) to see 13 | your current agreements on file or to sign a new one. 14 | 15 | You generally only need to submit a CLA once, so if you've already submitted one 16 | (even if it was for a different project), you probably don't need to do it 17 | again. 
18 | 19 | ## Code of Conduct 20 | 21 | Please make sure to read and observe our [Code of Conduct](https://github.com/knative/serving/blob/master/code-of-conduct.md). 22 | 23 | ## Setting up your development environment 24 | 25 | If you haven’t set up your environment, set it up like you would for [Knative Build](https://github.com/knative/build/blob/master/DEVELOPMENT.md). 26 | 27 | ## Patch submission and review 28 | 29 | All submissions, including submissions by project members, require review. We 30 | use GitHub pull requests for this purpose. Consult [GitHub Help] for more 31 | information on using pull requests. 32 | 33 | [GitHub Help]: https://help.github.com/articles/about-pull-requests/ 34 | -------------------------------------------------------------------------------- /Gopkg.lock: -------------------------------------------------------------------------------- 1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 2 | 3 | 4 | [[projects]] 5 | branch = "master" 6 | digest = "1:28ab14d6b5cbebc76f615694f1381dc04f530ffa1fda3f5148059adc90667bfd" 7 | name = "knative.dev/test-infra" 8 | packages = ["scripts"] 9 | pruneopts = "UT" 10 | revision = "17f2331e80ad0d3e170ea2bae45c3922744f83af" 11 | 12 | [solve-meta] 13 | analyzer-name = "dep" 14 | analyzer-version = 1 15 | input-imports = ["knative.dev/test-infra/scripts"] 16 | solver-name = "gps-cdcl" 17 | solver-version = 1 18 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md 2 | # for detailed Gopkg.toml documentation. 
3 | 4 | required = [ 5 | "knative.dev/test-infra/scripts", 6 | ] 7 | 8 | [prune] 9 | go-tests = true 10 | unused-packages = true 11 | non-go = true 12 | 13 | [[prune.project]] 14 | name = "knative.dev/test-infra" 15 | non-go = false 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # The OWNERS file is used by prow to automatically merge approved PRs. 
2 | 3 | approvers: 4 | - build-templates-approvers 5 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | aliases: 2 | build-templates-approvers: 3 | - imjasonh 4 | - mattmoor 5 | 6 | productivity-approvers: 7 | - adrcunha 8 | - chaodaiG 9 | - srinivashegde86 10 | productivity-reviewers: 11 | - adrcunha 12 | - chaodaiG 13 | - coryrc 14 | - dushyanthsc 15 | - Fredy-Z 16 | - srinivashegde86 17 | - steuhs 18 | - yt3liu 19 | -------------------------------------------------------------------------------- /README-old.md: -------------------------------------------------------------------------------- 1 | # Knative Build Templates 2 | 3 | :rotating_light: **NOTE: Knative Build is 4 | [deprecated](https://github.com/knative/build/issues/614) in favor of [Tekton 5 | Pipelines](https://github.com/tektoncd/pipeline). There are no plans to 6 | produce future releases of this component.** :rotating_light: 7 | 8 | **This version of README.md is provided for historical purposes.** 9 | 10 | This repository contains a library of 11 | `BuildTemplate` [resources](https://github.com/knative/docs/blob/master/build/build-templates.md) which are designed to be reusable by many applications. 12 | 13 | Each build template is in a separate directory along with a README.md and a Kubernetes manifest, so you can choose which build templates to install on your cluster. 14 | 15 | ## Build Templates Kinds 16 | 17 | There are two kinds of build templates: 18 | 19 | 1. `ClusterBuildTemplates` with a Cluster scope 20 | 1. `BuildTemplates` with a Namespace scope 21 | 22 | A default kind of `BuildTemplate` is used if the field `kind` is not set. 
23 | 24 | ## Using Build Templates 25 | 26 | First, install a build template onto your cluster: 27 | 28 | ``` 29 | $ kubectl apply -f buildpack.yaml 30 | buildtemplate "buildpack" created 31 | ``` 32 | 33 | You can see which build templates are installed using `kubectl` as well: 34 | 35 | ``` 36 | $ kubectl get buildtemplates 37 | NAME AGE 38 | buildpack 3s 39 | ``` 40 | 41 | OR 42 | 43 | ``` 44 | $ kubectl get clusterbuildtemplates 45 | NAME AGE 46 | buildpack 9s 47 | ``` 48 | 49 | With the build template installed, you can define a build that uses that 50 | template, being sure to provide values for required parameters: 51 | 52 | ``` 53 | apiVersion: build.knative.dev/v1alpha1 54 | kind: Build 55 | metadata: 56 | name: buildpack-build 57 | spec: 58 | source: 59 | git: 60 | url: https://github.com/my-user/my-repo 61 | revision: master 62 | template: 63 | name: buildpack 64 | kind: BuildTemplate # (or ClusterBuildTemplate) 65 | arguments: 66 | - name: IMAGE 67 | value: us.gcr.io/my-project/my-app 68 | ``` 69 | 70 | Next, create the build you defined: 71 | 72 | ``` 73 | $ kubectl apply -f build.yaml 74 | build "buildpack-build" created 75 | ``` 76 | 77 | You can check the status of the build using `kubectl`: 78 | 79 | ``` 80 | kubectl get build buildpack-build -o yaml 81 | ``` 82 | 83 | ## Contributing and Support 84 | 85 | If you want to contribute to this repository, please see our [contributing](./CONTRIBUTING.md) guidelines. 
86 | 87 | If you are looking for support, enter an [issue](https://github.com/knative/build-templates/issues/new) or join our [Slack workspace](https://knative.slack.com/) 88 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Knative Build Templates 2 | 3 | :rotating_light: **NOTE: Knative Build is 4 | [deprecated](https://github.com/knative/build/issues/614) in favor of [Tekton 5 | Pipelines](https://github.com/tektoncd/pipeline). There are no plans to 6 | produce future releases of this component.** :rotating_light: 7 | 8 | The original README can be found [here](./README-old.md) for historical 9 | purposes. 10 | -------------------------------------------------------------------------------- /bazel/README.md: -------------------------------------------------------------------------------- 1 | ## Bazel 2 | 3 | This build template builds source into a container image using the [Bazel build 4 | tool](https://bazel.build), and [Bazel's container image 5 | support](https://github.com/bazelbuild/rules_docker). 6 | 7 | This assumes the source repo in question is using the 8 | [`container_push`](https://github.com/bazelbuild/rules_docker/#container_push-1) 9 | rule to build and push a container image. For example: 10 | 11 | ``` 12 | container_push( 13 | name = "push", 14 | format = "Docker", # Or "OCI" 15 | image = ":image", 16 | registry = "gcr.io", 17 | repository = "my-project/my-app", 18 | stamp = True, 19 | ) 20 | ``` 21 | 22 | This target instructs Bazel to build and push a container image containing the 23 | application defined by the `:image` target, based on a suitable base image. 
24 | 25 | The `rules_docker` repo defines build rules to construct images for a variety of 26 | popular programming languages, like 27 | [Python](https://github.com/bazelbuild/rules_), 28 | [Java](https://github.com/bazelbuild/rules_docker/#java_image), 29 | [Go](https://github.com/bazelbuild/rules_docker/#go_image) and many more. 30 | 31 | ## Create the template 32 | 33 | ``` 34 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/bazel/bazel.yaml 35 | ``` 36 | 37 | ## Parameters 38 | 39 | * **TARGET**: The Bazel `container_push` target to run. 40 | 41 | ## Usage 42 | 43 | ``` 44 | apiVersion: build.knative.dev/v1alpha1 45 | kind: Build 46 | metadata: 47 | name: bazel-build 48 | spec: 49 | source: 50 | git: 51 | url: https://github.com/my-user/my-repo 52 | revision: master 53 | template: 54 | name: bazel 55 | arguments: 56 | - name: TARGET 57 | value: //path/to/build:target 58 | ``` 59 | -------------------------------------------------------------------------------- /bazel/bazel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: bazel 5 | spec: 6 | parameters: 7 | - name: TARGET 8 | description: The name of the Bazel "container_push" target to run 9 | 10 | steps: 11 | - name: build-and-push 12 | image: gcr.io/cloud-builders/bazel 13 | args: ['run', '${TARGET}'] 14 | -------------------------------------------------------------------------------- /buildah/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/centos:7 as base 2 | RUN \ 3 | yum -y install epel-release && \ 4 | # Install buildah dependencies. 
5 | yum -y install \ 6 | make \ 7 | golang \ 8 | bats \ 9 | btrfs-progs-devel \ 10 | device-mapper-devel \ 11 | glib2-devel \ 12 | gpgme-devel \ 13 | libassuan-devel \ 14 | libseccomp-devel \ 15 | ostree-devel \ 16 | git \ 17 | bzip2 18 | 19 | FROM base as runc 20 | ARG RUNC_REVISION="master" 21 | RUN yum -y install runc 22 | RUN mkdir ~/runc && \ 23 | cd ~/runc && \ 24 | export GOPATH=`pwd` && \ 25 | git clone https://github.com/opencontainers/runc ./src/github.com/opencontainers/runc && \ 26 | cd $GOPATH/src/github.com/opencontainers/runc && \ 27 | git checkout "${RUNC_REVISION}" && \ 28 | git log -1 --oneline > /.version.runc && \ 29 | make runc && \ 30 | mv runc /usr/bin/runc 31 | 32 | FROM base as buildah 33 | ARG BUILDAH_REVISION="master" 34 | RUN yum -y install \ 35 | go-md2man \ 36 | runc \ 37 | skopeo-containers 38 | RUN mkdir ~/buildah && \ 39 | cd ~/buildah && \ 40 | export GOPATH=`pwd` && \ 41 | cd $GOPATH/ && \ 42 | git clone https://github.com/containers/buildah ./src/github.com/containers/buildah && \ 43 | cd $GOPATH/src/github.com/containers/buildah && \ 44 | git checkout "${BUILDAH_REVISION}" && \ 45 | make && \ 46 | make install 47 | 48 | FROM docker.io/centos:7 49 | RUN yum -y install libarchive ostree lzo libseccomp libedit gpgme && \ 50 | yum update -y && \ 51 | yum clean all && \ 52 | rm -rf \ 53 | /var/cache/yum \ 54 | /usr/share/doc \ 55 | /usr/share/man \ 56 | /usr/share/info \ 57 | /usr/share/locale \ 58 | /var/log/* 59 | COPY --from=runc /usr/bin/runc /usr/bin/runc 60 | COPY --from=runc /.version.runc /.version.runc 61 | COPY --from=buildah /usr/local/bin/buildah /usr/bin/buildah 62 | COPY --from=buildah /etc/containers /etc/containers 63 | ENV BUILDAH_ISOLATION chroot 64 | ENV STORAGE_DRIVER vfs 65 | ENTRYPOINT ["buildah"] 66 | -------------------------------------------------------------------------------- /buildah/README.md: -------------------------------------------------------------------------------- 1 | # Buildah 2 | 3 | This 
build template builds source into a container image using Project Atomic's 4 | [Buildah](https://github.com/projectatomic/buildah) build tool. 5 | 6 | This build template uses Buildah's support for building from 7 | [`Dockerfile`](https://docs.docker.com/engine/reference/builder/)s, using its 8 | `buildah bud` command. This command executes the directives in the `Dockerfile` 9 | to assemble a container image, then pushes that image to a container registry. 10 | 11 | ## Parameters 12 | 13 | * **BUILDER_IMAGE:**: The name of the image containing the Buildah tool. See 14 | note below. 15 | (_required_) 16 | * **IMAGE**: The Docker image name to apply to the newly built image. 17 | (_required_) 18 | * **DOCKERFILE**: The path to the `Dockerfile` to execute (_default:_ 19 | `./Dockerfile`) 20 | * **TLSVERIFY**: Verify the TLS on the registry endpoint (for push/pull to a 21 | non-TLS registry) (_default:_ `true`) 22 | 23 | ## Usage 24 | 25 | ``` 26 | apiVersion: build.knative.dev/v1alpha1 27 | kind: Build 28 | metadata: 29 | name: buildah-build-my-repo 30 | spec: 31 | source: 32 | git: 33 | url: https://github.com/my-user/my-repo 34 | revision: master 35 | template: 36 | name: buildah 37 | arguments: 38 | - name: BUILDER_IMAGE 39 | value: gcr.io/my-project/buildah 40 | - name: IMAGE 41 | value: gcr.io/my-project/my-app 42 | ``` 43 | 44 | In this example, the Git repo being built is expected to have a `Dockerfile` at 45 | the root of the repository. 46 | 47 | ## Note: BUILDER_IMAGE 48 | 49 | Currently, you must build and host the builder image yourself. This is expected 50 | to change in the future. You can build the image from [the Dockerfile in this 51 | directory](./Dockerfile), e.g.: 52 | 53 | ``` 54 | docker build -t /buildah . && docker push /buildah 55 | ``` 56 | 57 | You need a relatively recent version of Docker (>= 17.05). 
58 | You could also build the image using `buildah` itself, or `kaniko` 59 | -------------------------------------------------------------------------------- /buildah/buildah.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: buildah 5 | spec: 6 | parameters: 7 | - name: BUILDER_IMAGE 8 | description: The location of the buildah builder image. 9 | - name: IMAGE 10 | description: The name of the image to push. 11 | - name: DOCKERFILE 12 | description: Path to the Dockerfile to build. 13 | default: ./Dockerfile 14 | - name: TLSVERIFY 15 | description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry) 16 | default: "true" 17 | 18 | steps: 19 | - name: build 20 | image: ${BUILDER_IMAGE} 21 | args: ['bud', '--tls-verify=${TLSVERIFY}', '--layers', '-f', '${DOCKERFILE}', '-t', '${IMAGE}', '.'] 22 | volumeMounts: 23 | - name: varlibcontainers 24 | mountPath: /var/lib/containers 25 | 26 | - name: push 27 | image: ${BUILDER_IMAGE} 28 | args: ['push', '--tls-verify=${TLSVERIFY}', '${IMAGE}', 'docker://${IMAGE}'] 29 | volumeMounts: 30 | - name: varlibcontainers 31 | mountPath: /var/lib/containers 32 | 33 | volumes: 34 | - name: varlibcontainers 35 | emptyDir: {} 36 | -------------------------------------------------------------------------------- /buildkit/0-buildkitd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: buildkitd 6 | name: buildkitd 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: buildkitd 11 | template: 12 | metadata: 13 | labels: 14 | app: buildkitd 15 | annotations: 16 | container.apparmor.security.beta.kubernetes.io/buildkitd: unconfined 17 | container.seccomp.security.alpha.kubernetes.io/buildkitd: unconfined 18 | spec: 19 | containers: 20 | # moby/buildkit:vX.Y.Z-rootless is executed as 
an unprivileged user (UID=1000). 21 | # If you are using Debian (not Ubuntu) or Arch Linux kernel on each of kubelet nodes, 22 | # `sudo sh -c "echo 1 > /proc/sys/kernel/unprivileged_userns_clone"` is required. 23 | # 24 | # Note that rootless mode does not support OverlayFS snapshotter unless using Ubuntu kernel. 25 | # However, reflink-based Copy-on-Write can be enabled by mounting XFS volume on /home/user/.local/share/buildkit . 26 | # The XFS volume needs to be formatted with `mkfs.xfs -m reflink=1`. 27 | # 28 | # Also note that rootless mode is known not to work on GCOS kernel: https://github.com/moby/buildkit/issues/879 29 | # 30 | # You can also use rootful moby/buildkit:vX.Y.Z instead. 31 | - image: moby/buildkit:v0.4.0-rootless@sha256:3877d091e65429f59919ed5591aaeb863b1889a5314bdfdba5ff9c0dfb2f3ed0 32 | args: 33 | - --addr 34 | - tcp://0.0.0.0:1234 35 | # Disable PID namespace isolation across BuildKit daemon and build containers. 36 | # To enable PID namespace isolation, you need to remove --oci-worker-no-process-sandbox and 37 | # set securityContext.procMount to "Unmasked" (or set securityContext.privileged to true). 
38 | - --oci-worker-no-process-sandbox 39 | name: buildkitd 40 | ports: 41 | - containerPort: 1234 42 | --- 43 | apiVersion: v1 44 | kind: Service 45 | metadata: 46 | labels: 47 | app: buildkitd 48 | name: buildkitd 49 | spec: 50 | ports: 51 | - port: 1234 52 | protocol: TCP 53 | selector: 54 | app: buildkitd 55 | -------------------------------------------------------------------------------- /buildkit/1-buildtemplate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: buildkit 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: Where to publish the resulting image 9 | - name: DOCKERFILE 10 | description: The name of the Dockerfile 11 | default: "Dockerfile" 12 | - name: PUSH 13 | description: Whether push or not 14 | default: "true" 15 | - name: DIRECTORY 16 | description: The directory containing the app 17 | default: "/workspace" 18 | - name: BUILDKIT_CLIENT_IMAGE 19 | description: The name of the BuildKit client (buildctl) image 20 | default: "moby/buildkit:v0.4.0-rootless@sha256:3877d091e65429f59919ed5591aaeb863b1889a5314bdfdba5ff9c0dfb2f3ed0" 21 | - name: BUILDKIT_DAEMON_ADDRESS 22 | description: The address of the BuildKit daemon (buildkitd) service 23 | default: "tcp://buildkitd:1234" 24 | steps: 25 | - name: build-and-push 26 | image: ${BUILDKIT_CLIENT_IMAGE} 27 | workingDir: ${DIRECTORY} 28 | command: ["buildctl", "--addr=${BUILDKIT_DAEMON_ADDRESS}", "build", 29 | "--progress=plain", 30 | "--frontend=dockerfile.v0", 31 | "--opt", "filename=${DOCKERFILE}", 32 | "--local", "context=.", "--local", "dockerfile=.", 33 | "--output", "type=image,name=${IMAGE},push=${PUSH}"] 34 | # 35 | # To enable distributed cache, add 36 | # --export-cache type=inline --import-cache type=registry,ref=${IMAGE} 37 | # or 38 | # --export-cache type=local,dest=/your/nfs/volume --import-cache type=local,src=/your/nfs/volume 39 | 
-------------------------------------------------------------------------------- /buildkit/README.md: -------------------------------------------------------------------------------- 1 | # BuildKit 2 | 3 | This build template builds source into a container image using [Moby BuildKit](https://github.com/moby/buildkit). 4 | 5 | [Rootless mode](https://github.com/moby/buildkit/blob/v0.3.1/docs/rootless.md) is used by default. 6 | 7 | ## Parameters 8 | 9 | * **IMAGE**: The Docker image name to apply to the newly built image. 10 | (_required_) 11 | * **DOCKERFILE**: The path to the `Dockerfile` to execute (_default:_ 12 | `./Dockerfile`) 13 | * **PUSH**: Whether to push or not (_default:_`true`) 14 | * **DIRECTORY**: Workspace directory (_default:_`/workspace`) 15 | * **BUILDKIT_CLIENT_IMAGE**: BuildKit client image (_default:_`moby/buildkit:vX.Y.Z-rootless@sha256:...`) 16 | * **BUILDKIT_DAEMON_ADDRESS**: BuildKit daemon address (_default:_`tcp://buildkitd:1234`) 17 | 18 | ## Set up 19 | 20 | ### Step 0: Deploy BuildKit daemon 21 | 22 | First, you need to deploy BuildKit daemon as follows: 23 | 24 | ```console 25 | kubectl apply -f 0-buildkitd.yaml 26 | ``` 27 | 28 | The default image is set to `moby/buildkit:vX.Y.Z-rootless@sha256:...` (see YAML files for the actual revision), but you can also build the image manually as follows: 29 | 30 | ```console 31 | git clone https://github.com/moby/buildkit.git 32 | cd buildkit 33 | DOCKER_BUILDKIT=1 docker build --target rootless -f hack/dockerfiles/test.buildkit.Dockerfile . 34 | ``` 35 | 36 | If you are using Debian (not Ubuntu) or Arch Linux kernel on each of kubelet nodes, `sudo sh -c "echo 1 > /proc/sys/kernel/unprivileged_userns_clone"` is required. 37 | See the content of [`0-buildkitd.yaml`](./0-buildkitd.yaml) for further information about rootless mode. 38 | 39 | You can also use "rootful" BuildKit image (`moby/buildkit:vX.Y.Z`) at your own risk. 
40 | 41 | ### Step 1: Register BuildKit build template 42 | 43 | ```console 44 | kubectl apply -f 1-buildtemplate.yaml 45 | ``` 46 | 47 | ## Usage 48 | 49 | ``` 50 | apiVersion: build.knative.dev/v1alpha1 51 | kind: Build 52 | metadata: 53 | name: buildkit-build-my-repo 54 | spec: 55 | source: 56 | git: 57 | url: https://github.com/my-user/my-repo 58 | revision: master 59 | template: 60 | name: buildkit 61 | arguments: 62 | - name: IMAGE 63 | value: gcr.io/my-project/my-app 64 | ``` 65 | 66 | In this example, the Git repo being built is expected to have a `Dockerfile` at 67 | the root of the repository. 68 | -------------------------------------------------------------------------------- /buildpacks/README-CF.md: -------------------------------------------------------------------------------- 1 | # Buildpack 2 | 3 | This build template builds source into a container image using Cloud Foundry's 4 | Buildpack build system. 5 | 6 | When you execute a Buildpack on your application source, the Buildpack build 7 | system detects the source language and runtime, identifies a suitable base 8 | image, and builds the application source on top of that base image, and pushes 9 | the resulting application image to a Docker registry under the provided name. 10 | 11 | ## Create the template 12 | 13 | ``` 14 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/buildpacks/cf.yaml 15 | ``` 16 | 17 | ## Parameters 18 | 19 | * **IMAGE:** The Docker image name to apply to the newly built image. 20 | (_required_) 21 | * **BUILDPACK_ORDER:** A comma separated list of names or URLs for the 22 | buildpacks to use. Each buildpack is applied in order. (_default:_ `""`) 23 | * **SKIP_DETECT:** By default, the first buildpack to match is used. If true, 24 | detection is skipped and each buildpack contributes in order. 25 | (_default:_ `"false"`) 26 | * **DIRECTORY:** The directory in the source repository where source 27 | should be found. 
(_default:_ `/workspace`) 28 | * **CACHE:** The directory where data should be persistently cached 29 | between builds. (_default:_ `app-cache`) 30 | * **USE_CRED_HELPERS:** Use Docker credential helpers for Google's GCR, Amazon's 31 | ECR, or Microsoft's ACR. (_default:_ `"true"`) 32 | 33 | ## Usage 34 | 35 | ``` 36 | apiVersion: build.knative.dev/v1alpha1 37 | kind: Build 38 | metadata: 39 | name: buildpack-build 40 | spec: 41 | source: 42 | git: 43 | url: https://github.com/my-user/my-repo 44 | revision: master 45 | template: 46 | name: buildpacks-cloudfoundry 47 | arguments: 48 | - name: IMAGE 49 | value: us.gcr.io/my-project/my-app 50 | ``` 51 | -------------------------------------------------------------------------------- /buildpacks/README.md: -------------------------------------------------------------------------------- 1 | # Buildpack V3 2 | 3 | This build template builds source into a container image using [Cloud Native Buildpacks](https://buildpacks.io). 4 | 5 | The Cloud Native Buildpacks website describes v3 buildpacks as: 6 | 7 | > ... pluggable, modular tools that translate source code into container-ready artifacts 8 | > such as OCI images. They replace Dockerfiles in the app development lifecycle with a higher level 9 | > of abstraction. ... Cloud Native Buildpacks embrace modern container standards, such as the OCI 10 | > image format. They take advantage of the latest capabilities of these standards, such as remote 11 | > image layer rebasing on Docker API v2 registries. 12 | 13 | **Note**: The current Cloud Foundry buildpacks are available in the [CF template](README-CF.md). 14 | 15 | ## Create the template 16 | 17 | ``` 18 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/buildpacks/cnb.yaml 19 | ``` 20 | 21 | ## Parameters 22 | 23 | * **IMAGE:** The image you wish to create. For example, "repo/example", or "example.com/repo/image". 
(_required_) 24 | * **BUILDER_IMAGE** The image on which builds will run (must include v3 lifecycle and compatible buildpacks; _required_) 25 | * **USE_CRED_HELPERS:** Use Docker credential helpers. Set to `"true"` or `"false"` as a string value. (_default:_ `"false"`) 26 | * **CACHE** The name of the persistent app cache volume (_default:_ an empty directory -- effectively no cache) 27 | * **USER_ID** The user ID of the builder image user, as a string value (_default:_ `"1000"`) 28 | * **GROUP_ID** The group ID of the builder image user, as a string value (_default:_ `"1000"`) 29 | 30 | ## Usage 31 | 32 | ``` 33 | apiVersion: build.knative.dev/v1alpha1 34 | kind: Build 35 | metadata: 36 | name: cnb-example-build 37 | spec: 38 | source: 39 | git: 40 | url: https://github.com/my-user/my-repo 41 | revision: master 42 | template: 43 | name: buildpacks-cnb 44 | arguments: 45 | - name: IMAGE 46 | value: us.gcr.io/my-project/my-app 47 | - name: BUILDER_IMAGE 48 | value: cloudfoundry/cnb:bionic 49 | - name: CACHE 50 | value: my-cache 51 | volumes: 52 | - name: my-cache 53 | persistentVolumeClaim: 54 | claimName: my-volume-claim 55 | ``` 56 | -------------------------------------------------------------------------------- /buildpacks/cf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: buildpacks-cloudfoundry 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: The name of the image to push 9 | - name: BUILDPACK_ORDER 10 | description: A comma separated list of names or URLs for the buildpacks to use. Each buildpack is applied in order. 11 | default: "" 12 | - name: SKIP_DETECT 13 | description: By default, the first buildpack to match is used. If true, detection is skipped and each buildpack contributes in order. 
14 | default: "false" 15 | - name: DIRECTORY 16 | description: The directory containing the app 17 | default: /workspace 18 | - name: CACHE 19 | description: The name of the persistent app cache volume 20 | default: app-cache 21 | - name: USE_CRED_HELPERS 22 | description: Use Docker credential helpers for Google's GCR, Amazon's ECR, or Microsoft's ACR. 23 | default: "true" 24 | 25 | steps: 26 | # In: a CF app in $DIRECTORY 27 | # Out: a CF app droplet in /out 28 | # Out: a build cache in /cache 29 | - name: build 30 | image: packs/cf:build 31 | args: ["-skipDetect=${SKIP_DETECT}", "-buildpackOrder=${BUILDPACK_ORDER}"] 32 | workingDir: "${DIRECTORY}" 33 | volumeMounts: 34 | - name: droplet 35 | mountPath: /out 36 | - name: "${CACHE}" 37 | mountPath: /cache 38 | 39 | # In: a CF app droplet in /in 40 | # Out: an image published as $IMAGE 41 | - name: export 42 | image: packs/cf:export 43 | workingDir: /in 44 | args: ["${IMAGE}"] 45 | env: 46 | - name: PACK_USE_HELPERS 47 | value: "${USE_CRED_HELPERS}" 48 | volumeMounts: 49 | - name: droplet 50 | mountPath: /in 51 | 52 | volumes: 53 | - name: droplet 54 | emptyDir: {} 55 | - name: app-cache 56 | emptyDir: {} 57 | -------------------------------------------------------------------------------- /buildpacks/cnb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: build.knative.dev/v1alpha1 3 | kind: BuildTemplate 4 | metadata: 5 | name: buildpacks-cnb 6 | spec: 7 | parameters: 8 | - name: IMAGE 9 | description: The image you wish to create. For example, "repo/example", or "example.com/repo/image". 10 | - name: BUILDER_IMAGE 11 | description: The image on which builds will run (must include v3 lifecycle and compatible buildpacks). 12 | - name: USE_CRED_HELPERS 13 | description: Use Docker credential helpers for Google's GCR, Amazon's ECR, or Microsoft's ACR. 
14 | default: 'false' 15 | - name: CACHE 16 | description: The name of the persistent app cache volume 17 | default: empty-dir 18 | - name: USER_ID 19 | description: The user ID of the builder image user 20 | default: "1000" 21 | - name: GROUP_ID 22 | description: The group ID of the builder image user 23 | default: "1000" 24 | 25 | steps: 26 | - name: prepare 27 | image: alpine 28 | command: ["/bin/sh"] 29 | args: 30 | - "-c" 31 | - > 32 | chown -R "${USER_ID}:${GROUP_ID}" "/builder/home" && 33 | chown -R "${USER_ID}:${GROUP_ID}" /layers && 34 | chown -R "${USER_ID}:${GROUP_ID}" /cache && 35 | chown -R "${USER_ID}:${GROUP_ID}" /workspace 36 | imagePullPolicy: Always 37 | volumeMounts: 38 | - name: "layers-dir" 39 | mountPath: /layers 40 | - name: "${CACHE}" 41 | mountPath: /cache 42 | 43 | - name: detect 44 | image: ${BUILDER_IMAGE} 45 | command: ["/lifecycle/detector"] 46 | args: 47 | - "-app=/workspace" 48 | - "-group=/layers/group.toml" 49 | - "-plan=/layers/plan.toml" 50 | imagePullPolicy: Always 51 | volumeMounts: 52 | - name: "layers-dir" 53 | mountPath: /layers 54 | 55 | - name: restore 56 | image: ${BUILDER_IMAGE} 57 | command: ["/lifecycle/restorer"] 58 | args: 59 | - "-group=/layers/group.toml" 60 | - "-layers=/layers" 61 | - "-path=/cache" 62 | imagePullPolicy: Always 63 | volumeMounts: 64 | - name: "${CACHE}" 65 | mountPath: /cache 66 | - name: "layers-dir" 67 | mountPath: /layers 68 | 69 | - name: analyze 70 | image: ${BUILDER_IMAGE} 71 | command: ["/lifecycle/analyzer"] 72 | args: 73 | - "-layers=/layers" 74 | - "-helpers=${USE_CRED_HELPERS}" 75 | - "-group=/layers/group.toml" 76 | - "${IMAGE}" 77 | imagePullPolicy: Always 78 | volumeMounts: 79 | - name: "layers-dir" 80 | mountPath: /layers 81 | 82 | - name: build 83 | image: ${BUILDER_IMAGE} 84 | command: ["/lifecycle/builder"] 85 | args: 86 | - "-layers=/layers" 87 | - "-app=/workspace" 88 | - "-group=/layers/group.toml" 89 | - "-plan=/layers/plan.toml" 90 | imagePullPolicy: Always 91 | 
volumeMounts: 92 | - name: "layers-dir" 93 | mountPath: /layers 94 | 95 | - name: export 96 | image: ${BUILDER_IMAGE} 97 | command: ["/lifecycle/exporter"] 98 | args: 99 | - "-layers=/layers" 100 | - "-helpers=${USE_CRED_HELPERS}" 101 | - "-app=/workspace" 102 | - "-group=/layers/group.toml" 103 | - "${IMAGE}" 104 | imagePullPolicy: Always 105 | volumeMounts: 106 | - name: "layers-dir" 107 | mountPath: /layers 108 | 109 | - name: cache 110 | image: ${BUILDER_IMAGE} 111 | command: ["/lifecycle/cacher"] 112 | args: 113 | - "-group=/layers/group.toml" 114 | - "-layers=/layers" 115 | - "-path=/cache" 116 | imagePullPolicy: Always 117 | volumeMounts: 118 | - name: "${CACHE}" 119 | mountPath: /cache 120 | - name: "layers-dir" 121 | mountPath: /layers 122 | 123 | volumes: 124 | - name: empty-dir 125 | emptyDir: {} 126 | - name: layers-dir 127 | emptyDir: {} -------------------------------------------------------------------------------- /ecr_helper/README.md: -------------------------------------------------------------------------------- 1 | # ECR Helper 2 | 3 | This script loads your Amazon ECR credentials into a secret in your current kubernetes cluster and namespace. 4 | 5 | By default, the following resources will be provisioned: 6 | 7 | * A Kubernetes service account (named `builder` by default) with secrets (`ecr-creds`) to enable pushing to ECR. 8 | 9 | To use, simply add a `serviceAccountName: builder` entry to your build definition 10 | 11 | ```yaml: 12 | apiVersion: build.knative.dev/v1alpha1 13 | kind: Build 14 | metadata: 15 | name: mybuild 16 | spec: 17 | serviceAccountName: builder 18 | source: ... 19 | template: ... 20 | ``` 21 | 22 | ## Usage 23 | 24 | At least, you need to be allowed to call `ecr:GetAuthorizationToken` action, as well as other actions that are required for pushing images. 25 | You should use [`AmazonEC2ContainerRegistryPowerUser` managed policy](https://docs.aws.amazon.com/AmazonECR/latest/userguide/ecr_managed_policies.html). 
26 | 27 | ```shell 28 | aws configure 29 | ecr_helper/helper.sh 30 | ``` 31 | 32 | Optionally, `helper.sh` accepts two positional arguments to specify 33 | the namespace and kubernetes service account used: 34 | 35 | ```shell 36 | ecr_helper/helper.sh $MY_NAMESPACE builder-serviceaccount 37 | ``` 38 | 39 | Optionally, a pull secret can be added to the service account which enables pulling images from a private ECR. 40 | 41 | ```shell 42 | ecr_helper/helper.sh --push-and-pull 43 | ``` 44 | 45 | This will output a log of operations performed or skipped: 46 | 47 | ``` 48 | serviceaccount "builder" created 49 | secret "ecr-creds" created 50 | the secret will expire at Thu Oct 4 07:04:09 JST 2018. 51 | ``` 52 | 53 | NOTE: As of October 2018, you need to rerun `helper.sh` every 12 hours, because the credential expires every 12 hours. 54 | -------------------------------------------------------------------------------- /ecr_helper/helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2018 Google, Inc. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # A simple script to create or validate credentials to allow pushing 18 | # built images to ECR. 19 | # 20 | # This script assumes the following environment: 21 | # 22 | # 1. `aws`, 'kubectl` and `jq` installed and in $PATH. 23 | # 24 | # 2. `aws` configured with a default credential. 
25 | # 26 | # 3. The current `aws` credentials have permissions to create 27 | # call `ecr:GetAuthorizationToken` action, as well as other actions 28 | # that are required for pushing images. 29 | # You may use `AmazonEC2ContainerRegistryPowerUser` managed policy. 30 | # 31 | # 4. kubectl context configured, or appropriate flags passed on the 32 | # command line to select namespace and (optionally) builder service 33 | # account name. 34 | # 35 | # The script should warn if any of these preconditions cannot be met. 36 | # 37 | # Once all arguments are validated, this script will create a kubernetes 38 | # secret with the appropriate metadata for usage by build steps, accessible 39 | # by a service account named "builder" (by default). 40 | 41 | ## 42 | ## Validate environment. 43 | ## 44 | 45 | checkBinary() { 46 | if ! which $1 >&/dev/null; then 47 | echo "Unable to locate $1, please ensure it is installed and on your \$PATH." 48 | exit 1 49 | fi 50 | } 51 | 52 | checkBinary aws 53 | checkBinary jq 54 | checkBinary kubectl 55 | 56 | if [[ "$1" == "--push-and-pull" ]]; then 57 | PUSH_AND_PULL_SECRETS=true 58 | shift 59 | fi 60 | 61 | readonly KUBECTL_FLAGS="${1:+ -n $1}" 62 | 63 | if ! kubectl $KUBECTL_FLAGS get sa >& /dev/null; then 64 | echo "Unable to read Kubernetes service accounts with 'kubectl $KUBECTL_FLAGS get sa'." 65 | exit 1 66 | fi 67 | 68 | readonly KUBE_SA=${2:-"builder"} 69 | 70 | 71 | ## 72 | ## Begin doing things 73 | ## 74 | 75 | # Supress stderr, as many of the check queries will print extra output 76 | # if the resources are not present. Keep stderr on FD 3 to allow 77 | # printing output from explicit create commands. 78 | exec 3>&2 79 | exec 2>/dev/null 80 | 81 | # The token expires in 12 hours, as of October, 2018. 82 | DATA=$(aws ecr get-authorization-token) 83 | if [[ $? 
!= 0 ]]; then 84 | echo '`aws ecr get-authorization-token failed`' 85 | exit 1 86 | fi 87 | 88 | ENDPOINT=$(echo $DATA | jq -r .authorizationData[0].proxyEndpoint) 89 | if [[ -z $ENDPOINT ]]; then 90 | echo "got empty endpoint" 91 | exit 1 92 | fi 93 | EXPIRES_AT=$(echo $DATA | jq -r .authorizationData[0].expiresAt) 94 | 95 | TOKEN=$(echo $DATA | jq -r .authorizationData[0].authorizationToken | openssl base64 -a -A -d) 96 | IFS=: 97 | set -- $TOKEN 98 | USERNAME=$1 99 | PASSWORD=$2 100 | unset IFS 101 | 102 | if [[ -z $USERNAME ]]; then 103 | echo "got empty username" 104 | exit 1 105 | fi 106 | 107 | if [[ -z $PASSWORD ]]; then 108 | echo "got empty password" 109 | exit 1 110 | fi 111 | 112 | if [[ -n "$PUSH_AND_PULL_SECRETS" ]]; then 113 | OPTIONAL_IMAGE_PULL_SECRETS=$(cat <&3 132 | apiVersion: v1 133 | kind: ServiceAccount 134 | metadata: 135 | name: $KUBE_SA 136 | secrets: 137 | - name: ecr-creds 138 | $OPTIONAL_IMAGE_PULL_SECRETS 139 | --- 140 | apiVersion: v1 141 | kind: Secret 142 | metadata: 143 | name: ecr-creds 144 | annotations: 145 | build.knative.dev/docker-0: $ENDPOINT 146 | type: kubernetes.io/basic-auth 147 | data: 148 | username: $(echo -n $USERNAME | openssl base64 -a -A) 149 | password: $(echo -n $PASSWORD | openssl base64 -a -A) 150 | $OPTIONAL_PULL_SECRET 151 | EOF 152 | 153 | readonly EXIT=$? 154 | 155 | echo "the secret will expire at $(date -d @${EXPIRES_AT})." 156 | 157 | exit $EXIT 158 | -------------------------------------------------------------------------------- /gcr_helper/README.md: -------------------------------------------------------------------------------- 1 | # GCR Helper 2 | 3 | This script provisions (or verifies) a GCP service account with 4 | permissions to push images to GCR, and loads the credentials into a 5 | secret in your current kubernetes cluster and namespace. 6 | 7 | By default, the following resources will be provisioned: 8 | 9 | * A GCP service account named `push-image` with appropriate permissions. 
10 | * A Kubernetes service account (named `builder` by default) with secrets to 11 | enable pushing to GCR using the `push-image`'s credentials. 12 | 13 | 14 | To use, simply add a `serviceAccountName: builder` entry to your build definition 15 | 16 | ```yaml: 17 | apiVersion: build.knative.dev/v1alpha1 18 | kind: Build 19 | metadata: 20 | name: mybuild 21 | spec: 22 | serviceAccountName: builder 23 | source: ... 24 | template: ... 25 | ``` 26 | 27 | ## Usage 28 | 29 | ```shell 30 | # Usage assumes that the user has IAM Owner permissions for the project. 31 | gcloud config set core/project $PROJECT_ID 32 | gcr_helper/helper.sh 33 | ``` 34 | 35 | Optionally, `helper.sh` accepts two positional arguments to specify 36 | the namespace and kubernetes service account used: 37 | 38 | ```shell 39 | gcr_helper/helper.sh $MY_NAMESPACE builder-serviceaccount 40 | ``` 41 | 42 | This will output a log of operations performed or skipped: 43 | 44 | ``` 45 | Enabling iam.googleapis.com... 46 | Waiting for async operation operations/tmo-acf.11f13a3e-5b13-4a5e-91f0-9814e76708a3 to complete... 47 | Operation finished successfully. The following command can describe the Operation details: 48 | gcloud services operations describe operations/tmo-acf.11f13a3e-5b13-4a5e-91f0-9814e76708a3 49 | containerregistry.googleapis.com already enabled 50 | Could not find push-image@$PROJECT_ID.iam.gserviceaccount.com, creating... 51 | Created service account [push-image]. 
52 | Granting push-image@$PROJECT_ID.iam.gserviceaccount.com admin access to gs://us.artifacts.$PROJECT_ID.appspot.com 53 | push-image@$PROJECT_ID.iam.gserviceaccount.com already has access to gs://us.artifacts.$PROJECT_ID.appspot.com 54 | push-image@$PROJECT_ID.iam.gserviceaccount.com already has access to gs://eu.artifacts.$PROJECT_ID.appspot.com 55 | push-image@$PROJECT_ID.iam.gserviceaccount.com already has access to gs://asia.artifacts.$PROJECT_ID.appspot.com 56 | Found serviceAccount 'builder' with access to 'gcr-creds' 57 | created key [462561f97c7fc567f167b4cef8e9bfedde992143] of type [json] as [image-push-key.json] for [push-image@$PROJECT_ID.iam.gserviceaccount.com] 58 | serviceaccount "builder" configured 59 | secret "gcr-creds" created 60 | ``` 61 | -------------------------------------------------------------------------------- /gcr_helper/helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Copyright 2018 Google, Inc. All rights reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # A simple script to create or validate credentials to allow pushing 18 | # built images to gcr.io. 19 | # 20 | # This script assumes the following environment: 21 | # 22 | # 1. `gcloud`, `gsutil`, 'kubectl` and `jq` installed and in $PATH. 23 | # 24 | # 2. gcloud configured with a default project, or the $PROJECT_ID 25 | # environment variable set. 
26 | # 27 | # 3. The current `gcloud` credentials have permissions to create 28 | # service accounts and change IAM ACLs. 29 | # 30 | # 4. kubectl context configured, or appropriate flags passed on the 31 | # command line to select namespace and (optionally) builder service 32 | # account name. 33 | # 34 | # The script should warn if any of these preconditions cannot be met. 35 | # 36 | # Once all arguments are validated, this script will: 37 | # 38 | # 1. Provision a GCP Service Account and grant it access to all 39 | # existing GCR buckets. 40 | # 41 | # 2. Create a kubernetes secret with the appropriate metadata for 42 | # usage by build steps, accessible by a service account named 43 | # "builder" (by default). 44 | 45 | ## 46 | ## Validate environment. 47 | ## 48 | 49 | checkBinary() { 50 | if ! which $1 >&/dev/null; then 51 | echo "Unable to locate $1, please ensure it is installed and on your \$PATH." 52 | exit 1 53 | fi 54 | } 55 | 56 | checkBinary gcloud 57 | checkBinary gsutil 58 | checkBinary jq 59 | checkBinary kubectl 60 | 61 | if [[ -z "${PROJECT_ID:=$(gcloud config get-value project)}" ]]; then 62 | echo "Could not determine project id from $PROJECT_ID or gcloud defaults." 63 | exit 1 64 | fi 65 | 66 | readonly KUBECTL_FLAGS="${1:+ -n $1}" 67 | 68 | if ! kubectl $KUBECTL_FLAGS get sa >& /dev/null; then 69 | echo "Unable to read Kubernetes service accounts with 'kubectl $KUBECTL_FLAGS get sa'." 70 | exit 1 71 | fi 72 | 73 | readonly KUBE_SA=${2:-"builder"} 74 | 75 | 76 | ## 77 | ## Begin doing things 78 | ## 79 | 80 | : ${GCP_SA_NAME:=push-image} 81 | readonly GCP_SA=$GCP_SA_NAME@$PROJECT_ID.iam.gserviceaccount.com 82 | 83 | # Supress stderr, as many of the check queries will print extra output 84 | # if the resources are not present. Keep stderr on FD 3 to allow 85 | # printing output from explicit create commands. 
86 | exec 3>&2 87 | exec 2>/dev/null 88 | 89 | # Enable IAM and container registry if needed 90 | for SERVICE in iam containerregistry; do 91 | gcloud services list | grep -q $SERVICE.googleapis.com 92 | if [ $? -eq 0 ]; then 93 | echo "$SERVICE.googleapis.com already enabled" 94 | else 95 | echo "Enabling $SERVICE.googleapis.com..." 96 | gcloud services enable $SERVICE.googleapis.com 2>&3 || exit 2 97 | fi 98 | done 99 | 100 | if gcloud iam service-accounts describe $GCP_SA >&/dev/null; then 101 | echo "Using existing service account $GCP_SA" 102 | else 103 | echo "Could not find $GCP_SA, creating..." 104 | gcloud iam service-accounts create $GCP_SA_NAME 2>&3 || exit 2 105 | fi 106 | 107 | ensureIamPermission() { 108 | local BUCKET=$1 109 | gsutil iam get $BUCKET | \ 110 | jq -e ".bindings | map(select(.role == \"roles/storage.admin\" )) | any(.members | any(. == \"serviceAccount:$GCP_SA\"))" >/dev/null 111 | if [ $? -eq 0 ]; then 112 | echo "$GCP_SA already has access to $BUCKET" 113 | else 114 | echo "Granting $GCP_SA admin access to $BUCKET" 115 | gsutil iam ch serviceAccount:$GCP_SA:admin $BUCKET 2>&3 || exit 2 116 | fi 117 | } 118 | 119 | # GCR objects are stored in "artifacts.$PROJECT_ID.appspot.com" buckets 120 | # Grant access for the known regions and global bucket. 121 | for B in artifacts us.artifacts eu.artifacts asia.artifacts; do 122 | ensureIamPermission gs://$B.$PROJECT_ID.appspot.com 123 | done 124 | 125 | 126 | # See if secrets are already loaded. If not, add them. 127 | if [[ $(kubectl $KUBECTL_FLAGS get -o jsonpath='{.secrets[?(@.name=="gcr-creds")].name}' sa $KUBE_SA) == 'gcr-creds' ]]; then 128 | echo "Found serviceAccount '$KUBE_SA' with access to 'gcr-creds'" 129 | if [[ $(kubectl $KUBECTL_FLAGS get -o jsonpath={.type} secrets gcr-creds) == 'kubernetes.io/basic-auth' ]]; then 130 | echo "Secrets set up already, exiting" 131 | exit 0 132 | fi 133 | fi 134 | 135 | 136 | # Temporarily store a local JSON key for the service account. 
137 | gcloud iam service-accounts keys create image-push-key.json --iam-account $GCP_SA 2>&3 || exit 2 138 | 139 | cat <&3 140 | apiVersion: v1 141 | kind: ServiceAccount 142 | metadata: 143 | name: $KUBE_SA 144 | secrets: 145 | - name: gcr-creds 146 | --- 147 | apiVersion: v1 148 | kind: Secret 149 | metadata: 150 | name: gcr-creds 151 | annotations: 152 | build.knative.dev/docker-0: https://us.gcr.io 153 | build.knative.dev/docker-1: https://gcr.io 154 | build.knative.dev/docker-2: https://eu.gcr.io 155 | build.knative.dev/docker-3: https://asia.gcr.io 156 | type: kubernetes.io/basic-auth 157 | data: 158 | username: $(echo -n "_json_key" | openssl base64 -a -A) # Should be X2pzb25fa2V5 159 | password: $(openssl base64 -a -A < image-push-key.json) 160 | EOF 161 | 162 | readonly EXIT=$? 163 | 164 | rm image-push-key.json 165 | 166 | exit $EXIT 167 | -------------------------------------------------------------------------------- /hack/update-deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/library.sh 22 | 23 | cd ${REPO_ROOT_DIR} 24 | 25 | # Ensure we have at least one go file in the repo, so dep works 26 | echo "package main" > dummy.go 27 | echo "func main() {}" >> dummy.go 28 | 29 | # Ensure we have everything we need under vendor/ 30 | dep ensure 31 | 32 | rm dummy.go 33 | 34 | rm -rf $(find vendor/ -name 'OWNERS') 35 | rm -rf $(find vendor/ -name '*_test.go') 36 | -------------------------------------------------------------------------------- /jib/README.md: -------------------------------------------------------------------------------- 1 | # Jib 2 | 3 | This build template builds Java/Kotlin/Groovy/Scala source into a container image using Google's [Jib](https://github.com/GoogleContainerTools/jib) tool. 4 | 5 | Jib works with [Maven](https://github.com/GoogleContainerTools/jib/tree/master/jib-maven-plugin) and [Gradle](https://github.com/GoogleContainerTools/jib/tree/master/jib-gradle-plugin) projects, and this template comes in two flavors, [`jib-maven.yaml`](./jib-maven.yaml) for Maven projects and [`jib-gradle.yaml`](./jib-gradle.yaml) for Gradle projects. 6 | 7 | ## Create the template 8 | 9 | Maven: 10 | 11 | ```shell 12 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/jib/jib-maven.yaml 13 | ``` 14 | 15 | Gradle: 16 | 17 | ```shell 18 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/jib/jib-gradle.yaml 19 | ``` 20 | 21 | ## Parameters 22 | 23 | - **IMAGE**: The Docker image name to apply to the newly built image. (*required*) 24 | - **DIRECTORY**: The directory in the source repository where source should be found. 
(*default: .*) 25 | - **CACHE**: The name of the volume for caching Maven artifacts and base image layers (*default: empty-dir-volume*) 26 | 27 | ## ServiceAccount 28 | 29 | Jib builds an image and pushes it to the destination defined as the **IMAGE** parameter. In order to properly authenticate to the remote container registry, the build needs to have the proper credentials. This is achieved using a build `ServiceAccount`. 30 | 31 | For an example on how to create such a `ServiceAccount`, see the [Authentication](https://github.com/knative/docs/blob/master/build/auth.md#basic-authentication-docker) documentation page. 32 | 33 | ## Usage (Maven) 34 | 35 | To use the `jib-maven` template, first install the template: 36 | 37 | ```shell 38 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/jib/jib-maven.yaml 39 | ``` 40 | 41 | Then, define a `Build` that instantiates the template: 42 | 43 | `jib-maven-build.yaml`: 44 | ```yaml 45 | apiVersion: build.knative.dev/v1alpha1 46 | kind: Build 47 | metadata: 48 | name: jib-maven-build 49 | spec: 50 | source: 51 | git: 52 | url: https://github.com/my-user/my-repo 53 | revision: master 54 | template: 55 | name: jib-maven 56 | arguments: 57 | - name: IMAGE 58 | value: gcr.io/my-project/my-app 59 | ``` 60 | 61 | Run the build: 62 | 63 | ```shell 64 | kubectl apply -f jib-maven-build.yaml 65 | ``` 66 | 67 | If you would like to customize the container, configure the `jib-maven-plugin` in your `pom.xml`. 68 | See [setup instructions for Maven](https://github.com/GoogleContainerTools/jib/tree/master/jib-maven-plugin#setup) for more information. 69 | 70 | ### Speed up builds 71 | 72 | Using a persistent volume for caching can speed up your builds. 
To set up the cache, define a `PersistentVolumeClaim` and attach a corresponding volume to the `Build`: 73 | 74 | ```yaml 75 | kind: PersistentVolumeClaim 76 | apiVersion: v1 77 | metadata: 78 | name: jib-build-cache 79 | spec: 80 | accessModes: 81 | - ReadWriteOnce 82 | volumeMode: Filesystem 83 | resources: 84 | requests: 85 | storage: 8Gi 86 | --- 87 | apiVersion: build.knative.dev/v1alpha1 88 | kind: Build 89 | metadata: 90 | name: jib-maven-build 91 | spec: 92 | source: 93 | git: 94 | url: https://github.com/my-user/my-repo 95 | revision: master 96 | template: 97 | name: jib-maven 98 | arguments: 99 | - name: IMAGE 100 | value: gcr.io/my-project/my-app 101 | - name: CACHE 102 | value: persistent-cache 103 | 104 | volumes: 105 | - name: persistent-cache 106 | persistentVolumeClaim: 107 | claimName: jib-build-cache 108 | ``` 109 | 110 | This creates a `PersistentVolumeClaim` with 8Gi of storage and attaches it to the build by setting the `CACHE` argument on `spec.template.arguments`. 111 | 112 | Future builds should now run much faster. 113 | 114 | ## Usage (Gradle) 115 | 116 | This assumes the source repo is using the Gradle plugin, configured in 117 | `build.gradle`: 118 | 119 | ```groovy 120 | plugins { 121 | id 'com.google.cloud.tools.jib' version '0.9.10' 122 | } 123 | ``` 124 | 125 | See [setup instructions for 126 | Gradle](https://github.com/GoogleContainerTools/jib/tree/master/jib-gradle-plugin#setup). 
127 | 128 | To use the `jib-gradle` template, first install the template: 129 | 130 | ```shell 131 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/jib/jib-gradle.yaml 132 | ``` 133 | 134 | Then, define a build that instantiates the template: 135 | 136 | ```yaml 137 | apiVersion: build.knative.dev/v1alpha1 138 | kind: Build 139 | metadata: 140 | name: jib-gradle-build 141 | spec: 142 | source: 143 | git: 144 | url: https://github.com/my-user/my-repo 145 | revision: master 146 | template: 147 | name: jib-gradle 148 | arguments: 149 | - name: IMAGE 150 | value: gcr.io/my-project/my-app 151 | ``` 152 | -------------------------------------------------------------------------------- /jib/jib-gradle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: jib-gradle 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: The name of the image to push 9 | - name: DIRECTORY 10 | description: The directory containing the app, relative to the source repository root 11 | default: . 
12 | - name: CACHE 13 | description: The name of the volume for caching Maven artifacts and base image layers 14 | default: empty-dir-volume 15 | 16 | steps: 17 | - name: build-and-push 18 | image: gcr.io/cloud-builders/gradle 19 | args: 20 | - jib 21 | - -Duser.home=/builder/home 22 | - --image=${IMAGE} 23 | workingDir: /workspace/${DIRECTORY} 24 | volumeMounts: 25 | - name: ${CACHE} 26 | mountPath: /builder/home/.m2 27 | subPath: m2-cache 28 | - name: ${CACHE} 29 | mountPath: /builder/home/.cache 30 | subPath: jib-cache 31 | 32 | volumes: 33 | - name: empty-dir-volume 34 | emptyDir: {} 35 | -------------------------------------------------------------------------------- /jib/jib-maven.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: jib-maven 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: The name of the image to push 9 | - name: DIRECTORY 10 | description: The directory containing the app, relative to the source repository root 11 | default: . 
12 | - name: CACHE 13 | description: The name of the volume for caching Maven artifacts and base image layers 14 | default: empty-dir-volume 15 | 16 | steps: 17 | - name: build-and-push 18 | image: gcr.io/cloud-builders/mvn 19 | args: 20 | - compile 21 | - com.google.cloud.tools:jib-maven-plugin:build 22 | - -Duser.home=/builder/home 23 | - -Dimage=${IMAGE} 24 | workingDir: /workspace/${DIRECTORY} 25 | volumeMounts: 26 | - name: ${CACHE} 27 | mountPath: /builder/home/.m2 28 | subPath: m2-cache 29 | - name: ${CACHE} 30 | mountPath: /builder/home/.cache 31 | subPath: jib-cache 32 | 33 | volumes: 34 | - name: empty-dir-volume 35 | emptyDir: {} 36 | -------------------------------------------------------------------------------- /kaniko/README.md: -------------------------------------------------------------------------------- 1 | # Kaniko 2 | 3 | This build template builds source into a container image using Google's 4 | [`kaniko`](https://github.com/GoogleCloudPlatform/kaniko) tool. 5 | 6 | >kaniko doesn't depend on a Docker daemon and executes each command within a 7 | >Dockerfile completely in userspace. This enables building container images in 8 | >environments that can't easily or securely run a Docker daemon, such as a 9 | >standard Kubernetes cluster. 10 | > - [Kaniko website](https://github.com/GoogleCloudPlatform/kaniko) 11 | 12 | kaniko is meant to be run as an image, `gcr.io/kaniko-project/executor`. This 13 | makes it a perfect tool to be part of a Knative build. 14 | 15 | ## Create the template 16 | 17 | ``` 18 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/kaniko/kaniko.yaml 19 | ``` 20 | 21 | ## Parameters 22 | 23 | * **IMAGE**: The Docker image name to apply to the newly built image. 24 | (_required_) 25 | * **DOCKERFILE**: The path to the `Dockerfile` to execute (_default:_ 26 | `./Dockerfile`) 27 | 28 | ## ServiceAccount 29 | 30 | kaniko builds an image and pushes it to the destination defined as a parameter. 
31 | In order to properly authenticate to the remote container registry, the build 32 | needs to have the proper credentials. This is achieved using a build 33 | `ServiceAccount`. 34 | 35 | For an example on how to create such a `ServiceAccount` to push an image to 36 | Docker hub, see the 37 | [Authentication](https://github.com/knative/docs/blob/master/build/auth.md#basic-authentication-docker) 38 | documentation page. 39 | 40 | ## Usage 41 | 42 | Write a `Build` manifest and use the `template` section to refer to the kaniko 43 | build template. Set the value of the parameters such as the destination Docker 44 | image. Note the use of the `serviceAccountName` to push the image to a remote 45 | registry. 46 | 47 | ``` 48 | apiVersion: build.knative.dev/v1alpha1 49 | kind: Build 50 | metadata: 51 | name: kaniko-build 52 | spec: 53 | serviceAccountName: build-bot 54 | source: 55 | git: 56 | url: https://github.com/my-user/my-repo 57 | revision: master 58 | template: 59 | name: kaniko 60 | arguments: 61 | - name: IMAGE 62 | value: us.gcr.io/my-project/my-app 63 | ``` 64 | 65 | In this example, the Git repo being built is expected to have a `Dockerfile` at 66 | the root of the repository. 67 | -------------------------------------------------------------------------------- /kaniko/kaniko.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: kaniko 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: The name of the image to push 9 | - name: DOCKERFILE 10 | description: Path to the Dockerfile to build. 
11 | default: /workspace/Dockerfile 12 | 13 | steps: 14 | - name: build-and-push 15 | image: gcr.io/kaniko-project/executor 16 | args: 17 | - --dockerfile=${DOCKERFILE} 18 | - --destination=${IMAGE} 19 | env: 20 | - name: DOCKER_CONFIG 21 | value: /builder/home/.docker 22 | -------------------------------------------------------------------------------- /makisu/README.md: -------------------------------------------------------------------------------- 1 | # makisu 2 | 3 | This build template builds source into a container image using uber's 4 | [`makisu`](https://github.com/uber/makisu) tool. 5 | 6 | >Makisu is a fast and flexible Docker image build tool designed for unprivileged 7 | >containerized environments such as Mesos or Kubernetes. 8 | > - [makisu website](https://github.com/uber/makisu) 9 | 10 | makisu is meant to be run as an image, `gcr.io/makisu-project/makisu`. This 11 | makes it a perfect tool to be part of a Knative build. 12 | 13 | ## Create the registry configuration 14 | 15 | makisu uses a [registry configuration](https://github.com/uber/makisu/blob/master/docs/REGISTRY.md) which should be stored as a secret in Kubernetes. Adjust the `registry.yaml` in this directory to contain your user and password for the Docker hub (or configure a different [registry](https://github.com/uber/makisu/blob/master/docs/REGISTRY.md#examples)). Keep in mind that the secret must exist in the same namespace as the build runs: 16 | 17 | ```bash 18 | kubectl --namespace default create secret generic docker-registry-config --from-file=./registry.yaml 19 | ``` 20 | 21 | ## Create the template 22 | 23 | ``` 24 | kubectl apply -f https://raw.githubusercontent.com/knative/build-templates/master/makisu/makisu.yaml 25 | ``` 26 | 27 | ## Parameters 28 | 29 | * **IMAGE**: The Docker image name to apply to the newly built image. 
30 | (_required_) 31 | * **CONTEXTPATH**: The path to the build context (_default:_ 32 | `/workspace`) 33 | * **PUSH_REGISTRY**: The Registry to push the image to (_default:_ 34 | `index.docker.io`) 35 | * **REGISTRY_SECRET**: Secret containing information about the used registry (_default:_ 36 | `docker-registry-config`) 37 | 38 | ## Usage 39 | 40 | Write a `Build` manifest and use the `template` section to refer to the makisu 41 | build template. Set the value of the parameters such as the destination Docker 42 | image. 43 | 44 | In this example, the Git repo being built is expected to have a `Dockerfile` at 45 | the root of the repository. 46 | 47 | ### Docker Registry 48 | 49 | ```yaml 50 | apiVersion: build.knative.dev/v1alpha1 51 | kind: Build 52 | metadata: 53 | name: makisu-build 54 | spec: 55 | source: 56 | git: 57 | url: https://github.com/my-user/my-repo 58 | revision: master 59 | template: 60 | name: makisu 61 | arguments: 62 | - name: IMAGE 63 | value: my-project/my-app 64 | ``` 65 | 66 | ### Other Registries 67 | 68 | The `PUSH_REGISTRY` **must** match the name of the registry specified in the registry.yaml 69 | 70 | ```yaml 71 | apiVersion: build.knative.dev/v1alpha1 72 | kind: Build 73 | metadata: 74 | name: makisu-build-gcr 75 | spec: 76 | source: 77 | git: 78 | url: https://github.com/my-user/my-repo 79 | revision: master 80 | template: 81 | name: makisu 82 | arguments: 83 | - name: IMAGE 84 | value: eu.gcr.io/gke-on-premise-inovex 85 | - name: PUSH_REGISTRY # must match the registry in the secret 86 | value: eu.gcr.io 87 | - name: REGISTRY_SECRET 88 | value: gcr-registry-config 89 | ``` 90 | -------------------------------------------------------------------------------- /makisu/makisu.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: BuildTemplate 3 | metadata: 4 | name: makisu 5 | spec: 6 | parameters: 7 | - name: IMAGE 8 | description: The name of the 
image to push 9 | - name: CONTEXTPATH 10 | description: Path to the build context. 11 | default: /workspace 12 | - name: PUSH_REGISTRY 13 | description: Registry to push image to. 14 | default: index.docker.io 15 | - name: REGISTRY_SECRET 16 | description: Secret containing information about the used registry. 17 | default: docker-registry-config 18 | steps: 19 | - name: build-and-push 20 | image: gcr.io/makisu-project/makisu:v0.1.9 21 | args: 22 | - build 23 | - --push=${PUSH_REGISTRY} 24 | - --registry-config=/registry-config 25 | - --modifyfs=true 26 | - --tag=${IMAGE} 27 | - --registry-config=/registry-config/registry.yaml 28 | - ${CONTEXTPATH} 29 | env: 30 | - name: DOCKER_CONFIG 31 | value: /builder/home/.docker 32 | volumeMounts: 33 | - name: registry-config 34 | mountPath: /registry-config 35 | volumes: 36 | - name: registry-config 37 | secret: 38 | secretName: ${REGISTRY_SECRET} 39 | -------------------------------------------------------------------------------- /makisu/registry.yaml: -------------------------------------------------------------------------------- 1 | index.docker.io: 2 | .*: 3 | security: 4 | tls: 5 | client: 6 | disabled: false 7 | basic: 8 | username: "" 9 | password: "" 10 | -------------------------------------------------------------------------------- /test/OWNERS: -------------------------------------------------------------------------------- 1 | # The OWNERS file is used by prow to automatically merge approved PRs. 2 | 3 | approvers: 4 | - productivity-approvers 5 | 6 | reviewers: 7 | - productivity-reviewers 8 | 9 | labels: 10 | - area/test-and-release 11 | -------------------------------------------------------------------------------- /test/README.md: -------------------------------------------------------------------------------- 1 | # Test 2 | 3 | This directory contains tests and testing docs. 
4 | 5 | * [Unit tests](#running-unit-tests) currently reside in the codebase alongside the code they test 6 | * [End-to-end tests](#running-end-to-end-tests) 7 | 8 | 9 | ## Running unit tests 10 | 11 | TODO(#22): Write real unit tests. 12 | 13 | ## Running end-to-end tests 14 | 15 | ### Dependencies 16 | 17 | You might need to install `kubetest` in order to run the end-to-end tests locally: 18 | 19 | ```shell 20 | go get -u k8s.io/test-infra/kubetest 21 | ``` 22 | 23 | Simply run the `e2e-tests.sh` script, setting `$PROJECT_ID` first to your GCP project. The script 24 | will create a GKE cluster, install Knative, run the end-to-end tests and delete the cluster. 25 | 26 | If you already have a cluster set, ensure that `$PROJECT_ID` is empty and call the script with the 27 | `--run-tests` argument. Note that this requires you to have Knative Build installed and configured 28 | to your particular configuration setup. 29 | -------------------------------------------------------------------------------- /test/build-cf.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: Build 3 | metadata: 4 | name: cf-build 5 | spec: 6 | source: 7 | git: 8 | url: https://github.com/my-user/my-repo 9 | revision: v1.0 10 | template: 11 | name: buildpacks-cloudfoundry 12 | arguments: 13 | - name: IMAGE 14 | value: us.gcr.io/my-project/my-app 15 | -------------------------------------------------------------------------------- /test/build-cnb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: build.knative.dev/v1alpha1 2 | kind: Build 3 | metadata: 4 | name: cnb-build 5 | spec: 6 | source: 7 | git: 8 | url: https://github.com/my-user/my-repo 9 | revision: v1.0 10 | template: 11 | name: buildpacks-cnb 12 | arguments: 13 | - name: IMAGE 14 | value: us.gcr.io/my-project/my-app 15 | 
-------------------------------------------------------------------------------- /test/e2e-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # This script runs the end-to-end tests for build templates. 18 | 19 | # If you already have a Knative Build cluster setup and kubectl pointing 20 | # to it, call this script with the --run-tests arguments and it will use 21 | # the cluster and run the tests. 22 | 23 | # Calling this script without arguments will create a new cluster in 24 | # project $PROJECT_ID, run the tests and delete the cluster. 25 | 26 | source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/e2e-tests.sh 27 | 28 | # Helper functions. 29 | 30 | function run_cloudfoundry_buildpacks_test() { 31 | subheader "Running cloudfoundry test" 32 | echo "Installing template:" 33 | kubectl apply -f buildpacks/cf.yaml || return 1 34 | echo "Checking that template is installed:" 35 | kubectl get buildtemplates || return 1 36 | echo "Creating build:" 37 | kubectl apply -f test/build-cf.yaml || return 1 38 | # Wait 5s for processing to start 39 | sleep 5 40 | echo "Checking that build was started:" 41 | kubectl get build cf-build -oyaml 42 | # TODO(adrcunha): Add proper verification. 
43 | } 44 | 45 | 46 | function run_cloud_native_buildpacks_test() { 47 | subheader "Running cloud native buildpacks test" 48 | echo "Installing template:" 49 | kubectl apply -f buildpacks/cnb.yaml || return 1 50 | echo "Checking that template is installed:" 51 | kubectl get buildtemplates || return 1 52 | echo "Creating build:" 53 | kubectl apply -f test/build-cnb.yaml || return 1 54 | # Wait 5s for processing to start 55 | sleep 5 56 | echo "Checking that build was started:" 57 | kubectl get build cnb-build -oyaml 58 | # TODO(adrcunha): Add proper verification. 59 | } 60 | 61 | function knative_setup() { 62 | header "Starting Knative Build" 63 | subheader "Installing Knative Build" 64 | echo "Installing Build from ${KNATIVE_BUILD_RELEASE}" 65 | kubectl apply -f ${KNATIVE_BUILD_RELEASE} || return 1 66 | wait_until_pods_running knative-build || return 1 67 | } 68 | 69 | # Script entry point. 70 | 71 | initialize $@ 72 | 73 | header "Running tests" 74 | 75 | # TODO(adrcunha): Add more tests. 76 | run_cloudfoundry_buildpacks_test || fail_test 77 | run_cloud_native_buildpacks_test || fail_test 78 | 79 | success 80 | -------------------------------------------------------------------------------- /test/presubmit-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | # This script runs the presubmit tests, in the right order. 18 | # It is started by prow for each PR. 19 | # For convenience, it can also be executed manually. 20 | 21 | source $(dirname $0)/../vendor/knative.dev/test-infra/scripts/presubmit-tests.sh 22 | 23 | # TODO(#23): Write more build tests. 24 | 25 | function unit_tests() { 26 | header "TODO(#22): Write unit tests" 27 | } 28 | 29 | # We use the default build and integration test runners. 30 | 31 | main $@ 32 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/README.md: -------------------------------------------------------------------------------- 1 | # Helper scripts 2 | 3 | This directory contains helper scripts used by Prow test jobs, as well as local 4 | development scripts. 5 | 6 | ## Using the `presubmit-tests.sh` helper script 7 | 8 | This is a helper script to run the presubmit tests. To use it: 9 | 10 | 1. Source this script. 11 | 12 | 1. [optional] Define the function `build_tests()`. If you don't define this 13 | function, the default action for running the build tests is to: 14 | 15 | - check markdown files 16 | - run `go build` on the entire repo 17 | - run `/hack/verify-codegen.sh` (if it exists) 18 | - check licenses in all go packages 19 | 20 | The markdown link checker tool doesn't check `localhost` links by default. 21 | Its configuration file, `markdown-link-check-config.json`, lives in the 22 | `test-infra/scripts` directory. To override it, create a file with the same 23 | name, containing the custom config in the `/test` directory. 
24 | 25 | The markdown lint tool ignores long lines by default. Its configuration file, 26 | `markdown-lint-config.rc`, lives in the `test-infra/scripts` directory. To 27 | override it, create a file with the same name, containing the custom config 28 | in the `/test` directory. 29 | 30 | 1. [optional] Customize the default build test runner, if you're using it. Set 31 | the following environment variables if the default values don't fit your 32 | needs: 33 | 34 | - `DISABLE_MD_LINTING`: Disable linting markdown files, defaults to 0 35 | (false). 36 | - `DISABLE_MD_LINK_CHECK`: Disable checking links in markdown files, defaults 37 | to 0 (false). 38 | - `PRESUBMIT_TEST_FAIL_FAST`: Fail the presubmit test immediately if a test 39 | fails, defaults to 0 (false). 40 | 41 | 1. [optional] Define the functions `pre_build_tests()` and/or 42 | `post_build_tests()`. These functions will be called before or after the 43 | build tests (either your custom one or the default action) and will cause the 44 | test to fail if they don't return success. 45 | 46 | 1. [optional] Define the function `unit_tests()`. If you don't define this 47 | function, the default action for running the unit tests is to run all go 48 | tests in the repo. 49 | 50 | 1. [optional] Define the functions `pre_unit_tests()` and/or 51 | `post_unit_tests()`. These functions will be called before or after the unit 52 | tests (either your custom one or the default action) and will cause the test 53 | to fail if they don't return success. 54 | 55 | 1. [optional] Define the function `integration_tests()`. If you don't define 56 | this function, the default action for running the integration tests is to run 57 | all `./test/e2e-*tests.sh` scripts, in sequence. 58 | 59 | 1. [optional] Define the functions `pre_integration_tests()` and/or 60 | `post_integration_tests()`. 
These functions will be called before or after 61 | the integration tests (either your custom one or the default action) and will 62 | cause the test to fail if they don't return success. 63 | 64 | 1. Call the `main()` function passing `$@` (without quotes). 65 | 66 | Running the script without parameters, or with the `--all-tests` flag causes all 67 | tests to be executed, in the right order (i.e., build, then unit, then 68 | integration tests). 69 | 70 | Use the flags `--build-tests`, `--unit-tests` and `--integration-tests` to run a 71 | specific set of tests. The flag `--emit-metrics` is used to emit metrics when 72 | running the tests, and is automatically handled by the default action for 73 | integration tests (see above). 74 | 75 | The script will automatically skip all presubmit tests for PRs where all changed 76 | files are exempt of tests (e.g., a PR changing only the `OWNERS` file). 77 | 78 | Also, for PRs touching only markdown files, the unit and integration tests are 79 | skipped. 80 | 81 | ### Sample presubmit test script 82 | 83 | ```bash 84 | source vendor/knative.dev/test-infra/scripts/presubmit-tests.sh 85 | 86 | function post_build_tests() { 87 | echo "Cleaning up after build tests" 88 | rm -fr ./build-cache 89 | } 90 | 91 | function unit_tests() { 92 | make -C tests test 93 | } 94 | 95 | function pre_integration_tests() { 96 | echo "Cleaning up before integration tests" 97 | rm -fr ./staging-area 98 | } 99 | 100 | # We use the default integration test runner. 101 | 102 | main $@ 103 | ``` 104 | 105 | ## Using the `e2e-tests.sh` helper script 106 | 107 | This is a helper script for Knative E2E test scripts. To use it: 108 | 109 | 1. [optional] Customize the test cluster. Set the following environment 110 | variables if the default values don't fit your needs: 111 | 112 | - `E2E_CLUSTER_REGION`: Cluster region, defaults to `us-central1`. 
113 | - `E2E_CLUSTER_BACKUP_REGIONS`: Space-separated list of regions to retry test 114 | cluster creation in case of stockout. Defaults to `us-west1 us-east1`. 115 | - `E2E_CLUSTER_ZONE`: Cluster zone (e.g., `a`), defaults to none (i.e. use a 116 | regional cluster). 117 | - `E2E_CLUSTER_BACKUP_ZONES`: Space-separated list of zones to retry test 118 | cluster creation in case of stockout. If defined, 119 | `E2E_CLUSTER_BACKUP_REGIONS` will be ignored thus it defaults to none. 120 | - `E2E_CLUSTER_MACHINE`: Cluster node machine type, defaults to 121 | `n1-standard-4`. 122 | - `E2E_MIN_CLUSTER_NODES`: Minimum number of nodes in the cluster when 123 | autoscaling, defaults to 1. 124 | - `E2E_MAX_CLUSTER_NODES`: Maximum number of nodes in the cluster when 125 | autoscaling, defaults to 3. 126 | 127 | 1. Source the script. 128 | 129 | 1. [optional] Write the `knative_setup()` function, which will set up your 130 | system under test (e.g., Knative Serving). This function won't be called if 131 | you use the `--skip-knative-setup` flag. 132 | 133 | 1. [optional] Write the `knative_teardown()` function, which will tear down your 134 | system under test (e.g., Knative Serving). This function won't be called if 135 | you use the `--skip-knative-setup` flag. 136 | 137 | 1. [optional] Write the `test_setup()` function, which will set up the test 138 | resources. 139 | 140 | 1. [optional] Write the `test_teardown()` function, which will tear down the 141 | test resources. 142 | 143 | 1. [optional] Write the `cluster_setup()` function, which will set up any 144 | resources before the test cluster is created. 145 | 146 | 1. [optional] Write the `cluster_teardown()` function, which will tear down any 147 | resources after the test cluster is destroyed. 148 | 149 | 1. [optional] Write the `dump_extra_cluster_state()` function. It will be called 150 | when a test fails, and can dump extra information about the current state of 151 | the cluster (typically using `kubectl`). 
152 | 153 | 1. [optional] Write the `parse_flags()` function. It will be called whenever an 154 | unrecognized flag is passed to the script, allowing you to define your own 155 | flags. The function must return 0 if the flag is unrecognized, or the number 156 | of items to skip in the command line if the flag was parsed successfully. For 157 | example, return 1 for a simple flag, and 2 for a flag with a parameter. 158 | 159 | 1. Call the `initialize()` function passing `$@` (without quotes). 160 | 161 | 1. Write logic for the end-to-end tests. Run all go tests using `go_test_e2e()` 162 | (or `report_go_test()` if you need a more fine-grained control) and call 163 | `fail_test()` or `success()` if any of them failed. The environment variable 164 | `KO_DOCKER_REPO` and `E2E_PROJECT_ID` will be set according to the test 165 | cluster. You can also use the following boolean (0 is false, 1 is true) 166 | environment variables for the logic: 167 | 168 | - `EMIT_METRICS`: true if `--emit-metrics` was passed. 169 | 170 | All environment variables above are marked read-only. 171 | 172 | **Notes:** 173 | 174 | 1. Calling your script without arguments will create a new cluster in the GCP 175 | project `$PROJECT_ID` and run the tests against it. 176 | 177 | 1. Calling your script with `--run-tests` and the variable `KO_DOCKER_REPO` set 178 | will immediately start the tests against the cluster currently configured for 179 | `kubectl`. 180 | 181 | 1. By default Istio is installed on the cluster via Addon, use 182 | `--skip-istio-addon` if you choose not to have it preinstalled. 183 | 184 | 1. You can force running the tests against a specific GKE cluster version by 185 | using the `--cluster-version` flag and passing a full version as the flag 186 | value. 187 | 188 | ### Sample end-to-end test script 189 | 190 | This script will test that the latest Knative Serving nightly release works. 
It 191 | defines a special flag (`--no-knative-wait`) that causes the script not to wait 192 | for Knative Serving to be up before running the tests. It also requires that the 193 | test cluster is created in a specific region, `us-west2`. 194 | 195 | ```bash 196 | 197 | # This test requires a cluster in LA 198 | E2E_CLUSTER_REGION=us-west2 199 | 200 | source vendor/knative.dev/test-infra/scripts/e2e-tests.sh 201 | 202 | function knative_setup() { 203 | start_latest_knative_serving 204 | if (( WAIT_FOR_KNATIVE )); then 205 | wait_until_pods_running knative-serving || fail_test "Knative Serving not up" 206 | fi 207 | } 208 | 209 | function parse_flags() { 210 | if [[ "$1" == "--no-knative-wait" ]]; then 211 | WAIT_FOR_KNATIVE=0 212 | return 1 213 | fi 214 | return 0 215 | } 216 | 217 | WAIT_FOR_KNATIVE=1 218 | 219 | initialize $@ 220 | 221 | # TODO: use go_test_e2e to run the tests. 222 | kubectl get pods || fail_test 223 | 224 | success 225 | ``` 226 | 227 | ## Using the `release.sh` helper script 228 | 229 | This is a helper script for Knative release scripts. To use it: 230 | 231 | 1. Source the script. 232 | 233 | 1. [optional] By default, the release script will run 234 | `./test/presubmit-tests.sh` as the release validation tests. If you need to 235 | run something else, set the environment variable `VALIDATION_TESTS` to the 236 | executable to run. 237 | 238 | 1. Write logic for building the release in a function named `build_release()`. 239 | Set the environment variable `ARTIFACTS_TO_PUBLISH` to the list of files 240 | created, space separated. Use the following boolean (0 is false, 1 is true) 241 | and string environment variables for the logic: 242 | 243 | - `RELEASE_VERSION`: contains the release version if `--version` was passed. 244 | This also overrides the value of the `TAG` variable as `v`. 245 | - `RELEASE_BRANCH`: contains the release branch if `--branch` was passed. 
246 | Otherwise it's empty and `master` HEAD will be considered the release 247 | branch. 248 | - `RELEASE_NOTES`: contains the filename with the release notes if 249 | `--release-notes` was passed. The release notes is a simple markdown file. 250 | - `RELEASE_GCS_BUCKET`: contains the GCS bucket name to store the manifests 251 | if `--release-gcs` was passed, otherwise the default value 252 | `knative-nightly/` will be used. It is empty if `--publish` was not 253 | passed. 254 | - `BUILD_COMMIT_HASH`: the commit short hash for the current repo. If the 255 | current git tree is dirty, it will have `-dirty` appended to it. 256 | - `BUILD_YYYYMMDD`: current UTC date in `YYYYMMDD` format. 257 | - `BUILD_TIMESTAMP`: human-readable UTC timestamp in `YYYY-MM-DD HH:MM:SS` 258 | format. 259 | - `BUILD_TAG`: a tag in the form `v$BUILD_YYYYMMDD-$BUILD_COMMIT_HASH`. 260 | - `KO_DOCKER_REPO`: contains the GCR to store the images if `--release-gcr` 261 | was passed, otherwise the default value `gcr.io/knative-nightly` will be 262 | used. It is set to `ko.local` if `--publish` was not passed. 263 | - `SKIP_TESTS`: true if `--skip-tests` was passed. This is handled 264 | automatically. 265 | - `TAG_RELEASE`: true if `--tag-release` was passed. In this case, the 266 | environment variable `TAG` will contain the release tag in the form 267 | `v$BUILD_TAG`. 268 | - `PUBLISH_RELEASE`: true if `--publish` was passed. In this case, the 269 | environment variable `KO_FLAGS` will be updated with the `-L` option and 270 | `TAG` will contain the release tag in the form `v$RELEASE_VERSION`. 271 | - `PUBLISH_TO_GITHUB`: true if `--version`, `--branch` and 272 | `--publish-release` were passed. 273 | 274 | All boolean environment variables default to false for safety. 275 | 276 | All environment variables above, except `KO_FLAGS`, are marked read-only once 277 | `main()` is called (see below). 278 | 279 | 1. Call the `main()` function passing `$@` (without quotes). 
280 | 281 | ### Sample release script 282 | 283 | ```bash 284 | source vendor/knative.dev/test-infra/scripts/release.sh 285 | 286 | function build_release() { 287 | # config/ contains the manifests 288 | ko resolve ${KO_FLAGS} -f config/ > release.yaml 289 | ARTIFACTS_TO_PUBLISH="release.yaml" 290 | } 291 | 292 | main $@ 293 | ``` 294 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/dummy.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Knative Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | https://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package scripts 18 | 19 | import ( 20 | "fmt" 21 | ) 22 | 23 | func main() { 24 | fmt.Println("This is a dummy go file so `go dep` can be used with knative/test-infra/scripts") 25 | fmt.Println("This file can be safely removed if one day this directory contains real, useful go code") 26 | } 27 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/e2e-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2019 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # This is a helper script for Knative E2E test scripts. 18 | # See README.md for instructions on how to use it. 19 | 20 | source $(dirname ${BASH_SOURCE})/library.sh 21 | 22 | # Build a resource name based on $E2E_BASE_NAME, a suffix and $BUILD_NUMBER. 23 | # Restricts the name length to 40 chars (the limit for resource names in GCP). 24 | # Name will have the form $E2E_BASE_NAME-$BUILD_NUMBER. 25 | # Parameters: $1 - name suffix 26 | function build_resource_name() { 27 | local prefix=${E2E_BASE_NAME}-$1 28 | local suffix=${BUILD_NUMBER} 29 | # Restrict suffix length to 20 chars 30 | if [[ -n "${suffix}" ]]; then 31 | suffix=${suffix:${#suffix}<20?0:-20} 32 | fi 33 | local name="${prefix:0:20}${suffix}" 34 | # Ensure name doesn't end with "-" 35 | echo "${name%-}" 36 | } 37 | 38 | # Test cluster parameters 39 | 40 | # Configurable parameters 41 | # export E2E_CLUSTER_REGION and E2E_CLUSTER_ZONE as they're used in the cluster setup subprocess 42 | export E2E_CLUSTER_REGION=${E2E_CLUSTER_REGION:-us-central1} 43 | # By default we use regional clusters. 
44 | export E2E_CLUSTER_ZONE=${E2E_CLUSTER_ZONE:-} 45 | 46 | # Default backup regions in case of stockouts; by default we don't fall back to a different zone in the same region 47 | readonly E2E_CLUSTER_BACKUP_REGIONS=${E2E_CLUSTER_BACKUP_REGIONS:-us-west1 us-east1} 48 | readonly E2E_CLUSTER_BACKUP_ZONES=${E2E_CLUSTER_BACKUP_ZONES:-} 49 | 50 | readonly E2E_CLUSTER_MACHINE=${E2E_CLUSTER_MACHINE:-n1-standard-4} 51 | readonly E2E_GKE_ENVIRONMENT=${E2E_GKE_ENVIRONMENT:-prod} 52 | readonly E2E_GKE_COMMAND_GROUP=${E2E_GKE_COMMAND_GROUP:-beta} 53 | 54 | # Each knative repository may have a different cluster size requirement here, 55 | # so we allow calling code to set these parameters. If they are not set we 56 | # use some sane defaults. 57 | readonly E2E_MIN_CLUSTER_NODES=${E2E_MIN_CLUSTER_NODES:-1} 58 | readonly E2E_MAX_CLUSTER_NODES=${E2E_MAX_CLUSTER_NODES:-3} 59 | 60 | readonly E2E_BASE_NAME="k${REPO_NAME}" 61 | readonly E2E_CLUSTER_NAME=$(build_resource_name e2e-cls) 62 | readonly E2E_NETWORK_NAME=$(build_resource_name e2e-net) 63 | readonly TEST_RESULT_FILE=/tmp/${E2E_BASE_NAME}-e2e-result 64 | 65 | # Flag whether test is using a boskos GCP project 66 | IS_BOSKOS=0 67 | 68 | # Tear down the test resources. 69 | function teardown_test_resources() { 70 | # On boskos, save time and don't teardown as the cluster will be destroyed anyway. 71 | (( IS_BOSKOS )) && return 72 | header "Tearing down test environment" 73 | function_exists test_teardown && test_teardown 74 | (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_teardown && knative_teardown 75 | # Delete the kubernetes source downloaded by kubetest 76 | rm -fr kubernetes kubernetes.tar.gz 77 | } 78 | 79 | # Run the given E2E tests. Assume tests are tagged e2e, unless `-tags=XXX` is passed. 80 | # Parameters: $1..$n - any go test flags, then directories containing the tests to run. 
81 | function go_test_e2e() { 82 | local test_options="" 83 | local go_options="" 84 | (( EMIT_METRICS )) && test_options="-emitmetrics" 85 | [[ ! " $@" == *" -tags="* ]] && go_options="-tags=e2e" 86 | report_go_test -v -race -count=1 ${go_options} $@ ${test_options} 87 | } 88 | 89 | # Dump info about the test cluster. If dump_extra_cluster_info() is defined, calls it too. 90 | # This is intended to be called when a test fails to provide debugging information. 91 | function dump_cluster_state() { 92 | echo "***************************************" 93 | echo "*** E2E TEST FAILED ***" 94 | echo "*** Start of information dump ***" 95 | echo "***************************************" 96 | echo ">>> All resources:" 97 | kubectl get all --all-namespaces 98 | echo ">>> Services:" 99 | kubectl get services --all-namespaces 100 | echo ">>> Events:" 101 | kubectl get events --all-namespaces 102 | function_exists dump_extra_cluster_state && dump_extra_cluster_state 103 | echo "***************************************" 104 | echo "*** E2E TEST FAILED ***" 105 | echo "*** End of information dump ***" 106 | echo "***************************************" 107 | } 108 | 109 | # On a Prow job, save some metadata about the test for Testgrid. 110 | function save_metadata() { 111 | (( ! 
IS_PROW )) && return 112 | local geo_key="Region" 113 | local geo_value="${E2E_CLUSTER_REGION}" 114 | if [[ -n "${E2E_CLUSTER_ZONE}" ]]; then 115 | geo_key="Zone" 116 | geo_value="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}" 117 | fi 118 | local cluster_version="$(gcloud container clusters list --project=${E2E_PROJECT_ID} --format='value(currentMasterVersion)')" 119 | cat << EOF > ${ARTIFACTS}/metadata.json 120 | { 121 | "E2E:${geo_key}": "${geo_value}", 122 | "E2E:Machine": "${E2E_CLUSTER_MACHINE}", 123 | "E2E:Version": "${cluster_version}", 124 | "E2E:MinNodes": "${E2E_MIN_CLUSTER_NODES}", 125 | "E2E:MaxNodes": "${E2E_MAX_CLUSTER_NODES}" 126 | } 127 | EOF 128 | } 129 | 130 | # Set E2E_CLUSTER_VERSION to a specific GKE version. 131 | # Parameters: $1 - target GKE version (X.Y, X.Y.Z, X.Y.Z-gke.W, default or gke-latest). 132 | # $2 - region[-zone] where the cluster will be created. 133 | function resolve_k8s_version() { 134 | local target_version="$1" 135 | if [[ "${target_version}" == "default" ]]; then 136 | local version="$(gcloud container get-server-config \ 137 | --format='value(defaultClusterVersion)' \ 138 | --zone=$2)" 139 | [[ -z "${version}" ]] && return 1 140 | E2E_CLUSTER_VERSION="${version}" 141 | echo "Using default version, ${E2E_CLUSTER_VERSION}" 142 | return 0 143 | fi 144 | # Fetch valid versions 145 | local versions="$(gcloud container get-server-config \ 146 | --format='value(validMasterVersions)' \ 147 | --zone=$2)" 148 | [[ -z "${versions}" ]] && return 1 149 | local gke_versions=($(echo -n "${versions//;/ }")) 150 | echo "Available GKE versions in $2 are [${versions//;/, }]" 151 | if [[ "${target_version}" == "gke-latest" ]]; then 152 | # Get first (latest) version, excluding the "-gke.#" suffix 153 | E2E_CLUSTER_VERSION="${gke_versions[0]}" 154 | echo "Using latest version, ${E2E_CLUSTER_VERSION}" 155 | else 156 | local latest="$(echo "${gke_versions[@]}" | tr ' ' '\n' | grep -E ^${target_version} | cut -f1 -d- | sort | tail -1)" 157 | if [[ 
-z "${latest}" ]]; then 158 | echo "ERROR: version ${target_version} is not available" 159 | return 1 160 | fi 161 | E2E_CLUSTER_VERSION="${latest}" 162 | echo "Using ${E2E_CLUSTER_VERSION} for supplied version ${target_version}" 163 | fi 164 | return 0 165 | } 166 | 167 | # Create a test cluster with kubetest and call the current script again. 168 | function create_test_cluster() { 169 | # Fail fast during setup. 170 | set -o errexit 171 | set -o pipefail 172 | 173 | if function_exists cluster_setup; then 174 | cluster_setup || fail_test "cluster setup failed" 175 | fi 176 | 177 | echo "Cluster will have a minimum of ${E2E_MIN_CLUSTER_NODES} and a maximum of ${E2E_MAX_CLUSTER_NODES} nodes." 178 | 179 | # Smallest cluster required to run the end-to-end-tests 180 | local CLUSTER_CREATION_ARGS=( 181 | --gke-create-command="container clusters create --quiet --enable-autoscaling --min-nodes=${E2E_MIN_CLUSTER_NODES} --max-nodes=${E2E_MAX_CLUSTER_NODES} --scopes=cloud-platform --enable-basic-auth --no-issue-client-certificate ${GKE_ADDONS} ${EXTRA_CLUSTER_CREATION_FLAGS[@]}" 182 | --gke-shape={\"default\":{\"Nodes\":${E2E_MIN_CLUSTER_NODES}\,\"MachineType\":\"${E2E_CLUSTER_MACHINE}\"}} 183 | --provider=gke 184 | --deployment=gke 185 | --cluster="${E2E_CLUSTER_NAME}" 186 | --gcp-network="${E2E_NETWORK_NAME}" 187 | --gcp-node-image="${SERVING_GKE_IMAGE}" 188 | --gke-environment="${E2E_GKE_ENVIRONMENT}" 189 | --gke-command-group="${E2E_GKE_COMMAND_GROUP}" 190 | --test=false 191 | --up 192 | ) 193 | if (( ! IS_BOSKOS )); then 194 | CLUSTER_CREATION_ARGS+=(--gcp-project=${GCP_PROJECT}) 195 | fi 196 | # SSH keys are not used, but kubetest checks for their existence. 197 | # Touch them so if they don't exist, empty files are created to satisfy the check. 198 | mkdir -p $HOME/.ssh 199 | touch $HOME/.ssh/google_compute_engine.pub 200 | touch $HOME/.ssh/google_compute_engine 201 | # Assume test failed (see details in set_test_return_code()). 
202 | set_test_return_code 1 203 | local gcloud_project="${GCP_PROJECT}" 204 | [[ -z "${gcloud_project}" ]] && gcloud_project="$(gcloud config get-value project)" 205 | echo "gcloud project is ${gcloud_project}" 206 | echo "gcloud user is $(gcloud config get-value core/account)" 207 | (( IS_BOSKOS )) && echo "Using boskos for the test cluster" 208 | [[ -n "${GCP_PROJECT}" ]] && echo "GCP project for test cluster is ${GCP_PROJECT}" 209 | echo "Test script is ${E2E_SCRIPT}" 210 | # Set arguments for this script again 211 | local test_cmd_args="--run-tests" 212 | (( EMIT_METRICS )) && test_cmd_args+=" --emit-metrics" 213 | (( SKIP_KNATIVE_SETUP )) && test_cmd_args+=" --skip-knative-setup" 214 | [[ -n "${GCP_PROJECT}" ]] && test_cmd_args+=" --gcp-project ${GCP_PROJECT}" 215 | [[ -n "${E2E_SCRIPT_CUSTOM_FLAGS[@]}" ]] && test_cmd_args+=" ${E2E_SCRIPT_CUSTOM_FLAGS[@]}" 216 | local extra_flags=() 217 | if (( IS_BOSKOS )); then # Add arbitrary duration, wait for Boskos projects acquisition before error out 218 | extra_flags+=(--boskos-wait-duration=20m) 219 | else # Only let kubetest tear down the cluster if not using Boskos, it's done by Janitor if using Boskos 220 | extra_flags+=(--down) 221 | fi 222 | 223 | # Set a minimal kubernetes environment that satisfies kubetest 224 | # TODO(adrcunha): Remove once https://github.com/kubernetes/test-infra/issues/13029 is fixed. 
225 | local kubedir="$(mktemp -d -t kubernetes.XXXXXXXXXX)" 226 | local test_wrapper="${kubedir}/e2e-test.sh" 227 | mkdir ${kubedir}/cluster 228 | ln -s "$(which kubectl)" ${kubedir}/cluster/kubectl.sh 229 | echo "#!/bin/bash" > ${test_wrapper} 230 | echo "cd $(pwd) && set -x" >> ${test_wrapper} 231 | echo "${E2E_SCRIPT} ${test_cmd_args}" >> ${test_wrapper} 232 | chmod +x ${test_wrapper} 233 | cd ${kubedir} 234 | 235 | # Create cluster and run the tests 236 | create_test_cluster_with_retries "${CLUSTER_CREATION_ARGS[@]}" \ 237 | --test-cmd "${test_wrapper}" \ 238 | ${extra_flags[@]} \ 239 | ${EXTRA_KUBETEST_FLAGS[@]} 240 | echo "Test subprocess exited with code $?" 241 | # Ignore any errors below, this is a best-effort cleanup and shouldn't affect the test result. 242 | set +o errexit 243 | function_exists cluster_teardown && cluster_teardown 244 | local result=$(get_test_return_code) 245 | echo "Artifacts were written to ${ARTIFACTS}" 246 | echo "Test result code is ${result}" 247 | exit ${result} 248 | } 249 | 250 | # Retry backup regions/zones if cluster creations failed due to stockout. 251 | # Parameters: $1..$n - any kubetest flags other than geo flag. 
252 | function create_test_cluster_with_retries() { 253 | local cluster_creation_log=/tmp/${E2E_BASE_NAME}-cluster_creation-log 254 | # zone_not_provided is a placeholder for e2e_cluster_zone to make for loop below work 255 | local zone_not_provided="zone_not_provided" 256 | 257 | local e2e_cluster_regions=(${E2E_CLUSTER_REGION}) 258 | local e2e_cluster_zones=(${E2E_CLUSTER_ZONE}) 259 | 260 | if [[ -n "${E2E_CLUSTER_BACKUP_ZONES}" ]]; then 261 | e2e_cluster_zones+=(${E2E_CLUSTER_BACKUP_ZONES}) 262 | elif [[ -n "${E2E_CLUSTER_BACKUP_REGIONS}" ]]; then 263 | e2e_cluster_regions+=(${E2E_CLUSTER_BACKUP_REGIONS}) 264 | e2e_cluster_zones=(${zone_not_provided}) 265 | else 266 | echo "No backup region/zone set, cluster creation will fail in case of stockout" 267 | fi 268 | 269 | local e2e_cluster_target_version="${E2E_CLUSTER_VERSION}" 270 | for e2e_cluster_region in "${e2e_cluster_regions[@]}"; do 271 | for e2e_cluster_zone in "${e2e_cluster_zones[@]}"; do 272 | E2E_CLUSTER_REGION=${e2e_cluster_region} 273 | E2E_CLUSTER_ZONE=${e2e_cluster_zone} 274 | [[ "${E2E_CLUSTER_ZONE}" == "${zone_not_provided}" ]] && E2E_CLUSTER_ZONE="" 275 | local cluster_creation_zone="${E2E_CLUSTER_REGION}" 276 | [[ -n "${E2E_CLUSTER_ZONE}" ]] && cluster_creation_zone="${E2E_CLUSTER_REGION}-${E2E_CLUSTER_ZONE}" 277 | resolve_k8s_version ${e2e_cluster_target_version} ${cluster_creation_zone} || return 1 278 | 279 | header "Creating test cluster ${E2E_CLUSTER_VERSION} in ${cluster_creation_zone}" 280 | # Don't fail test for kubetest, as it might incorrectly report test failure 281 | # if teardown fails (for details, see success() below) 282 | set +o errexit 283 | export CLUSTER_API_VERSION=${E2E_CLUSTER_VERSION} 284 | run_go_tool k8s.io/test-infra/kubetest \ 285 | kubetest "$@" --gcp-region=${cluster_creation_zone} 2>&1 | tee ${cluster_creation_log} 286 | 287 | # Exit if test succeeded 288 | [[ "$(get_test_return_code)" == "0" ]] && return 0 289 | # Retry if cluster creation failed because of: 290 
| # - stockout (https://github.com/knative/test-infra/issues/592) 291 | # - latest GKE not available in this region/zone yet (https://github.com/knative/test-infra/issues/694) 292 | [[ -z "$(grep -Fo 'does not have enough resources available to fulfill' ${cluster_creation_log})" \ 293 | && -z "$(grep -Fo 'ResponseError: code=400, message=No valid versions with the prefix' ${cluster_creation_log})" \ 294 | && -z "$(grep -Po 'ResponseError: code=400, message=Master version "[0-9a-z\-\.]+" is unsupported' ${cluster_creation_log})" ]] \ 295 | && return 1 296 | done 297 | done 298 | echo "No more region/zones to try, quitting" 299 | return 1 300 | } 301 | 302 | # Setup the test cluster for running the tests. 303 | function setup_test_cluster() { 304 | # Fail fast during setup. 305 | set -o errexit 306 | set -o pipefail 307 | 308 | header "Setting up test cluster" 309 | 310 | # Set the actual project the test cluster resides in 311 | # It will be a project assigned by Boskos if test is running on Prow, 312 | # otherwise will be ${GCP_PROJECT} set up by user. 313 | readonly export E2E_PROJECT_ID="$(gcloud config get-value project)" 314 | 315 | # Save some metadata about cluster creation for using in prow and testgrid 316 | save_metadata 317 | 318 | local k8s_user=$(gcloud config get-value core/account) 319 | local k8s_cluster=$(kubectl config current-context) 320 | 321 | is_protected_cluster ${k8s_cluster} && \ 322 | abort "kubeconfig context set to ${k8s_cluster}, which is forbidden" 323 | 324 | # If cluster admin role isn't set, this is a brand new cluster 325 | # Setup the admin role and also KO_DOCKER_REPO 326 | if [[ -z "$(kubectl get clusterrolebinding cluster-admin-binding 2> /dev/null)" ]]; then 327 | acquire_cluster_admin_role ${k8s_user} ${E2E_CLUSTER_NAME} ${E2E_CLUSTER_REGION} ${E2E_CLUSTER_ZONE} 328 | kubectl config set-context ${k8s_cluster} --namespace=default 329 | # Incorporate an element of randomness to ensure that each run properly publishes images. 
330 | export KO_DOCKER_REPO=gcr.io/${E2E_PROJECT_ID}/${E2E_BASE_NAME}-e2e-img/${RANDOM} 331 | fi 332 | 333 | # Safety checks 334 | is_protected_gcr ${KO_DOCKER_REPO} && \ 335 | abort "\$KO_DOCKER_REPO set to ${KO_DOCKER_REPO}, which is forbidden" 336 | 337 | echo "- Project is ${E2E_PROJECT_ID}" 338 | echo "- Cluster is ${k8s_cluster}" 339 | echo "- User is ${k8s_user}" 340 | echo "- Docker is ${KO_DOCKER_REPO}" 341 | 342 | export KO_DATA_PATH="${REPO_ROOT_DIR}/.git" 343 | 344 | trap teardown_test_resources EXIT 345 | 346 | # Handle failures ourselves, so we can dump useful info. 347 | set +o errexit 348 | set +o pipefail 349 | 350 | if (( ! SKIP_KNATIVE_SETUP )) && function_exists knative_setup; then 351 | # Wait for Istio installation to complete, if necessary, before calling knative_setup. 352 | (( ! SKIP_ISTIO_ADDON )) && (wait_until_batch_job_complete istio-system || return 1) 353 | knative_setup || fail_test "Knative setup failed" 354 | fi 355 | if function_exists test_setup; then 356 | test_setup || fail_test "test setup failed" 357 | fi 358 | } 359 | 360 | # Gets the exit of the test script. 361 | # For more details, see set_test_return_code(). 362 | function get_test_return_code() { 363 | echo $(cat ${TEST_RESULT_FILE}) 364 | } 365 | 366 | # Set the return code that the test script will return. 367 | # Parameters: $1 - return code (0-255) 368 | function set_test_return_code() { 369 | # kubetest teardown might fail and thus incorrectly report failure of the 370 | # script, even if the tests pass. 371 | # We store the real test result to return it later, ignoring any teardown 372 | # failure in kubetest. 373 | # TODO(adrcunha): Get rid of this workaround. 374 | echo -n "$1"> ${TEST_RESULT_FILE} 375 | } 376 | 377 | # Signal (as return code and in the logs) that all E2E tests passed. 
378 | function success() { 379 | set_test_return_code 0 380 | echo "**************************************" 381 | echo "*** E2E TESTS PASSED ***" 382 | echo "**************************************" 383 | exit 0 384 | } 385 | 386 | # Exit test, dumping current state info. 387 | # Parameters: $1 - error message (optional). 388 | function fail_test() { 389 | set_test_return_code 1 390 | [[ -n $1 ]] && echo "ERROR: $1" 391 | dump_cluster_state 392 | exit 1 393 | } 394 | 395 | RUN_TESTS=0 396 | EMIT_METRICS=0 397 | SKIP_KNATIVE_SETUP=0 398 | SKIP_ISTIO_ADDON=0 399 | GCP_PROJECT="" 400 | E2E_SCRIPT="" 401 | E2E_CLUSTER_VERSION="" 402 | GKE_ADDONS="" 403 | EXTRA_CLUSTER_CREATION_FLAGS=() 404 | EXTRA_KUBETEST_FLAGS=() 405 | E2E_SCRIPT_CUSTOM_FLAGS=() 406 | 407 | # Parse flags and initialize the test cluster. 408 | function initialize() { 409 | E2E_SCRIPT="$(get_canonical_path $0)" 410 | E2E_CLUSTER_VERSION="${SERVING_GKE_VERSION}" 411 | 412 | cd ${REPO_ROOT_DIR} 413 | while [[ $# -ne 0 ]]; do 414 | local parameter=$1 415 | # Try parsing flag as a custom one. 416 | if function_exists parse_flags; then 417 | parse_flags $@ 418 | local skip=$? 419 | if [[ ${skip} -ne 0 ]]; then 420 | # Skip parsed flag (and possibly argument) and continue 421 | # Also save it to it's passed through to the test script 422 | for ((i=1;i<=skip;i++)); do 423 | E2E_SCRIPT_CUSTOM_FLAGS+=("$1") 424 | shift 425 | done 426 | continue 427 | fi 428 | fi 429 | # Try parsing flag as a standard one. 
430 | case ${parameter} in 431 | --run-tests) RUN_TESTS=1 ;; 432 | --emit-metrics) EMIT_METRICS=1 ;; 433 | --skip-knative-setup) SKIP_KNATIVE_SETUP=1 ;; 434 | --skip-istio-addon) SKIP_ISTIO_ADDON=1 ;; 435 | *) 436 | [[ $# -ge 2 ]] || abort "missing parameter after $1" 437 | shift 438 | case ${parameter} in 439 | --gcp-project) GCP_PROJECT=$1 ;; 440 | --cluster-version) E2E_CLUSTER_VERSION=$1 ;; 441 | --cluster-creation-flag) EXTRA_CLUSTER_CREATION_FLAGS+=($1) ;; 442 | --kubetest-flag) EXTRA_KUBETEST_FLAGS+=($1) ;; 443 | *) abort "unknown option ${parameter}" ;; 444 | esac 445 | esac 446 | shift 447 | done 448 | 449 | # Use PROJECT_ID if set, unless --gcp-project was used. 450 | if [[ -n "${PROJECT_ID:-}" && -z "${GCP_PROJECT}" ]]; then 451 | echo "\$PROJECT_ID is set to '${PROJECT_ID}', using it to run the tests" 452 | GCP_PROJECT="${PROJECT_ID}" 453 | fi 454 | if (( ! IS_PROW )) && (( ! RUN_TESTS )) && [[ -z "${GCP_PROJECT}" ]]; then 455 | abort "set \$PROJECT_ID or use --gcp-project to select the GCP project where the tests are run" 456 | fi 457 | 458 | (( IS_PROW )) && [[ -z "${GCP_PROJECT}" ]] && IS_BOSKOS=1 459 | 460 | (( SKIP_ISTIO_ADDON )) || GKE_ADDONS="--addons=Istio" 461 | 462 | readonly RUN_TESTS 463 | readonly EMIT_METRICS 464 | readonly GCP_PROJECT 465 | readonly IS_BOSKOS 466 | readonly EXTRA_CLUSTER_CREATION_FLAGS 467 | readonly EXTRA_KUBETEST_FLAGS 468 | readonly SKIP_KNATIVE_SETUP 469 | readonly GKE_ADDONS 470 | 471 | if (( ! RUN_TESTS )); then 472 | create_test_cluster 473 | else 474 | setup_test_cluster 475 | fi 476 | } 477 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/library.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a collection of useful bash functions and constants, intended
# to be used in test scripts and the like. It doesn't do anything when
# called from command line.

# GCP project where all tests related resources live
readonly KNATIVE_TESTS_PROJECT=knative-tests

# Default GKE version to be used with Knative Serving
readonly SERVING_GKE_VERSION=gke-latest
readonly SERVING_GKE_IMAGE=cos

# Conveniently set GOPATH if unset
if [[ -z "${GOPATH:-}" ]]; then
  export GOPATH="$(go env GOPATH)"
  if [[ -z "${GOPATH}" ]]; then
    echo "WARNING: GOPATH not set and go binary unable to provide it"
  fi
fi

# Useful environment variables; Prow sets PROW_JOB_ID on CI runs.
if [[ -n "${PROW_JOB_ID:-}" ]]; then
  IS_PROW=1
else
  IS_PROW=0
fi
readonly IS_PROW
[[ -z "${REPO_ROOT_DIR:-}" ]] && REPO_ROOT_DIR="$(git rev-parse --show-toplevel)"
readonly REPO_ROOT_DIR
readonly REPO_NAME="$(basename ${REPO_ROOT_DIR})"

# Useful flags about the current OS
IS_LINUX=0
IS_OSX=0
IS_WINDOWS=0
case "${OSTYPE}" in
  darwin*) IS_OSX=1 ;;
  linux*) IS_LINUX=1 ;;
  msys*) IS_WINDOWS=1 ;;
  *) echo "** Internal error in library.sh, unknown OS '${OSTYPE}'" ; exit 1 ;;
esac
readonly IS_LINUX
readonly IS_OSX
readonly IS_WINDOWS

# Set ARTIFACTS to an empty temp dir if unset
if [[ -z "${ARTIFACTS:-}" ]]; then
  export ARTIFACTS="$(mktemp -d)"
fi

# On a Prow job, redirect stderr to stdout so it's synchronously added to log
(( IS_PROW )) && exec 2>&1

# Print error message and exit 1
# Parameters: $1..$n - error message to be displayed
function abort() {
  echo "error: $@"
  exit 1
}

# Display a box banner.
# Parameters: $1 - character to use for the box.
#             $2 - banner message.
function make_banner() {
  local msg="$1$1$1$1 $2 $1$1$1$1"
  local border="${msg//[-0-9A-Za-z _.,\/()\']/$1}"
  echo -e "${border}\n${msg}\n${border}"
  # TODO(adrcunha): Remove once logs have timestamps on Prow
  # For details, see https://github.com/kubernetes/test-infra/issues/10100
  echo -e "$1$1$1$1 $(TZ='America/Los_Angeles' date)\n${border}"
}

# Simple header for logging purposes; message is upper-cased.
function header() {
  local upcased="$(echo $1 | tr a-z A-Z)"
  make_banner "=" "${upcased}"
}

# Simple subheader for logging purposes.
function subheader() {
  make_banner "-" "$1"
}

# Simple warning banner for logging purposes.
function warning() {
  make_banner "!" "$1"
}

# Checks whether the given function exists.
function function_exists() {
  [[ "$(type -t $1)" == "function" ]]
}

# Waits until the given object doesn't exist (polls for up to 5 minutes).
# Parameters: $1 - the kind of the object.
#             $2 - object's name.
#             $3 - namespace (optional).
function wait_until_object_does_not_exist() {
  local kubectl_args="get $1 $2"
  local description="$1 $2"
  if [[ -n $3 ]]; then
    kubectl_args="get -n $3 $1 $2"
    description="$1 $3/$2"
  fi
  echo -n "Waiting until ${description} does not exist"
  local attempt
  for attempt in {1..150}; do  # timeout after 5 minutes
    if ! kubectl ${kubectl_args} > /dev/null 2>&1; then
      echo -e "\n${description} does not exist"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for ${description} not to exist"
  kubectl ${kubectl_args}
  return 1
}

# Waits until all pods are running in the given namespace (up to 5 minutes).
# Parameters: $1 - namespace.
function wait_until_pods_running() {
  echo -n "Waiting until all pods in namespace $1 are up"
  local attempt
  for attempt in {1..150}; do  # timeout after 5 minutes
    local pods="$(kubectl get pods --no-headers -n $1 2>/dev/null)"
    # All pods must be running
    local not_running=$(echo "${pods}" | grep -v Running | grep -v Completed | wc -l)
    if [[ -n "${pods}" && ${not_running} -eq 0 ]]; then
      local all_ready=1
      local pod_line
      while read pod_line ; do
        # Column 2 is "ready/total"; split into an array of the two counts.
        local counts=($(echo -n ${pod_line} | cut -f2 -d' ' | tr '/' ' '))
        # All containers must be ready: both fields present, >= 1, and equal.
        [[ -z ${counts[0]} ]] && all_ready=0 && break
        [[ -z ${counts[1]} ]] && all_ready=0 && break
        [[ ${counts[0]} -lt 1 ]] && all_ready=0 && break
        [[ ${counts[1]} -lt 1 ]] && all_ready=0 && break
        [[ ${counts[0]} -ne ${counts[1]} ]] && all_ready=0 && break
      done <<< "$(echo "${pods}" | grep -v Completed)"
      if (( all_ready )); then
        echo -e "\nAll pods are up:\n${pods}"
        return 0
      fi
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for pods to come up\n${pods}"
  return 1
}

# Waits until all batch jobs complete in the given namespace (up to 5 minutes).
# Parameters: $1 - namespace.
function wait_until_batch_job_complete() {
  echo -n "Waiting until all batch jobs in namespace $1 run to completion."
  local attempt
  for attempt in {1..150}; do  # timeout after 5 minutes
    local jobs=$(kubectl get jobs -n $1 --no-headers \
                 -ocustom-columns='n:{.metadata.name},c:{.spec.completions},s:{.status.succeeded}')
    # All jobs must be complete: succeeded (col 3) must equal completions (col 2).
    local not_complete=$(echo "${jobs}" | awk '{if ($2!=$3) print $0}' | wc -l)
    if [[ ${not_complete} -eq 0 ]]; then
      echo -e "\nAll jobs are complete:\n${jobs}"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: timeout waiting for jobs to complete\n${jobs}"
  return 1
}

# Waits until the given service has an external address (IP/hostname).
# Polls for up to 15 minutes.
# Parameters: $1 - namespace.
#             $2 - service name.
function wait_until_service_has_external_ip() {
  echo -n "Waiting until service $2 in namespace $1 has an external address (IP/hostname)"
  local attempt
  for attempt in {1..150}; do  # timeout after 15 minutes
    local ip=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
    if [[ -n "${ip}" ]]; then
      echo -e "\nService $2.$1 has IP $ip"
      return 0
    fi
    local hostname=$(kubectl get svc -n $1 $2 -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
    if [[ -n "${hostname}" ]]; then
      echo -e "\nService $2.$1 has hostname $hostname"
      return 0
    fi
    echo -n "."
    sleep 6
  done
  echo -e "\n\nERROR: timeout waiting for service $2.$1 to have an external address"
  kubectl get pods -n $1
  return 1
}

# Waits for the endpoint to be routable.
# Parameters: $1 - External ingress IP address.
#             $2 - cluster hostname.
function wait_until_routable() {
  echo -n "Waiting until cluster $2 at $1 has a routable endpoint"
  local attempt
  for attempt in {1..150}; do  # timeout after 5 minutes
    local response=$(curl -H "Host: $2" "http://$1" 2>/dev/null)
    if [[ -n "$response" ]]; then
      echo -e "\nEndpoint is now routable"
      return 0
    fi
    echo -n "."
    sleep 2
  done
  echo -e "\n\nERROR: Timed out waiting for endpoint to be routable"
  return 1
}

# Returns the name of the first pod of the given app.
# Parameters: $1 - app name.
#             $2 - namespace (optional).
function get_app_pod() {
  local all_pods=($(get_app_pods $1 $2))
  echo "${all_pods[0]}"
}

# Returns the name of all pods of the given app.
# Parameters: $1 - app name.
#             $2 - namespace (optional).
function get_app_pods() {
  local namespace=""
  [[ -n $2 ]] && namespace="-n $2"
  kubectl get pods ${namespace} --selector=app=$1 --output=jsonpath="{.items[*].metadata.name}"
}

# Capitalize the first letter of each word.
# Parameters: $1..$n - words to capitalize.
function capitalize() {
  local result=()
  local word
  for word in $@; do
    local first_char="$(echo ${word:0:1} | tr 'a-z' 'A-Z')"
    result+=("${first_char}${word:1}")
  done
  echo "${result[@]}"
}

# Dumps pod logs for the given app.
# Parameters: $1 - app name.
#             $2 - namespace.
function dump_app_logs() {
  echo ">>> ${REPO_NAME_FORMATTED} $1 logs:"
  local pod
  for pod in $(get_app_pods "$1" "$2"); do
    echo ">>> Pod: $pod"
    kubectl -n "$2" logs "$pod" --all-containers
  done
}

# Sets the given user as cluster admin.
# Parameters: $1 - user
#             $2 - cluster name
#             $3 - cluster region
#             $4 - cluster zone, optional
function acquire_cluster_admin_role() {
  echo "Acquiring cluster-admin role for user '$1'"
  local geoflag="--region=$3"
  [[ -n $4 ]] && geoflag="--zone=$3-$4"
  # Get the password of the admin and use it, as the service account (or the user)
  # might not have the necessary permission.
  local password=$(gcloud --format="value(masterAuth.password)" \
      container clusters describe $2 ${geoflag})
  if [[ -n "${password}" ]]; then
    # Cluster created with basic authentication
    kubectl config set-credentials cluster-admin \
        --username=admin --password=${password}
  else
    # Cluster uses client certificates; fetch and install them.
    local cert=$(mktemp)
    local key=$(mktemp)
    echo "Certificate in ${cert}, key in ${key}"
    gcloud --format="value(masterAuth.clientCertificate)" \
        container clusters describe $2 ${geoflag} | base64 -d > ${cert}
    gcloud --format="value(masterAuth.clientKey)" \
        container clusters describe $2 ${geoflag} | base64 -d > ${key}
    kubectl config set-credentials cluster-admin \
        --client-certificate=${cert} --client-key=${key}
  fi
  kubectl config set-context $(kubectl config current-context) \
      --user=cluster-admin
  kubectl create clusterrolebinding cluster-admin-binding \
      --clusterrole=cluster-admin \
      --user=$1
  # Reset back to the default account
  gcloud container clusters get-credentials \
      $2 ${geoflag} --project $(gcloud config get-value project)
}

# Run a command through tee and capture its output.
# Parameters: $1 - file where the output will be stored.
#             $2... - command to run.
function capture_output() {
  local report="$1"
  shift
  "$@" 2>&1 | tee "${report}"
  # PIPESTATUS[0] is the command, PIPESTATUS[1] is tee; report the command's
  # failure if any, otherwise tee's.
  local failed=( ${PIPESTATUS[@]} )
  [[ ${failed[0]} -eq 0 ]] && failed=${failed[1]} || failed=${failed[0]}
  return ${failed}
}

# Create a JUnit XML for a test, written to a unique file under ${ARTIFACTS}.
# Parameters: $1 - check class name as an identifier (e.g. BuildTests)
#             $2 - check name as an identifier (e.g., GoBuild)
#             $3 - failure message (can contain newlines), optional (means success)
function create_junit_xml() {
  local xml="$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)"
  local failure=""
  local failures=0
  if [[ "$3" != "" ]]; then
    failures=1
    # Transform newlines into HTML code.
    # Also escape `<` and `>` as here: https://github.com/golang/go/blob/50bd1c4d4eb4fac8ddeb5f063c099daccfb71b26/src/encoding/json/encode.go#L48,
    # this is temporary solution for fixing https://github.com/knative/test-infra/issues/1204,
    # which should be obsolete once Test-infra 2.0 is in place
    local msg="$(echo -n "$3" | sed 's/$/\&lt;br\&gt;/g' | sed 's/</\\u003c/g' | sed 's/>/\\u003e/g' | tr -d '\n')"
    failure="<failure message=\"Failed\" type=\"\">${msg}</failure>"
  fi
  # NOTE(review): the XML template in this vendored copy was destroyed by
  # markup stripping (the heredoc contained no elements at all); reconstructed
  # from upstream knative.dev/test-infra scripts/library.sh — confirm against
  # the vendored revision (17f2331).
  cat << EOF > "${xml}"
<testsuites>
  <testsuite tests="1" failures="${failures}" time="0" name="$1">
    <testcase classname="$1" name="$2" time="0">
      ${failure}
    </testcase>
  </testsuite>
</testsuites>
EOF
}

# Runs a go test and generate a junit summary.
# Parameters: $1... - parameters to go test
function report_go_test() {
  # Run tests in verbose mode to capture details.
  # go doesn't like repeating -v, so remove if passed.
  local args=" $@ "
  local go_test="go test -v ${args/ -v / }"
  # Just run regular go tests if not on Prow.
  echo "Running tests with '${go_test}'"
  local report="$(mktemp)"
  capture_output "${report}" ${go_test}
  local failed=$?
  echo "Finished run, return code is ${failed}"
  # Install go-junit-report if necessary.
  run_go_tool github.com/jstemmer/go-junit-report go-junit-report --help > /dev/null 2>&1
  local xml=$(mktemp ${ARTIFACTS}/junit_XXXXXXXX.xml)
  # Strip the repo prefix from test names to keep the report readable.
  cat ${report} \
      | go-junit-report \
      | sed -e "s#\"\(github\.com/knative\|knative\.dev\)/${REPO_NAME}/#\"#g" \
      > ${xml}
  echo "XML report written to ${xml}"
  # NOTE(review): the grep pattern here was stripped to '' by markup removal;
  # restored to the upstream check for an empty suite list — confirm.
  if [[ -n "$(grep '<testsuites></testsuites>' ${xml})" ]]; then
    # XML report is empty, something's wrong; use the output as failure reason
    create_junit_xml _go_tests "GoTests" "$(cat ${report})"
  fi
  # Capture and report any race condition errors
  local race_errors="$(sed -n '/^WARNING: DATA RACE$/,/^==================$/p' ${report})"
  create_junit_xml _go_tests "DataRaceAnalysis" "${race_errors}"
  if (( ! IS_PROW )); then
    # Keep the suffix, so files are related.
    local logfile=${xml/junit_/go_test_}
    logfile=${logfile/.xml/.log}
    cp ${report} ${logfile}
    echo "Test log written to ${logfile}"
  fi
  return ${failed}
}

# Install Knative Serving in the current cluster.
# Parameters: $1 - Knative Serving manifest.
function start_knative_serving() {
  header "Starting Knative Serving"
  subheader "Installing Knative Serving"
  echo "Installing Serving CRDs from $1"
  kubectl apply --selector knative.dev/crd-install=true -f "$1"
  echo "Installing the rest of serving components from $1"
  kubectl apply -f "$1"
  wait_until_pods_running knative-serving || return 1
}

# Install the stable release Knative/serving in the current cluster.
# Parameters: $1 - Knative Serving version number, e.g. 0.6.0.
function start_release_knative_serving() {
  start_knative_serving "https://storage.googleapis.com/knative-releases/serving/previous/v$1/serving.yaml"
}

# Install the latest stable Knative Serving in the current cluster.
function start_latest_knative_serving() {
  start_knative_serving "${KNATIVE_SERVING_RELEASE}"
}

# Run a go tool, installing it first if necessary.
# Parameters: $1 - tool package/dir for go get/install.
#             $2 - tool to run.
#             $3..$n - parameters passed to the tool.
function run_go_tool() {
  local tool=$2
  if [[ -z "$(which ${tool})" ]]; then
    # Local paths are installed; remote packages are fetched with `go get`.
    local action=get
    [[ $1 =~ ^[\./].* ]] && action=install
    go ${action} $1
  fi
  shift 2
  ${tool} "$@"
}

# Run dep-collector to update licenses.
# Parameters: $1 - output file, relative to repo root dir.
#             $2...$n - directories and files to inspect.
function update_licenses() {
  cd ${REPO_ROOT_DIR} || return 1
  local dst=$1
  shift
  run_go_tool knative.dev/test-infra/tools/dep-collector dep-collector $@ > ./${dst}
}

# Run dep-collector to check for forbidden licenses.
# Parameters: $1...$n - directories and files to inspect.
function check_licenses() {
  # Fetch the google/licenseclassifier for its license db
  rm -fr ${GOPATH}/src/github.com/google/licenseclassifier
  go get -u github.com/google/licenseclassifier
  # Check that we don't have any forbidden licenses in our images.
  run_go_tool knative.dev/test-infra/tools/dep-collector dep-collector -check $@
}

# Run the given linter on the given files, checking it exists first.
# Returns 127 if the tool is missing, 1 if any file fails, 0 otherwise.
# Parameters: $1 - tool
#             $2 - tool purpose (for error message if tool not installed)
#             $3 - tool parameters (quote if multiple parameters used)
#             $4..$n - files to run linter on
function run_lint_tool() {
  local checker=$1
  local params=$3
  if ! hash ${checker} 2>/dev/null; then
    warning "${checker} not installed, not $2"
    return 127
  fi
  shift 3
  local failed=0
  local file
  for file in $@; do
    ${checker} ${params} ${file} || failed=1
  done
  return ${failed}
}

# Check links in the given markdown files.
# Parameters: $1...$n - files to inspect
function check_links_in_markdown() {
  # https://github.com/raviqqe/liche
  local config="${REPO_ROOT_DIR}/test/markdown-link-check-config.rc"
  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-link-check-config.rc"
  # Collapse the config's flag lines into a single space-separated string.
  # Fix: translate newlines (tr '\n'); the previous `tr \"\n\" ' '` translated
  # literal `"` and `n` characters instead.
  local options="$(grep '^-' ${config} | tr '\n' ' ')"
  run_lint_tool liche "checking links in markdown files" "-d ${REPO_ROOT_DIR} ${options}" $@
}

# Check format of the given markdown files.
# Parameters: $1..$n - files to inspect
function lint_markdown() {
  # https://github.com/markdownlint/markdownlint
  local config="${REPO_ROOT_DIR}/test/markdown-lint-config.rc"
  [[ ! -e ${config} ]] && config="${_TEST_INFRA_SCRIPTS_DIR}/markdown-lint-config.rc"
  run_lint_tool mdl "linting markdown files" "-c ${config}" $@
}

# Return whether the given parameter is an integer.
# Parameters: $1 - integer to check
function is_int() {
  [[ -n $1 && $1 =~ ^[0-9]+$ ]]
}

# Return whether the given parameter is the knative release/nightly GCR.
# Parameters: $1 - full GCR name, e.g. gcr.io/knative-foo-bar
function is_protected_gcr() {
  # Fix: escape the dot so e.g. "gcrXio/knative-releases" does not match.
  [[ -n $1 && $1 =~ ^gcr\.io/knative-(releases|nightly)/?$ ]]
}

# Return whether the given parameter is any cluster under ${KNATIVE_TESTS_PROJECT}.
# Parameters: $1 - Kubernetes cluster context (output of kubectl config current-context)
function is_protected_cluster() {
  # Example: gke_knative-tests_us-central1-f_prow
  [[ -n $1 && $1 =~ ^gke_${KNATIVE_TESTS_PROJECT}_us\-[a-zA-Z0-9]+\-[a-z]+_[a-z0-9\-]+$ ]]
}

# Return whether the given parameter is ${KNATIVE_TESTS_PROJECT}.
# Parameters: $1 - project name
function is_protected_project() {
  [[ -n $1 && "$1" == "${KNATIVE_TESTS_PROJECT}" ]]
}

# Remove symlinks in a path that are broken or lead outside the repo.
# Parameters: $1 - path name, e.g. vendor
function remove_broken_symlinks() {
  local link
  for link in $(find $1 -type l); do
    # Remove broken symlinks
    if [[ ! -e ${link} ]]; then
      unlink ${link}
      continue
    fi
    # Get canonical path to target, remove if outside the repo
    local target="$(ls -l ${link})"
    target="${target##* -> }"
    [[ ${target} == /* ]] || target="./${target}"
    target="$(cd $(dirname ${link}) && cd ${target%/*} && echo $PWD/${target##*/})"
    if [[ ${target} != *github.com/knative/* && ${target} != *knative.dev/* ]]; then
      unlink ${link}
      continue
    fi
  done
}

# Returns the canonical path of a filesystem object.
# Parameters: $1 - path to return in canonical form
#             $2 - base dir for relative links; optional, defaults to current
function get_canonical_path() {
  # We don't use readlink because it's not available on every platform.
  local path=$1
  local pwd=${2:-.}
  [[ ${path} == /* ]] || path="${pwd}/${path}"
  echo "$(cd ${path%/*} && echo $PWD/${path##*/})"
}

# Returns the URL to the latest manifest for the given Knative project.
# Parameters: $1 - repository name of the given project
#             $2 - name of the yaml file, without extension
function get_latest_knative_yaml_source() {
  local branch_name=""
  local repo_name="$1"
  local yaml_name="$2"
  # Get the branch name from Prow's env var, see https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md.
  # Otherwise, try getting the current branch from git.
  (( IS_PROW )) && branch_name="${PULL_BASE_REF:-}"
  [[ -z "${branch_name}" ]] && branch_name="$(git rev-parse --abbrev-ref HEAD)"
  # If it's a release branch, the yaml source URL should point to a specific version.
  if [[ ${branch_name} =~ ^release-[0-9\.]+$ ]]; then
    # Get the latest tag name for the current branch, which is likely formatted as v0.5.0
    local tag_name="$(git describe --tags --abbrev=0)"
    # The given repo might not have this tag, so we need to find its latest
    # release manifest with the same major&minor version.
    local major_minor="$(echo ${tag_name} | cut -d. -f1-2)"
    local yaml_source_path="$(gsutil ls gs://knative-releases/${repo_name}/previous/${major_minor}.*/${yaml_name}.yaml \
        | sort \
        | tail -n 1 \
        | cut -b6-)"
    echo "https://storage.googleapis.com/${yaml_source_path}"
  # If it's not a release branch, the yaml source URL should be nightly build.
  else
    echo "https://storage.googleapis.com/knative-nightly/${repo_name}/latest/${yaml_name}.yaml"
  fi
}

# Initializations that depend on previous functions.
# These MUST come last.

readonly _TEST_INFRA_SCRIPTS_DIR="$(dirname $(get_canonical_path ${BASH_SOURCE[0]}))"
readonly REPO_NAME_FORMATTED="Knative $(capitalize ${REPO_NAME//-/ })"

# Public latest nightly or release yaml files.
567 | readonly KNATIVE_SERVING_RELEASE="$(get_latest_knative_yaml_source "serving" "serving")" 568 | readonly KNATIVE_BUILD_RELEASE="$(get_latest_knative_yaml_source "build" "build")" 569 | readonly KNATIVE_EVENTING_RELEASE="$(get_latest_knative_yaml_source "eventing" "release")" 570 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/markdown-link-check-config.rc: -------------------------------------------------------------------------------- 1 | # For help, see 2 | # https://github.com/raviqqe/liche/blob/master/README.md 3 | 4 | # Don't check localhost links 5 | -x "^https?://localhost($|[:/].*)" 6 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/markdown-lint-config.rc: -------------------------------------------------------------------------------- 1 | # For help, see 2 | # https://github.com/markdownlint/markdownlint/blob/master/docs/configuration.md 3 | 4 | # Ignore long lines 5 | rules "~MD013" 6 | -------------------------------------------------------------------------------- /vendor/knative.dev/test-infra/scripts/presubmit-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2018 The Knative Authors 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
# This is a helper script for Knative presubmit test scripts.
# See README.md for instructions on how to use it.

source $(dirname ${BASH_SOURCE})/library.sh

# Custom configuration of presubmit tests
readonly DISABLE_MD_LINTING=${DISABLE_MD_LINTING:-0}
readonly DISABLE_MD_LINK_CHECK=${DISABLE_MD_LINK_CHECK:-0}
readonly PRESUBMIT_TEST_FAIL_FAST=${PRESUBMIT_TEST_FAIL_FAST:-0}

# Extensions or file patterns that don't require presubmit tests.
readonly NO_PRESUBMIT_FILES=(\.png \.gitignore \.gitattributes ^OWNERS ^OWNERS_ALIASES ^AUTHORS)

# Flag if this is a presubmit run or not.
# Fix: `[[ IS_PROW && ... ]]` tested the literal (always non-empty, thus true)
# string "IS_PROW", not the variable's value; evaluate it arithmetically.
(( IS_PROW )) && [[ -n "${PULL_PULL_SHA:-}" ]] && IS_PRESUBMIT=1 || IS_PRESUBMIT=0
readonly IS_PRESUBMIT

# List of changed files on presubmit, LF separated.
CHANGED_FILES=""

# Flags that this PR is exempt of presubmit tests.
IS_PRESUBMIT_EXEMPT_PR=0

# Flags that this PR contains only changes to documentation.
IS_DOCUMENTATION_PR=0

# Returns true if PR only contains the given file regexes.
# Parameters: $1 - file regexes, space separated.
function pr_only_contains() {
  [[ -z "$(echo "${CHANGED_FILES}" | grep -v "\(${1// /\\|}\)$")" ]]
}

# List changed files in the current PR.
# This is implemented as a function so it can be mocked in unit tests.
function list_changed_files() {
  /workspace/githubhelper -list-changed-files
}

# Initialize flags and context for presubmit tests:
# CHANGED_FILES, IS_PRESUBMIT_EXEMPT_PR and IS_DOCUMENTATION_PR.
function initialize_environment() {
  CHANGED_FILES=""
  IS_PRESUBMIT_EXEMPT_PR=0
  IS_DOCUMENTATION_PR=0
  (( ! IS_PRESUBMIT )) && return
  CHANGED_FILES="$(list_changed_files)"
  if [[ -n "${CHANGED_FILES}" ]]; then
    echo -e "Changed files in commit ${PULL_PULL_SHA}:\n${CHANGED_FILES}"
    local no_presubmit_files="${NO_PRESUBMIT_FILES[*]}"
    pr_only_contains "${no_presubmit_files}" && IS_PRESUBMIT_EXEMPT_PR=1
    # A documentation PR must contain markdown files
    if pr_only_contains "\.md ${no_presubmit_files}"; then
      [[ -n "$(echo "${CHANGED_FILES}" | grep '\.md')" ]] && IS_DOCUMENTATION_PR=1
    fi
  else
    header "NO CHANGED FILES REPORTED, ASSUMING IT'S AN ERROR AND RUNNING TESTS ANYWAY"
  fi
  readonly CHANGED_FILES
  readonly IS_DOCUMENTATION_PR
  readonly IS_PRESUBMIT_EXEMPT_PR
}

# Display a pass/fail banner for a test group.
# Parameters: $1 - test group name (e.g., build)
#             $2 - result (0=passed, 1=failed)
function results_banner() {
  local result
  [[ $2 -eq 0 ]] && result="PASSED" || result="FAILED"
  header "$1 tests ${result}"
}

# Run build tests. If there's no `build_tests` function, run the default
# build test runner.
function run_build_tests() {
  (( ! RUN_BUILD_TESTS )) && return 0
  header "Running build tests"
  local failed=0
  # Run pre-build tests, if any
  if function_exists pre_build_tests; then
    pre_build_tests || failed=1
  fi
  # Don't run build tests if pre-build tests failed
  if (( ! failed )); then
    if function_exists build_tests; then
      build_tests || failed=1
    else
      default_build_test_runner || failed=1
    fi
  fi
  # Don't run post-build tests if pre/build tests failed
  if (( ! failed )) && function_exists post_build_tests; then
    post_build_tests || failed=1
  fi
  results_banner "Build" ${failed}
  return ${failed}
}

# Run a build test and report its output as the failure if it fails.
# Parameters: $1 - report name.
#             $2... - command (test) to run.
function report_build_test() {
  local report="$(mktemp)"
  local report_name="$1"
  shift
  local errors=""
  capture_output "${report}" "$@" || errors="$(cat ${report})"
  create_junit_xml _build_tests "${report_name}" "${errors}"
  [[ -z "${errors}" ]]
}

# Perform markdown build tests if necessary, unless disabled.
function markdown_build_tests() {
  (( DISABLE_MD_LINTING && DISABLE_MD_LINK_CHECK )) && return 0
  # Get changed markdown files (ignore /vendor and deleted files)
  local mdfiles=""
  local file
  for file in $(echo "${CHANGED_FILES}" | grep \.md$ | grep -v ^vendor/); do
    [[ -f "${file}" ]] && mdfiles="${mdfiles} ${file}"
  done
  [[ -z "${mdfiles}" ]] && return 0
  local failed=0
  if (( ! DISABLE_MD_LINTING )); then
    subheader "Linting the markdown files"
    report_build_test Markdown_Lint lint_markdown ${mdfiles} || failed=1
  fi
  if (( ! DISABLE_MD_LINK_CHECK )); then
    subheader "Checking links in the markdown files"
    report_build_test Markdown_Link check_links_in_markdown ${mdfiles} || failed=1
  fi
  return ${failed}
}

# Default build test runner that:
# * check markdown files
# * `go build` on the entire repo
# * run `/hack/verify-codegen.sh` (if it exists)
# * check licenses in all go packages
function default_build_test_runner() {
  local failed=0
  # Perform markdown build checks first
  markdown_build_tests || failed=1
  # For documentation PRs, just check the md files
  (( IS_DOCUMENTATION_PR )) && return ${failed}
  # Skip build test if there is no go code
  local go_pkg_dirs="$(go list ./...)"
  [[ -z "${go_pkg_dirs}" ]] && return ${failed}
  # Ensure all the code builds
  subheader "Checking that go code builds"
  local report="$(mktemp)"
  local errors_go1=""
  local errors_go2=""
  if ! capture_output "${report}" go build -v ./... ; then
    failed=1
    # Consider an error message everything that's not a package name.
    errors_go1="$(grep -v '^\(github\.com\|knative\.dev\)/' "${report}" | sort | uniq)"
  fi
  # Get all build tags in go code (ignore /vendor)
  local tags="$(grep -r '// +build' . \
      | grep -v '^./vendor/' | cut -f3 -d' ' | sort | uniq | tr '\n' ' ')"
  if [[ -n "${tags}" ]]; then
    if ! capture_output "${report}" go test -run=^$ -tags="${tags}" ./... ; then
      failed=1
      # Consider an error message everything that's not a successful test result.
      errors_go2="$(grep -v '^\(ok\|\?\)\s\+\(github\.com\|knative\.dev\)/' "${report}")"
    fi
  fi
  local errors_go="$(echo -e "${errors_go1}\n${errors_go2}" | uniq)"
  create_junit_xml _build_tests Build_Go "${errors_go}"
  if [[ -f ./hack/verify-codegen.sh ]]; then
    subheader "Checking autogenerated code is up-to-date"
    report_build_test Verify_CodeGen ./hack/verify-codegen.sh || failed=1
  fi
  # Check that we don't have any forbidden licenses in our images.
  subheader "Checking for forbidden licenses"
  report_build_test Check_Licenses check_licenses ${go_pkg_dirs} || failed=1
  return ${failed}
}

# Run unit tests. If there's no `unit_tests` function, run the default
# unit test runner.
function run_unit_tests() {
  (( ! RUN_UNIT_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping unit tests"
    return 0
  fi
  header "Running unit tests"
  local failed=0
  # Run pre-unit tests, if any
  if function_exists pre_unit_tests; then
    pre_unit_tests || failed=1
  fi
  # Don't run unit tests if pre-unit tests failed
  if (( ! failed )); then
    if function_exists unit_tests; then
      unit_tests || failed=1
    else
      default_unit_test_runner || failed=1
    fi
  fi
  # Don't run post-unit tests if pre/unit tests failed
  if (( ! failed )) && function_exists post_unit_tests; then
    post_unit_tests || failed=1
  fi
  results_banner "Unit" ${failed}
  return ${failed}
}

# Default unit test runner that runs all go tests in the repo.
function default_unit_test_runner() {
  report_go_test -race ./...
}

# Run integration tests. If there's no `integration_tests` function, run the
# default integration test runner.
function run_integration_tests() {
  # Don't run integration tests if not requested OR on documentation PRs
  (( ! RUN_INTEGRATION_TESTS )) && return 0
  if (( IS_DOCUMENTATION_PR )); then
    header "Documentation only PR, skipping integration tests"
    return 0
  fi
  header "Running integration tests"
  local failed=0
  # Run pre-integration tests, if any
  if function_exists pre_integration_tests; then
    pre_integration_tests || failed=1
  fi
  # Don't run integration tests if pre-integration tests failed
  if (( ! failed )); then
    if function_exists integration_tests; then
      integration_tests || failed=1
    else
      default_integration_test_runner || failed=1
    fi
  fi
  # Don't run post-integration tests if pre/integration tests failed
  if (( ! failed )) && function_exists post_integration_tests; then
    post_integration_tests || failed=1
  fi
  results_banner "Integration" ${failed}
  return ${failed}
}

# Default integration test runner that runs all `test/e2e-*tests.sh`.
function default_integration_test_runner() {
  local options=""
  local failed=0
  (( EMIT_METRICS )) && options="--emit-metrics"
  # Quote the -name pattern: unquoted, the shell may glob-expand
  # `e2e-*tests.sh` against the current directory before find ever sees
  # it (shellcheck SC2061); find must receive the literal pattern.
  for e2e_test in $(find test/ -name "e2e-*tests.sh"); do
    echo "Running integration test ${e2e_test}"
    if ! ${e2e_test} ${options}; then
      failed=1
    fi
  done
  return ${failed}
}

# Options set by command-line flags.
RUN_BUILD_TESTS=0
RUN_UNIT_TESTS=0
RUN_INTEGRATION_TESTS=0
EMIT_METRICS=0

# Process flags and run tests accordingly.
# With no arguments, defaults to --all-tests.
function main() {
  initialize_environment
  # Exempt PRs that don't require tests (unless doc-only, which is handled
  # by each test phase) skip everything.
  if (( IS_PRESUBMIT_EXEMPT_PR )) && (( ! IS_DOCUMENTATION_PR )); then
    header "Commit only contains changes that don't require tests, skipping"
    exit 0
  fi

  # Show the version of the tools we're using
  if (( IS_PROW )); then
    # Disable gcloud update notifications
    gcloud config set component_manager/disable_update_check true
    header "Current test setup"
    echo ">> gcloud SDK version"
    gcloud version
    echo ">> kubectl version"
    kubectl version --client
    echo ">> go version"
    go version
    echo ">> git version"
    git version
    echo ">> ko built from commit"
    [[ -f /ko_version ]] && cat /ko_version || echo "unknown"
    echo ">> bazel version"
    [[ -f /bazel_version ]] && cat /bazel_version || echo "unknown"
    if [[ "${DOCKER_IN_DOCKER_ENABLED}" == "true" ]]; then
      echo ">> docker version"
      docker version
    fi
    # node/pod names are important for debugging purposes, but they are missing
    # after migrating from bootstrap to podutil.
    # Report it here with the same logic as in bootstrap until it is fixed.
    # (https://github.com/kubernetes/test-infra/blob/09bd4c6709dc64308406443f8996f90cf3b40ed1/jenkins/bootstrap.py#L588)
    # TODO(chaodaiG): follow up on https://github.com/kubernetes/test-infra/blob/0fabd2ea816daa8c15d410c77a0c93c0550b283f/prow/initupload/run.go#L49
    echo ">> node name"
    echo "$(curl -H "Metadata-Flavor: Google" 'http://169.254.169.254/computeMetadata/v1/instance/name' 2> /dev/null)"
    echo ">> pod name"
    echo "${HOSTNAME}"
  fi

  [[ -z $1 ]] && set -- "--all-tests"

  local TEST_TO_RUN=""

  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      --build-tests) RUN_BUILD_TESTS=1 ;;
      --unit-tests) RUN_UNIT_TESTS=1 ;;
      --integration-tests) RUN_INTEGRATION_TESTS=1 ;;
      --emit-metrics) EMIT_METRICS=1 ;;
      --all-tests)
        RUN_BUILD_TESTS=1
        RUN_UNIT_TESTS=1
        RUN_INTEGRATION_TESTS=1
        ;;
      --run-test)
        shift
        [[ $# -ge 1 ]] || abort "missing executable after --run-test"
        TEST_TO_RUN=$1
        ;;
      *) abort "error: unknown option ${parameter}" ;;
    esac
    shift
  done

  readonly RUN_BUILD_TESTS
  readonly RUN_UNIT_TESTS
  readonly RUN_INTEGRATION_TESTS
  readonly EMIT_METRICS
  readonly TEST_TO_RUN

  cd ${REPO_ROOT_DIR}

  # Tests to be performed, in the right order if --all-tests is passed.

  local failed=0

  if [[ -n "${TEST_TO_RUN}" ]]; then
    # --run-test is exclusive with the phase flags.
    if (( RUN_BUILD_TESTS || RUN_UNIT_TESTS || RUN_INTEGRATION_TESTS )); then
      abort "--run-test must be used alone"
    fi
    # If this is a presubmit run, but a documentation-only PR, don't run the test
    if (( IS_PRESUBMIT && IS_DOCUMENTATION_PR )); then
      header "Documentation only PR, skipping running custom test"
      exit 0
    fi
    ${TEST_TO_RUN} || failed=1
  fi

  run_build_tests || failed=1
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run unit tests if build tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_unit_tests || failed=1
  fi
  # If PRESUBMIT_TEST_FAIL_FAST is set to true, don't run integration tests if build/unit tests failed
  if (( ! PRESUBMIT_TEST_FAIL_FAST )) || (( ! failed )); then
    run_integration_tests || failed=1
  fi

  exit ${failed}
}
--------------------------------------------------------------------------------
/vendor/knative.dev/test-infra/scripts/release.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Copyright 2018 The Knative Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is a helper script for Knative release scripts.
# See README.md for instructions on how to use it.

source $(dirname ${BASH_SOURCE})/library.sh

# Organization name in GitHub; defaults to Knative.
readonly ORG_NAME="${ORG_NAME:-knative}"

# GitHub upstream.
readonly REPO_UPSTREAM="https://github.com/${ORG_NAME}/${REPO_NAME}"

# GCRs for Knative releases.
readonly NIGHTLY_GCR="gcr.io/knative-nightly/github.com/${ORG_NAME}/${REPO_NAME}"
readonly RELEASE_GCR="gcr.io/knative-releases/github.com/${ORG_NAME}/${REPO_NAME}"

# Georeplicate images to {us,eu,asia}.gcr.io
readonly GEO_REPLICATION=(us eu asia)

# Simple banner for logging purposes.
# Parameters: $1 - message to display.
function banner() {
  make_banner "@" "$1"
}

# Tag images in the yaml files if $TAG is not empty.
# $KO_DOCKER_REPO is the registry containing the images to tag with $TAG.
# Parameters: $1..$n - files to parse for images (non .yaml files are ignored).
function tag_images_in_yamls() {
  # Nothing to do when no tag was requested.
  [[ -z ${TAG} ]] && return 0
  local gopath_src="${GOPATH}/src/"
  # Registry path of this repo's images: $KO_DOCKER_REPO plus the repo's
  # path relative to $GOPATH/src.
  local docker_base="${KO_DOCKER_REPO}/${REPO_ROOT_DIR/$gopath_src}"
  # The trailing space matters: "us eu asia " becomes "us. eu. asia." below.
  local region_list="${GEO_REPLICATION[@]} "
  echo "Tagging any images under '${docker_base}' with ${TAG}"
  for file in $@; do
    [[ "${file##*.}" != "yaml" ]] && continue
    echo "Inspecting ${file}"
    # Collect every image digest under our registry that the yaml references.
    for image in $(grep -o "${docker_base}/[a-z\./-]\+@sha256:[0-9a-f]\+" ${file}); do
      # "" is the global gcr.io; the rest are the regional mirrors (us. eu. asia.).
      for region in "" ${region_list// /. }; do
        gcloud -q container images add-tag ${image} ${region}${image%%@*}:${TAG}
      done
    done
  done
}

# Copy the given files to the $RELEASE_GCS_BUCKET bucket's "latest" directory.
# If $TAG is not empty, also copy them to $RELEASE_GCS_BUCKET bucket's "previous" directory.
# Parameters: $1..$n - files to copy.
function publish_to_gcs() {
  # Helper: copy $2..$n into gs://$RELEASE_GCS_BUCKET/$1/, logging what's done.
  function verbose_gsutil_cp {
    local DEST="gs://${RELEASE_GCS_BUCKET}/$1/"
    shift
    echo "Publishing [$@] to ${DEST}"
    gsutil -m cp $@ ${DEST}
  }
  # Before publishing the files, cleanup the `latest` dir if it exists.
  local latest_dir="gs://${RELEASE_GCS_BUCKET}/latest"
  if [[ -n "$(gsutil ls ${latest_dir} 2> /dev/null)" ]]; then
    echo "Cleaning up '${latest_dir}' first"
    gsutil -m rm ${latest_dir}/**
  fi
  verbose_gsutil_cp latest $@
  [[ -n ${TAG} ]] && verbose_gsutil_cp previous/${TAG} $@
}

# These are global environment variables.
SKIP_TESTS=0
PRESUBMIT_TEST_FAIL_FAST=1
TAG_RELEASE=0
PUBLISH_RELEASE=0
PUBLISH_TO_GITHUB=0
TAG=""
BUILD_COMMIT_HASH=""
BUILD_YYYYMMDD=""
BUILD_TIMESTAMP=""
BUILD_TAG=""
RELEASE_VERSION=""
RELEASE_NOTES=""
RELEASE_BRANCH=""
RELEASE_GCS_BUCKET="knative-nightly/${REPO_NAME}"
KO_FLAGS="-P"
VALIDATION_TESTS="./test/presubmit-tests.sh"
YAMLS_TO_PUBLISH=""
ARTIFACTS_TO_PUBLISH=""
FROM_NIGHTLY_RELEASE=""
FROM_NIGHTLY_RELEASE_GCS=""
export KO_DOCKER_REPO="gcr.io/knative-nightly"
export GITHUB_TOKEN=""

# Convenience function to run the hub tool.
# Parameters: $1..$n - arguments to hub.
function hub_tool() {
  run_go_tool github.com/github/hub hub $@
}

# Shortcut to "git push" that handles authentication.
# Parameters: $1..$n - arguments to "git push ".
function git_push() {
  local repo_url="${REPO_UPSTREAM}"
  # Fix: the check previously read "${GITHUB_TOKEN}}" (stray '}'), which
  # expands to the token plus a literal '}' and is therefore never empty,
  # so the URL was rewritten even when no token was configured.
  [[ -n "${GITHUB_TOKEN}" ]] && repo_url="${repo_url/:\/\//:\/\/${GITHUB_TOKEN}@}"
  git push ${repo_url} $@
}

# Return the master version of a release.
# For example, "v0.2.1" returns "0.2"
# Parameters: $1 - release version label.
function master_version() {
  local release="${1//v/}"
  local tokens=(${release//\./ })
  echo "${tokens[0]}.${tokens[1]}"
}

# Return the release build number of a release.
# For example, "v0.2.1" returns "1".
# Parameters: $1 - release version label.
function release_build_number() {
  local tokens=(${1//\./ })
  echo "${tokens[2]}"
}

# Return the short commit SHA from a release tag.
# For example, "v20010101-deadbeef" returns "deadbeef".
# Parameters: $1 - nightly release tag.
function hash_from_tag() {
  local tokens=(${1//-/ })
  echo "${tokens[1]}"
}

# Setup the repository upstream, if not set.
function setup_upstream() {
  # hub and checkout need the upstream URL to be set
  # TODO(adrcunha): Use "git remote get-url" once available on Prow.
  local upstream="$(git config --get remote.upstream.url)"
  echo "Remote upstream URL is '${upstream}'"
  if [[ -z "${upstream}" ]]; then
    echo "Setting remote upstream URL to '${REPO_UPSTREAM}'"
    git remote add upstream ${REPO_UPSTREAM}
  fi
}

# Fetch the release branch, so we can check it out.
function setup_branch() {
  [[ -z "${RELEASE_BRANCH}" ]] && return
  git fetch ${REPO_UPSTREAM} ${RELEASE_BRANCH}:upstream/${RELEASE_BRANCH}
}

# Setup version, branch and release notes for an auto release.
function prepare_auto_release() {
  echo "Auto release requested"
  TAG_RELEASE=1
  PUBLISH_RELEASE=1

  git fetch --all || abort "error fetching branches/tags from remote"
  # Major.minor versions that already have a tag.
  local tags="$(git tag | cut -d 'v' -f2 | cut -d '.' -f1-2 | sort | uniq)"
  # Major.minor versions that have a release branch (local or remote).
  local branches="$( { (git branch -r | grep upstream/release-) ; (git branch | grep release-); } | cut -d '-' -f2 | sort | uniq)"

  echo "Versions released (from tags): [" ${tags} "]"
  echo "Versions released (from branches): [" ${branches} "]"

  # Find a release branch that has no corresponding tag yet; that's the
  # version to auto-release.
  local release_number=""
  for i in ${branches}; do
    release_number="${i}"
    for j in ${tags}; do
      if [[ "${i}" == "${j}" ]]; then
        release_number=""
      fi
    done
  done

  if [[ -z "${release_number}" ]]; then
    echo "*** No new release will be generated, as no new branches exist"
    exit 0
  fi

  RELEASE_VERSION="${release_number}.0"
  RELEASE_BRANCH="release-${release_number}"
  echo "Will create release ${RELEASE_VERSION} from branch ${RELEASE_BRANCH}"
  # If --release-notes not used, add a placeholder
  if [[ -z "${RELEASE_NOTES}" ]]; then
    RELEASE_NOTES="$(mktemp)"
    echo "[add release notes here]" > ${RELEASE_NOTES}
  fi
}

# Setup version, branch and release notes for a "dot" release.
function prepare_dot_release() {
  echo "Dot release requested"
  TAG_RELEASE=1
  PUBLISH_RELEASE=1
  git fetch --all || abort "error fetching branches/tags from remote"
  # List latest release
  local releases # don't combine with the line below, or $? will be 0
  releases="$(hub_tool release)"
  [[ $? -eq 0 ]] || abort "cannot list releases"
  # If --release-branch passed, restrict to that release
  if [[ -n "${RELEASE_BRANCH}" ]]; then
    local version_filter="v${RELEASE_BRANCH##release-}"
    echo "Dot release will be generated for ${version_filter}"
    releases="$(echo "${releases}" | grep ^${version_filter})"
  fi
  # Use a version-aware sort (-V): a plain lexical reverse sort would pick
  # v0.9.0 over v0.10.0 as the "latest" release.
  local last_version="$(echo "${releases}" | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r -V | head -1)"
  [[ -n "${last_version}" ]] || abort "no previous release exist"
  local major_minor_version=""
  if [[ -z "${RELEASE_BRANCH}" ]]; then
    echo "Last release is ${last_version}"
    # Determine branch
    major_minor_version="$(master_version ${last_version})"
    RELEASE_BRANCH="release-${major_minor_version}"
    echo "Last release branch is ${RELEASE_BRANCH}"
  else
    major_minor_version="${RELEASE_BRANCH##release-}"
  fi
  [[ -n "${major_minor_version}" ]] || abort "cannot get release major/minor version"
  # Ensure there are new commits in the branch, otherwise we don't create a new release
  setup_branch
  local last_release_commit="$(git rev-list -n 1 ${last_version})"
  local release_branch_commit="$(git rev-list -n 1 upstream/${RELEASE_BRANCH})"
  [[ -n "${last_release_commit}" ]] || abort "cannot get last release commit"
  [[ -n "${release_branch_commit}" ]] || abort "cannot get release branch last commit"
  echo "Version ${last_version} is at commit ${last_release_commit}"
  echo "Branch ${RELEASE_BRANCH} is at commit ${release_branch_commit}"
  if [[ "${last_release_commit}" == "${release_branch_commit}" ]]; then
    echo "*** Branch ${RELEASE_BRANCH} has no new cherry-picks since release ${last_version}"
    echo "*** No dot release will be generated, as no changes exist"
    exit 0
  fi
  # Create new release version number
  local last_build="$(release_build_number ${last_version})"
  RELEASE_VERSION="${major_minor_version}.$(( last_build + 1 ))"
  echo "Will create release ${RELEASE_VERSION} at commit ${release_branch_commit}"
  # If --release-notes not used, copy from the latest release
  if [[ -z "${RELEASE_NOTES}" ]]; then
    RELEASE_NOTES="$(mktemp)"
    hub_tool release show -f "%b" ${last_version} > ${RELEASE_NOTES}
    echo "Release notes from ${last_version} copied to ${RELEASE_NOTES}"
  fi
}

# Setup source nightly image for a release.
function prepare_from_nightly_release() {
  echo "Release from nightly requested"
  SKIP_TESTS=1
  if [[ "${FROM_NIGHTLY_RELEASE}" == "latest" ]]; then
    echo "Finding the latest nightly release"
    find_latest_nightly "${NIGHTLY_GCR}" || abort "cannot find the latest nightly release"
    echo "Latest nightly is ${FROM_NIGHTLY_RELEASE}"
  fi
  readonly FROM_NIGHTLY_RELEASE_GCS="gs://knative-nightly/${REPO_NAME}/previous/${FROM_NIGHTLY_RELEASE}"
  gsutil ls -d "${FROM_NIGHTLY_RELEASE_GCS}" > /dev/null \
    || abort "nightly release ${FROM_NIGHTLY_RELEASE} doesn't exist"
}

# Build a release from an existing nightly one.
function build_from_nightly_release() {
  banner "Building the release"
  echo "Fetching manifests from nightly"
  local yamls_dir="$(mktemp -d)"
  gsutil -m cp -r "${FROM_NIGHTLY_RELEASE_GCS}/*" "${yamls_dir}" || abort "error fetching manifests"
  # Update references to release GCR
  for yaml in ${yamls_dir}/*.yaml; do
    sed -i -e "s#${NIGHTLY_GCR}#${RELEASE_GCR}#" "${yaml}"
  done
  ARTIFACTS_TO_PUBLISH="$(find ${yamls_dir} -name '*.yaml' -printf '%p ')"
  echo "Copying nightly images"
  copy_nightly_images_to_release_gcr "${NIGHTLY_GCR}" "${FROM_NIGHTLY_RELEASE}"
  # Create a release branch from the nightly release tag.
  local commit="$(hash_from_tag ${FROM_NIGHTLY_RELEASE})"
  echo "Creating release branch ${RELEASE_BRANCH} at commit ${commit}"
  git checkout -b ${RELEASE_BRANCH} ${commit} || abort "cannot create branch"
  git_push upstream ${RELEASE_BRANCH} || abort "cannot push branch"
}

# Build a release from source.
function build_from_source() {
  run_validation_tests ${VALIDATION_TESTS}
  banner "Building the release"
  build_release
  # Do not use `||` above or any error will be swallowed.
  if [[ $? -ne 0 ]]; then
    abort "error building the release"
  fi
}

# Copy tagged images from the nightly GCR to the release GCR, tagging them 'latest'.
# This is a recursive function, first call must pass $NIGHTLY_GCR as first parameter.
# Parameters: $1 - GCR to recurse into.
#             $2 - tag to be used to select images to copy.
function copy_nightly_images_to_release_gcr() {
  for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
    # Depth-first: handle nested repositories before this level's images.
    copy_nightly_images_to_release_gcr "${entry}" "$2"
    # Copy each image with the given nightly tag
    for x in $(gcloud --format="value(tags)" container images list-tags "${entry}" --filter="tags=$2" --limit=1); do
      local path="${entry/${NIGHTLY_GCR}}" # Image "path" (remove GCR part)
      local dst="${RELEASE_GCR}${path}:latest"
      gcloud container images add-tag "${entry}:$2" "${dst}" || abort "error copying image"
    done
  done
}

# Recurse into GCR and find the nightly tag of the first `latest` image found.
# Parameters: $1 - GCR to recurse into.
function find_latest_nightly() {
  for entry in $(gcloud --format="value(name)" container images list --repository="$1"); do
    # Depth-first: if a nested repository yields a result, we're done.
    find_latest_nightly "${entry}" && return 0
    for tag in $(gcloud --format="value(tags)" container images list-tags "${entry}" \
      --filter="tags=latest" --limit=1); do
      local tags=( ${tag//,/ } )
      # Skip if more than one nightly tag, as we don't know what's the latest.
      if [[ ${#tags[@]} -eq 2 ]]; then
        local nightly_tag="${tags[@]/latest}" # Remove 'latest' tag
        FROM_NIGHTLY_RELEASE="${nightly_tag// /}" # Remove spaces
        return 0
      fi
    done
  done
  return 1
}

# Parses flags and sets environment variables accordingly.
function parse_flags() {
  local has_gcr_flag=0
  local has_gcs_flag=0
  local is_dot_release=0
  local is_auto_release=0

  cd ${REPO_ROOT_DIR}
  while [[ $# -ne 0 ]]; do
    local parameter=$1
    case ${parameter} in
      # Boolean flags.
      --skip-tests) SKIP_TESTS=1 ;;
      --tag-release) TAG_RELEASE=1 ;;
      --notag-release) TAG_RELEASE=0 ;;
      --publish) PUBLISH_RELEASE=1 ;;
      --nopublish) PUBLISH_RELEASE=0 ;;
      --dot-release) is_dot_release=1 ;;
      --auto-release) is_auto_release=1 ;;
      --from-latest-nightly) FROM_NIGHTLY_RELEASE=latest ;;
      # Flags that take a value: validate and consume the next argument.
      *)
        [[ $# -ge 2 ]] || abort "missing parameter after $1"
        shift
        case ${parameter} in
          --github-token)
            [[ ! -f "$1" ]] && abort "file $1 doesn't exist"
            # Remove any trailing newline/space from token
            GITHUB_TOKEN="$(echo -n $(cat $1))"
            [[ -n "${GITHUB_TOKEN}" ]] || abort "file $1 is empty"
            ;;
          --release-gcr)
            KO_DOCKER_REPO=$1
            has_gcr_flag=1
            ;;
          --release-gcs)
            RELEASE_GCS_BUCKET=$1
            has_gcs_flag=1
            ;;
          --version)
            [[ $1 =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] || abort "version format must be '[0-9].[0-9].[0-9]'"
            RELEASE_VERSION=$1
            ;;
          --branch)
            [[ $1 =~ ^release-[0-9]+\.[0-9]+$ ]] || abort "branch name must be 'release-[0-9].[0-9]'"
            RELEASE_BRANCH=$1
            ;;
          --release-notes)
            [[ ! -f "$1" ]] && abort "file $1 doesn't exist"
            RELEASE_NOTES=$1
            ;;
          --from-nightly)
            [[ $1 =~ ^v[0-9]+-[0-9a-f]+$ ]] || abort "nightly tag must be 'vYYYYMMDD-commithash'"
            FROM_NIGHTLY_RELEASE=$1
            ;;
          *) abort "unknown option ${parameter}" ;;
        esac
        ;;
    esac
    shift
  done

  # Do auto release unless release is forced
  if (( is_auto_release )); then
    (( is_dot_release )) && abort "cannot have both --dot-release and --auto-release set simultaneously"
    [[ -n "${RELEASE_VERSION}" ]] && abort "cannot have both --version and --auto-release set simultaneously"
    [[ -n "${RELEASE_BRANCH}" ]] && abort "cannot have both --branch and --auto-release set simultaneously"
    [[ -n "${FROM_NIGHTLY_RELEASE}" ]] && abort "cannot have --auto-release with a nightly source"
    setup_upstream
    prepare_auto_release
  fi

  # Setup source nightly image
  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    (( is_dot_release )) && abort "dot releases are built from source"
    [[ -z "${RELEASE_VERSION}" ]] && abort "release version must be specified with --version"
    # TODO(adrcunha): "dot" releases from release branches require releasing nightlies
    # for such branches, which we don't do yet.
    [[ "${RELEASE_VERSION}" =~ ^[0-9]+\.[0-9]+\.0$ ]] || abort "version format must be 'X.Y.0'"
    RELEASE_BRANCH="release-$(master_version ${RELEASE_VERSION})"
    prepare_from_nightly_release
    setup_upstream
  fi

  # Setup dot releases
  if (( is_dot_release )); then
    setup_upstream
    prepare_dot_release
  fi

  # Update KO_DOCKER_REPO and KO_FLAGS if we're not publishing.
  if (( ! PUBLISH_RELEASE )); then
    (( has_gcr_flag )) && echo "Not publishing the release, GCR flag is ignored"
    (( has_gcs_flag )) && echo "Not publishing the release, GCS flag is ignored"
    KO_DOCKER_REPO="ko.local"
    KO_FLAGS="-L ${KO_FLAGS}"
    RELEASE_GCS_BUCKET=""
  fi

  # Get the commit, excluding any tags but keeping the "dirty" flag
  BUILD_COMMIT_HASH="$(git describe --always --dirty --match '^$')"
  [[ -n "${BUILD_COMMIT_HASH}" ]] || abort "error getting the current commit"
  BUILD_YYYYMMDD="$(date -u +%Y%m%d)"
  BUILD_TIMESTAMP="$(date -u '+%Y-%m-%d %H:%M:%S')"
  BUILD_TAG="v${BUILD_YYYYMMDD}-${BUILD_COMMIT_HASH}"

  # A versioned release overrides the nightly build tag, and is published
  # to GitHub only when it also has a branch and publishing is enabled.
  (( TAG_RELEASE )) && TAG="${BUILD_TAG}"
  [[ -n "${RELEASE_VERSION}" ]] && TAG="v${RELEASE_VERSION}"
  [[ -n "${RELEASE_VERSION}" && -n "${RELEASE_BRANCH}" ]] && (( PUBLISH_RELEASE )) && PUBLISH_TO_GITHUB=1

  readonly BUILD_COMMIT_HASH
  readonly BUILD_YYYYMMDD
  readonly BUILD_TIMESTAMP
  readonly BUILD_TAG
  readonly SKIP_TESTS
  readonly TAG_RELEASE
  readonly PUBLISH_RELEASE
  readonly PUBLISH_TO_GITHUB
  readonly TAG
  readonly RELEASE_VERSION
  readonly RELEASE_NOTES
  readonly RELEASE_BRANCH
  readonly RELEASE_GCS_BUCKET
  readonly KO_DOCKER_REPO
  readonly VALIDATION_TESTS
  readonly FROM_NIGHTLY_RELEASE
}

# Run tests (unless --skip-tests was passed). Conveniently displays a banner indicating so.
# Parameters: $1 - executable that runs the tests.
function run_validation_tests() {
  if (( ! SKIP_TESTS )); then
    banner "Running release validation tests"
    # Failures here abort the whole release.
    if ! $1; then
      banner "Release validation tests failed, aborting"
      exit 1
    fi
  fi
}

# Publishes the generated artifacts to GCS, GitHub, etc.
# Parameters: $1..$n - files to add to the release.
function publish_artifacts() {
  (( ! PUBLISH_RELEASE )) && return
  tag_images_in_yamls ${ARTIFACTS_TO_PUBLISH}
  publish_to_gcs ${ARTIFACTS_TO_PUBLISH}
  publish_to_github ${ARTIFACTS_TO_PUBLISH}
  banner "New release published successfully"
}

# Entry point for a release script.
function main() {
  # The caller script must define build_release() and a runnable test script.
  function_exists build_release || abort "function 'build_release()' not defined"
  [[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist"
  parse_flags $@
  # Log what will be done and where.
  banner "Release configuration"
  echo "- gcloud user: $(gcloud config get-value core/account)"
  echo "- Go path: ${GOPATH}"
  echo "- Repository root: ${REPO_ROOT_DIR}"
  echo "- Destination GCR: ${KO_DOCKER_REPO}"
  (( SKIP_TESTS )) && echo "- Tests will NOT be run" || echo "- Tests will be run"
  if (( TAG_RELEASE )); then
    echo "- Artifacts will be tagged '${TAG}'"
  else
    echo "- Artifacts WILL NOT be tagged"
  fi
  if (( PUBLISH_RELEASE )); then
    echo "- Release WILL BE published to '${RELEASE_GCS_BUCKET}'"
  else
    echo "- Release will not be published"
  fi
  if (( PUBLISH_TO_GITHUB )); then
    echo "- Release WILL BE published to GitHub"
  fi
  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    echo "- Release will be A COPY OF '${FROM_NIGHTLY_RELEASE}' nightly"
  else
    echo "- Release will be BUILT FROM SOURCE"
    [[ -n "${RELEASE_BRANCH}" ]] && echo "- Release will be built from branch '${RELEASE_BRANCH}'"
  fi
  [[ -n "${RELEASE_NOTES}" ]] && echo "- Release notes are generated from '${RELEASE_NOTES}'"

  # Checkout specific branch, if necessary
  if [[ -n "${RELEASE_BRANCH}" && -z "${FROM_NIGHTLY_RELEASE}" ]]; then
    setup_upstream
    setup_branch
    git checkout upstream/${RELEASE_BRANCH} || abort "cannot checkout branch ${RELEASE_BRANCH}"
  fi

  if [[ -n "${FROM_NIGHTLY_RELEASE}" ]]; then
    build_from_nightly_release
  else
    # Fail fast during the source build; restore lenient mode afterwards.
    set -e -o pipefail
    build_from_source
    set +e +o pipefail
  fi
  # TODO(adrcunha): Remove once all repos use ARTIFACTS_TO_PUBLISH.
  [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && ARTIFACTS_TO_PUBLISH="${YAMLS_TO_PUBLISH}"
  [[ -z "${ARTIFACTS_TO_PUBLISH}" ]] && abort "no artifacts were generated"
  # Ensure no empty file will be published.
  for artifact in ${ARTIFACTS_TO_PUBLISH}; do
    [[ -s ${artifact} ]] || abort "Artifact ${artifact} is empty"
  done
  echo "New release built successfully"
  publish_artifacts
}

# Publishes a new release on GitHub, also git tagging it (unless this is not a versioned release).
# Parameters: $1..$n - files to add to the release.
function publish_to_github() {
  (( PUBLISH_TO_GITHUB )) || return 0
  local title="${REPO_NAME_FORMATTED} release ${TAG}"
  local attachments=()
  local description="$(mktemp)"
  local attachments_dir="$(mktemp -d)"
  local commitish=""
  # Copy files to a separate dir
  for artifact in $@; do
    cp ${artifact} ${attachments_dir}/
    attachments+=("--attach=${artifact}#$(basename ${artifact})")
  done
  echo -e "${title}\n" > ${description}
  if [[ -n "${RELEASE_NOTES}" ]]; then
    cat ${RELEASE_NOTES} >> ${description}
  fi
  git tag -a ${TAG} -m "${title}"
  git_push tag ${TAG}

  [[ -n "${RELEASE_BRANCH}" ]] && commitish="--commitish=${RELEASE_BRANCH}"
  # Retry up to 3 times, pausing 15s between attempts.
  for i in {2..0}; do
    hub_tool release create \
      --prerelease \
      ${attachments[@]} \
      --file=${description} \
      ${commitish} \
      ${TAG} && return 0
    if [[ "${i}" -gt 0 ]]; then
      echo "Error publishing the release, retrying in 15s..."
      sleep 15
    fi
  done
  abort "Cannot publish release to GitHub"
}
--------------------------------------------------------------------------------