├── README.md ├── src └── com │ └── docker │ ├── projects │ ├── Docker.groovy │ └── EngineApi.groovy │ └── utilities │ └── AWSSync.groovy └── vars ├── codecov.groovy ├── codecov.txt ├── dockerImageTagToDigest.groovy ├── dockerImageTagToDigest.txt ├── documentationChecker.groovy ├── documentationChecker.txt ├── getOutput.groovy ├── getOutput.txt ├── gitCommit.groovy ├── gitCommit.txt ├── golangTester.groovy ├── golangTester.txt ├── nodeExists.groovy ├── nodeExists.txt ├── s3Archive.groovy ├── s3Archive.txt ├── s3Fetch.groovy ├── s3Fetch.txt ├── withChownWorkspace.groovy ├── withChownWorkspace.txt ├── withTool.groovy ├── withTool.txt ├── withVpn.groovy ├── withVpn.txt ├── wrappedNode.groovy └── wrappedNode.txt /README.md: -------------------------------------------------------------------------------- 1 | jenkins-pipeline-scripts 2 | ======================== 3 | 4 | *NOTE: This repository is being deprecated internally at Docker, Inc and hence 5 | will receive few updates going forward.* 6 | 7 | This repository contains helper functions and classes to be used with the Jenkins Pipeline Plugin. 8 | This repository is used on https://jenkins.dockerproject.org and other Jenkins instances managed by Docker, Inc. 9 | 10 | To use this library from your `Jenkinsfile`, 11 | make sure you have installed the _GitHub Organization Folder_ in version 1.5 or later, 12 | then start off with: 13 | 14 | ```groovy 15 | @Library('github.com/docker/jenkins-pipeline-scripts') _ 16 | ``` 17 | 18 | See [Extending with Shared Libraries](https://jenkins.io/doc/book/pipeline/shared-libraries/) for more 19 | information on Jenkins pipeline extensions. 20 | -------------------------------------------------------------------------------- /src/com/docker/projects/Docker.groovy: -------------------------------------------------------------------------------- 1 | package com.docker.projects; 2 | 3 | import groovy.transform.Field 4 | 5 | @Field 6 | def versionString = null 7 | 8 | @Field 9 | def imageId = null 10 | 11 | def makeTask(nodeType, taskName, extraEnv, Closure body=null) { 12 | return { 13 | wrappedNode(label: nodeType) { 14 | deleteDir() 15 | checkout(scm) 16 | echo "Pulling image ${imageId}" 17 | docker.image(imageId).pull() 18 | s3Fetch(destinationPath: "ci-metadata/", path: "ci-metadata/") 19 | s3Fetch(destinationPath: "bundles/", path: "bundles/") 20 | sh('''( [[ -f ci-metadata/executable-files.txt ]] && chmod -vv u+x $( cat ci-metadata/executable-files.txt ) ) || true; rm -rf ci-metadata''') 21 | def envParts = [ 22 | "KEEPBUNDLE=true", 23 | "DOCKER_IMAGE=${imageId}", 24 | ] 25 | if (extraEnv) { 26 | try { 27 | envParts += extraEnv 28 | } catch (Exception exc) { 29 | echo "Couldn't glue together extra env, ignoring. 
${extraEnv}; ${exc}" 30 | } 31 | } 32 | withEnv(envParts) { 33 | withTool(["jo", "jq", "git-appraise"]) { 34 | withChownWorkspace { 35 | sshagent(['docker-jenkins.github.ssh']) { 36 | sh(""" 37 | export DOCKER_GRAPHDRIVER=\$( docker info | awk -F ': ' '\$1 == "Storage Driver" { print \$2; exit }' ) 38 | make -e ci-${taskName} 39 | """) 40 | } 41 | } 42 | } 43 | if (this.versionString == null) { 44 | sh("pushd bundles && ls | grep -v latest > ../version-string.txt && popd") 45 | this.versionString = readFile("version-string.txt").trim() 46 | sh("rm version-string.txt") 47 | echo "Got version string: ${this.versionString}" 48 | } 49 | if (body) { body() } 50 | echo("${taskName} complete") 51 | sh("[[ -L bundles/latest ]] && rm bundles/latest") 52 | sh(''' 53 | find bundles -type l -print0 | while read -d $'\0' f ; do 54 | echo "found link $f -> $(readlink "$f")" 55 | target="$( dirname "$f" )/$( readlink "$f" )" 56 | if [[ -e "$target" ]] ; then 57 | [[ -d "$target" ]] && CP_FLAGS="-R" && RM_FLAGS="-r" 58 | mv "$f" "$f.lnk" && cp $CP_FLAGS "$target" "$f" && rm $RM_FLAGS "$f.lnk" 59 | fi 60 | echo "Realized symlink for: $f -> $target" 61 | done 62 | ''') 63 | sh("mkdir -p ci-metadata && find bundles -type f -executable | tee ci-metadata/executable-files.txt") 64 | s3Archive(sourcePath: "ci-metadata/", path: "ci-metadata/") 65 | s3Archive(sourcePath: "bundles/", path: "bundles/") 66 | } 67 | } 68 | } 69 | } 70 | 71 | def go2xunit(task) { 72 | sh("cd bundles/${this.versionString}/${task} && [ -e test-stdout.log ] && docker run --rm bmangold/go2xunit < test-stdout.log > test.xml") 73 | } 74 | 75 | def junitArchive(task) { 76 | step([$class: 'JUnitResultArchiver', testResults: "bundles/${this.versionString}/${task}/test.xml", keepLongStdio: true]) 77 | } 78 | 79 | def testTask(testName, label=null, extraEnv=null) { 80 | def needsXunit = testName != "test-docker-py" 81 | return this.makeTask(label ?: "docker", testName, extraEnv ?: []) { 82 | // Need to refresh timestamps else junit archiver will die. 
83 | sh "find 'bundles/${this.versionString}/${testName}' -type f -print0 | xargs -0 touch" 84 | if (needsXunit) { this.go2xunit(testName) } 85 | this.junitArchive(testName) 86 | } 87 | } 88 | 89 | def integrationTask(label) { 90 | return this.testTask("test-integration-cli", label, ["CI_TASK=test-integration-cli/JENKINS_LABEL=${label}"]) 91 | } 92 | 93 | def packageTask(pkgTask, distro) { 94 | return this.makeTask("docker", "${pkgTask}", ["DOCKER_BUILD_PKGS=${distro}", "CI_TASK=${pkgTask}/DOCKER_BUILD_PKGS=${distro}"]) 95 | } 96 | 97 | def buildTask(buildTaskName) { 98 | return this.makeTask("docker", buildTaskName, []) 99 | } 100 | 101 | def validateTask(validateTaskName) { 102 | return this.makeTask("docker", validateTaskName, []) 103 | } 104 | 105 | return this 106 | -------------------------------------------------------------------------------- /src/com/docker/projects/EngineApi.groovy: -------------------------------------------------------------------------------- 1 | package com.docker.projects; 2 | 3 | import groovy.transform.Field 4 | 5 | @Field 6 | def gocycloMax = 0 7 | 8 | @Field 9 | def goPackage = "github.com/docker/engine-api" 10 | 11 | def testJob(Map options) { 12 | def platform = options.get("platform", "linux") 13 | def label = options.get("label", "docker") 14 | def go_version = options.get("go_version", "1.5.3") 15 | 16 | return { 17 | wrappedNode(label: label) { 18 | deleteDir() 19 | if (platform == "windows") { tool 'hg' } 20 | checkout scm 21 | withEnv([ 22 | "GOVERSION=${go_version}", 23 | "GOCYCLO_MAX=${this.gocycloMax}", 24 | "GOPACKAGE=${this.goPackage}" 25 | ]) { 26 | withCredentials([[$class: 'StringBinding', credentialsId: 'docker-jenkins.token.github.com', variable: 'GITHUB_TOKEN']]) { 27 | sh "hack/test-${platform}.sh" 28 | } 29 | } 30 | step([$class: 'JUnitResultArchiver', testResults: 'results/tests.xml', keepLongStdio: true]) 31 | // step([$class: 'hudson.plugins.cobertura.CoberturaPublisher', coberturaReportFile: 'results/coverage.xml']) 32 | step([ 33 | $class: 'WarningsPublisher', 34 | parserConfigurations: [[ 35 | parserName: "Go Lint", 36 | pattern: "results/fmt.txt,results/lint.txt,results/cyclo.txt", 37 | ], [ 38 | parserName: "Go Vet", 39 | pattern: "results/vet.txt" 40 | ]], 41 | unstableTotalAll: '0' 42 | ]) 43 | archive 'results' 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/com/docker/utilities/AWSSync.groovy: -------------------------------------------------------------------------------- 1 | package com.docker.utilities 2 | 3 | def s3UpdateOptions(options) { 4 | options.bucket = options.bucket ?: 'docker-ci-artifacts' 5 | if (options.bucket == 'docker-ci-artifacts') { 6 | options.region = 'us-west-2' 7 | } 8 | options.credentials = options.credentials ?: 'ci@docker-qa.aws' 9 | if (options.project == null) { 10 | def path = scm.repositories[0].URIs[0].path 11 | def repoOwner = path.split('/')[0] 12 | def repoName = path.split('/')[-1].replaceAll(/\.git$/, '') 13 | options.project = "${repoOwner}/${repoName}" 14 | } 15 | options.path = options.path ?: '' 16 | options.ref = options.ref ?: gitCommit() 17 | if (options.includeRef == null) { 18 | options.includeRef = true 19 | } 20 | options.refPathPart = options.includeRef ? 
"${options.ref}/" : "" 21 | if (options.fullRemotePath == null) { 22 | options.fullRemotePath = "${options.bucket}/${options.project}/${options.refPathPart}/${options.path}" 23 | } 24 | options.fullRemotePath = "s3://" + options.fullRemotePath.replaceAll('//', '/') 25 | } 26 | 27 | def s3Sync(options, src, dst) { 28 | withEnv(["AWS_DEFAULT_REGION=${options.region}"]) { 29 | withCredentials([[$class: "AmazonWebServicesCredentialsBinding", credentialsId: options.credentials]]) { 30 | sh """ 31 | docker run \\ 32 | --rm \\ 33 | -e AWS_SECRET_ACCESS_KEY \\ 34 | -e AWS_ACCESS_KEY_ID \\ 35 | -e AWS_DEFAULT_REGION \\ 36 | -v "\$(pwd):/files" \\ 37 | --workdir="/files" \\ 38 | anigeo/awscli \\ 39 | s3 sync ${options.s3SyncArgs ?: ''} "${src}" "${dst}" 40 | """ 41 | } 42 | } 43 | } 44 | def s3Download(options=[:]) { 45 | s3UpdateOptions(options) 46 | 47 | withChownWorkspace { 48 | try { 49 | s3Sync(options, options.fullRemotePath, options.destinationPath ?: ".") 50 | } catch (Exception exc) { 51 | if (options.required) { 52 | throw exc 53 | } else { 54 | echo "Ignoring error in s3Download. set `required: true` to propagate error." 55 | } 56 | } 57 | } 58 | } 59 | 60 | def s3Upload(options=[:]) { 61 | s3UpdateOptions(options) 62 | s3Sync(options, options.sourcePath ?: ".", options.fullRemotePath) 63 | } 64 | 65 | return this 66 | -------------------------------------------------------------------------------- /vars/codecov.groovy: -------------------------------------------------------------------------------- 1 | import org.kohsuke.github.GitHub 2 | 3 | def call(credsName=null) { 4 | if (!credsName) { 5 | def path = scm.repositories[0].URIs[0].path 6 | def repoOwner = path.split('/')[0] 7 | def repoName = path.split('/')[-1].replaceAll(/\.git$/, '') 8 | credsName = repoName 9 | if (repoOwner && repoOwner != 'docker') { 10 | credsName = "${repoOwner}-${credsName}" 11 | } 12 | } 13 | 14 | def branchName = env.BRANCH_NAME 15 | if (env.CHANGE_ID) { 16 | def repoUrl = sh script: "git config --get remote.origin.url", returnStdout: true 17 | // Need to get name from url, supports these variants: 18 | // git@github.com:docker/docker.git -> docker/docker 19 | // git://github.com/docker/docker.git -> docker/docker 20 | // https://github.com/docker/docker.git -> docker/docker 21 | // ssh://git@github.com/docker/docker.git -> docker/docker 22 | // 1. split on colon, take the last part. 23 | // 2. split that on slash, take the last 2 parts and rejoin them with /. 24 | // 3. remove .git at the end 25 | // 4. 
ta-da 26 | def repoName = repoUrl.split(":")[-1].split("/")[-2..-1].join("/").replaceAll(/\.git$/, '') 27 | def githubToken 28 | withCredentials([[ 29 | variable: "GITHUB_TOKEN", 30 | credentialsId: "docker-ci-scanner.token-only.github.com", 31 | $class: "StringBinding", 32 | ]]) { 33 | githubToken = env.GITHUB_TOKEN 34 | } 35 | def gh = GitHub.connectUsingOAuth(githubToken) 36 | def pr = gh.getRepository(repoName).getPullRequest(env.CHANGE_ID.toInteger()) 37 | branchName = "${pr.head.repo.owner.login}/${pr.head.ref}" 38 | } 39 | 40 | // Set some env variables so codecov detection script works correctly 41 | withEnv(["ghprbPullId=${env.CHANGE_ID}", "GIT_BRANCH=${branchName}"]) { 42 | withCredentials([[$class: 'StringBinding', credentialsId: "${credsName}.codecov-token", variable: 'CODECOV_TOKEN']]) { 43 | sh 'bash <(curl -s https://codecov.io/bash) || echo "codecov exited with \$?"' 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /vars/codecov.txt: -------------------------------------------------------------------------------- 1 | codecov(credsName=null) 2 | 3 |
Submits coverage information to codecov.io using their bash script. 4 | 5 | credsName is optional: 6 | when omitted, it is derived from the SCM URL as the repository name, prefixed with "<owner>-" when the repository owner is not docker (e.g. docker/swarm becomes swarm). 17 | The result of this is then combined with the suffix .codecov-token 18 | (e.g.: swarm.codecov-token) and used to look up a "secret text" 19 | credentials item which is used to submit the codecov token. Please contact your 20 | Jenkins administrator if you need credentials added to the system for your project. 21 |
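A minimal usage sketch (assuming the shared library is loaded, the job has already produced coverage output, and a matching "<credsName>.codecov-token" secret-text credential exists; the label and make target are illustrative):

```groovy
wrappedNode(label: 'docker') {
  checkout scm
  sh 'make coverage'   // produce coverage files for the codecov bash uploader to pick up
  codecov()            // or codecov('my-project') to override the derived credentials name
}
```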
22 | -------------------------------------------------------------------------------- /vars/dockerImageTagToDigest.groovy: -------------------------------------------------------------------------------- 1 | import java.net.URLEncoder 2 | 3 | // TODO: implement this in pure groovy instead of relying on a 4 | // linux node and curl 5 | // 6 | // TODO: support other registry endpoints 7 | 8 | @NonCPS 9 | def urlEncode(Map map) { 10 | map.collect { k,v -> URLEncoder.encode(k, 'UTF-8') + '=' + URLEncoder.encode(v, 'UTF-8') }.join('&') 11 | } 12 | 13 | def call(String imageName, String credentialsId="dockerbuildbot-hub.docker.com") { 14 | if (imageName.contains("@")) { 15 | echo "dockerImageTagToDigest: ${imageName} appears to be a digest. Returning it unmodified." 16 | return imageName 17 | } 18 | 19 | if (!imageName.contains("/")) { 20 | // For some reason just automatically fixing this doesn't work. not sure why :( 21 | throw new Exception("Specify image name with 'library/${imageName}' instead of bare '${imageName}'") 22 | } 23 | 24 | def repo = imageName 25 | def tag = 'latest' 26 | 27 | if (imageName.contains(":")) { 28 | def imageNameParts = imageName.split(":", 2) 29 | repo = imageNameParts[0] 30 | tag = imageNameParts[1] 31 | } 32 | 33 | String token = null 34 | 35 | withTool("jq") { 36 | withCredentials([[ 37 | $class: 'UsernamePasswordMultiBinding', 38 | credentialsId: credentialsId, 39 | usernameVariable: '__JP_DOCKERHUB_USERNAME', 40 | passwordVariable: '__JP_DOCKERHUB_PASSWORD' 41 | ]]) { 42 | def params = [ 43 | service: "registry.docker.io", 44 | scope: "repository:${repo}:pull", 45 | account: env.__JP_DOCKERHUB_USERNAME 46 | ] 47 | token = sh( 48 | returnStdout: true, 49 | script: """set +x; set -o pipefail; curl -sSl \\ 50 | -u "\$__JP_DOCKERHUB_USERNAME:\$__JP_DOCKERHUB_PASSWORD" \\ 51 | "https://auth.docker.io/token?${urlEncode(params)}" \\ 52 | | jq -r .token""" 53 | ).trim() 54 | } 55 | } 56 | 57 | try { 58 | return repo + "@" + sh( 59 | returnStdout: true, 60 | script: """set +x; set -o pipefail; curl -sfi \\ 61 | -H "Authorization: Bearer ${token}" \\ 62 | -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \\ 63 | "https://registry-1.docker.io/v2/${repo}/manifests/${tag}" \\ 64 | | awk -F ': ' '\$1 == "Docker-Content-Digest" {print \$2}' 65 | """).trim() 66 | } catch (Exception exc) { 67 | return false 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /vars/dockerImageTagToDigest.txt: -------------------------------------------------------------------------------- 1 | 2 | dockerImageTagToDigest(String imageName, String credentialsId="dockerbuildbot-hub.docker.com") 3 | 4 |
5 | Uses the Docker Hub API to convert an image name to its unique content digest. Examples: 6 | 7 | dockerImageTagToDigest("dockerqa/jo") -> "dockerqa/jo@sha256:b62d281f32df8cf006a87b7516cfe0dac7f350372cb2c1a26b0728686fd78480" 8 |
9 | dockerImageTagToDigest("something/thatdoesnt:exist") -> false 10 |
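A sketch of using the digest to pin a build image (the image name and label are illustrative; note that official images must be given with the library/ prefix):

```groovy
wrappedNode(label: 'docker') {
  def pinned = dockerImageTagToDigest('library/golang:1.10')
  if (pinned) {
    docker.image(pinned).pull()   // pull by digest so every run uses exactly the same image
  } else {
    error 'could not resolve the image digest'
  }
}
```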
11 | -------------------------------------------------------------------------------- /vars/documentationChecker.groovy: -------------------------------------------------------------------------------- 1 | 2 | 3 | // Add the following code to any Jenkinsfile. 4 | // It will return with success if there are no changes in the specified docs dir 5 | // and Will return with success if run on a non Pull Request branch 6 | 7 | // documentationChecker("docs") 8 | 9 | // docsDir ~~ the subdirectory within your repo that the documentation resides 10 | def call(String docsDir) { 11 | stage("docs PR checker") { 12 | tokens = "${env.JOB_NAME}".tokenize('/') 13 | org = tokens[0] 14 | repo = tokens[1] 15 | branch = tokens[2] 16 | sha = gitCommit() 17 | imageName = "${repo}/${branch}:${env.BUILD_ID}" 18 | imageName = imageName.toLowerCase() 19 | containerName = "${repo}-${branch}-${env.BUILD_ID}" 20 | containerName = containerName.toLowerCase() 21 | 22 | changes = sh(script: "git log origin/master..${sha} -- ${docsDir}", returnStdout: true).trim() 23 | if (changes.size() == 0) { 24 | echo "no changes found in ${docsDir}" 25 | return 26 | } 27 | 28 | try { 29 | echo changes 30 | if (env.CHANGE_ID) { 31 | try { 32 | slackSend channel: '#docs-automation', message: "Starting docs test of - <${env.CHANGE_URL}|${repo} PR#${env.CHANGE_ID}> : ${env.CHANGE_TITLE}- see <${env.BUILD_URL}/console|the Jenkins console for job ${env.BUILD_ID}>" 33 | } catch (java.lang.Throwable err1) { 34 | echo "Failed to send start message to slack: ${err1}" 35 | } 36 | } else { 37 | echo "Skipping slack start message; no CHANGE_ID" 38 | } 39 | sh "docker pull docs/base:oss" 40 | try { 41 | sh "docker build -t ${imageName} ${docsDir}" 42 | try { 43 | sh "docker run --name=${containerName} ${imageName} bash /docs/validate.sh" 44 | 45 | // TODO: summarize the changes & errors (these are files used by GHPRB and the summary plugin 46 | sh "docker cp ${containerName}:/validate.junit.xml ." 47 | sh "docker cp ${containerName}:/docs/markdownlint.summary.txt ." 48 | 49 | //setGitHubPullRequestStatus message: "docs test complete" 50 | } finally { 51 | sh "docker rm -f ${containerName}" 52 | } 53 | } finally { 54 | sh "docker rmi -f ${imageName}" 55 | } 56 | } catch (java.lang.Throwable err2) { 57 | if (env.CHANGE_ID) { 58 | try { 59 | slackSend channel: '#docs-automation', message: "BUILD FAILURE: @${env.CHANGE_AUTHOR} - <${env.CHANGE_URL}|${repo} PR#${env.CHANGE_ID}> : ${env.CHANGE_TITLE}- see <${env.BUILD_URL}/console|the Jenkins console for job ${env.BUILD_ID}>" 60 | } catch (java.lang.Throwable err3) { 61 | echo "Failed to send failure message to slack: ${err3}" 62 | } 63 | } else { 64 | echo "Skipping slack error message; no CHANGE_ID" 65 | } 66 | } 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /vars/documentationChecker.txt: -------------------------------------------------------------------------------- 1 | 2 | documentationChecker(docsDir string) 3 | 4 |
5 | Checks that the docsDir markdown files have valid frontmatter and that links are 6 | consistent and resolvable. It will skip the check if there are no git changes 7 | in the specified docs dir, and for now it will do nothing if the job is not a pull-request job. 8 |
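A minimal Jenkinsfile sketch, mirroring the usage comment at the top of documentationChecker.groovy (the label is illustrative):

```groovy
wrappedNode(label: 'docker') {
  checkout scm
  documentationChecker('docs')   // 'docs' is the subdirectory containing the documentation
}
```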
9 | 10 | 11 | -------------------------------------------------------------------------------- /vars/getOutput.groovy: -------------------------------------------------------------------------------- 1 | String call(cmd) { 2 | echo 'getOutput is deprecated, just use the returnStdout option to sh' 3 | sh script: cmd, returnStdout: true 4 | } 5 | -------------------------------------------------------------------------------- /vars/getOutput.txt: -------------------------------------------------------------------------------- 1 | Deprecated 2 | 3 |
4 | Use the returnStdout option of sh instead. 5 |
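For reference, a sketch of the migration (the command is illustrative):

```groovy
// Deprecated helper:
def oldStyle = getOutput('git rev-parse HEAD')
// Built-in equivalent; add .trim() if you do not want the trailing newline:
def newStyle = sh(script: 'git rev-parse HEAD', returnStdout: true)
```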
6 | -------------------------------------------------------------------------------- /vars/gitCommit.groovy: -------------------------------------------------------------------------------- 1 | String call() { 2 | if (env.GIT_COMMIT == null) { 3 | env.GIT_COMMIT = sh(script: "git rev-parse HEAD", returnStdout: true).trim() 4 | } 5 | env.GIT_COMMIT 6 | } 7 | -------------------------------------------------------------------------------- /vars/gitCommit.txt: -------------------------------------------------------------------------------- 1 | gitCommit() 2 | 3 |
Determines the current git revision in the workspace. The revision is returned directly as a string and also set as the environment variable GIT_COMMIT. 4 | This function must be executed in a node context as it requires a workspace.
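A small sketch of typical usage (the image name is illustrative):

```groovy
wrappedNode(label: 'docker') {
  checkout scm
  def sha = gitCommit()
  sh "docker build -t myorg/myapp:${sha} ."   // also available afterwards as env.GIT_COMMIT
}
```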
5 | -------------------------------------------------------------------------------- /vars/golangTester.groovy: -------------------------------------------------------------------------------- 1 | def call(args=null, Closure body=null) { 2 | args = args ?: [:] 3 | 4 | def githubCredentials = args.get("github_credentials", "docker-jenkins.token.github.com") 5 | def packageName = args.get("package", null) 6 | def label = args.get("label", "docker") 7 | def goVersion = args.get("go_version", null) 8 | def gocycloMax = args.get('gocyclo_max', "20") 9 | def testerTag = args.get("golang_tag", goVersion) ?: "gimme" 10 | def envVars = args.get("env_vars", []) 11 | def gocovArgs = args.get("gocov_args", "") 12 | def maxWarnings = args.get("max_warnings", 0) 13 | 14 | if (!packageName) { 15 | throw new Exception("Please specify a 'package': `golangTester(package: 'github.com/docker/my-proj')`") 16 | } 17 | 18 | if (!goVersion) { 19 | echo "INFO: using latest version of golang in 'gimme' image. To change this, specify a go_version: `golangTester(go_version: '1.6.1')`" 20 | } 21 | 22 | return { 23 | wrappedNode(label: label) { 24 | deleteDir() 25 | checkout scm 26 | def image = docker.image("dockerautomation/golang-tester:${testerTag}") 27 | def testsPassed 28 | image.pull() 29 | 30 | withCredentials([[ 31 | variable: "GITHUB_TOKEN", 32 | credentialsId: githubCredentials, 33 | $class: "StringBinding", 34 | ]]) { 35 | sh 'echo "machine github.com login $GITHUB_TOKEN" > net-rc' 36 | } 37 | 38 | withChownWorkspace { 39 | withEnv([ 40 | "GOVERSION=${goVersion ?: ''}", 41 | "GOCYCLO_MAX=${gocycloMax ?: ''}", 42 | "GOPACKAGE=${packageName}", 43 | "GOCOV_ARGS=${gocovArgs}" 44 | ] + envVars) { 45 | def cmd = """#!/bin/bash -x 46 | docker run \\ 47 | --rm \\ 48 | -i \\ 49 | -v "\$(pwd)/net-rc:/root/.netrc" \\ 50 | -v "\$(pwd):/go/src/\$GOPACKAGE" \\ 51 | -v "\$(pwd)/results:/output" \\ 52 | -e "GOVERSION=\$GOVERSION" \\ 53 | -e "GOCYCLO_MAX" \\ 54 | -e "GOPACKAGE" \\ 55 | -e "GOCOV_ARGS" \\ 56 | """ 57 | for (int i = 0; i < envVars.size(); i++) { 58 | cmd += "-e \"${envVars.get(i)}\" \\\n" 59 | } 60 | cmd += image.id 61 | try { 62 | sh(cmd) 63 | testsPassed = true 64 | } catch (Exception exc) { 65 | testsPassed = false 66 | } 67 | } 68 | } 69 | 70 | if (readFile('results/tests.xml').size() != 0) { 71 | step([$class: 'JUnitResultArchiver', testResults: 'results/tests.xml', keepLongStdio: true]) 72 | } 73 | /* Cobertura publisher not yet supported in Pipeline */ 74 | // if (readFile('results/coverage.xml').size() != 0) { 75 | // step([$class: 'CoberturaPublisher', coberturaReportFile: 'results/coverage.xml']) 76 | // } 77 | step([ 78 | $class: 'WarningsPublisher', 79 | parserConfigurations: [[ 80 | parserName: "Go Lint", 81 | pattern: "results/fmt.txt,results/lint.txt,results/cyclo.txt,results/ineffassign.txt", 82 | ], [ 83 | parserName: "Go Vet", 84 | pattern: "results/vet.txt" 85 | ]], 86 | unstableTotalAll: "${maxWarnings}" 87 | ]) 88 | archive 'results/**' 89 | if (body) { body() } 90 | if (!testsPassed && currentBuild.result && currentBuild.result == 'SUCCESS') { 91 | currentBuild.result = 'UNSTABLE' 92 | } 93 | } 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /vars/golangTester.txt: -------------------------------------------------------------------------------- 1 |
 2 | // Defaults shown; only package is required
 3 | golangTester(
 4 |   package: "your go package name", // like "github.com/docker/docker"
 5 |   label: "docker",
 6 |   env_vars: ["KEY=value", "FOO=bar"],
 7 |   go_version: "",
 8 |   gocyclo_max: "20",
 9 |   golang_tag: "gimme", // or go_version if specified
10 |   gocov_args: "",
11 |   max_warnings: 0,
12 |   github_credentials: "docker-jenkins.token.github.com"
13 | ) {
14 |   // optional
15 | }
16 | 
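Note that golangTester(...) returns a closure rather than running immediately, so a Jenkinsfile typically invokes the result, directly or via parallel. A sketch, with an assumed package name:

```groovy
def testJob = golangTester(package: 'github.com/docker/example-project')
testJob()   // or: parallel(linux: testJob, other: someOtherJobClosure)
```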
17 | 18 | Uses images from the dockerautomation/golang-tester repository to test a golang package. It runs golint, go fmt, go vet, gocyclo, and go test (with gocov). 19 | Lint, fmt, vet, and cyclo results are recorded using the Warnings plugin. The build will be marked Unstable if there are more than max_warnings. 20 | Test results are recorded (as JUnit) and the build will be marked Unstable if there are failures. 21 | After running these tasks, it executes the given closure (if any). 22 | -------------------------------------------------------------------------------- /vars/nodeExists.groovy: -------------------------------------------------------------------------------- 1 | import jenkins.model.Jenkins 2 | 3 | def call(label) { 4 | def labelObj = Jenkins.instance.getLabel(label) 5 | return (labelObj.nodes.size() + labelObj.clouds.size()) > 0 6 | } 7 | -------------------------------------------------------------------------------- /vars/nodeExists.txt: -------------------------------------------------------------------------------- 1 | nodeExists(label) 2 | 3 |
Returns true if there is at least one node or cloud configuration that matches the given label; false otherwise.
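A sketch of a common pattern: fall back to a generic label when an architecture-specific one is not configured (the labels are illustrative):

```groovy
def buildLabel = nodeExists('s390x') ? 's390x' : 'docker'
wrappedNode(label: buildLabel) {
  checkout scm
  sh 'make test'
}
```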
4 | -------------------------------------------------------------------------------- /vars/s3Archive.groovy: -------------------------------------------------------------------------------- 1 | def call(options=[:]) { 2 | def aws = new com.docker.utilities.AWSSync() 3 | aws.s3Upload(options) 4 | } 5 | -------------------------------------------------------------------------------- /vars/s3Archive.txt: -------------------------------------------------------------------------------- 1 |
 2 | // Defaults shown; none are required but sourcePath is recommended.
 3 | s3Archive(
 4 |     sourcePath = '.',
 5 |     bucket = "docker-ci-artifacts",
 6 |     region = "us-west-2", // NOTE: this default is only provided when bucket is "docker-ci-artifacts"
 7 |     credentials = "ci@docker-qa.aws", // Credentials ID of an AWS Credentials item.
 8 |     project = null, // determined by SCM url. e.g., docker/docker for github.com/docker/docker.git
 9 |     path = "", // additional partial path to append to remote path
10 |     ref = gitCommit(), // Use this to override the SHA if necessary.
11 |     includeRef = true, // by default, the SCM ref is part of the remote destination. set to false to disable this
12 |     fullRemotePath = null, // Use this to set your own destination. this overrides: bucket, project, path, ref, and includeRef.
13 |     s3SyncArgs = null, // Set additional arguments for `aws s3 sync` command. See: http://docs.aws.amazon.com/cli/latest/reference/s3/sync.html
14 | )
15 | 
16 | 17 | 18 |
Upload files to S3. 19 | 20 | sourcePath is recommended. By default, the files under sourcePath will be uploaded to "${bucket}/${project}/${ref}/${path}/". 21 | Each component of the path can be controlled via the options shown above, or the entire path can be 22 | provided with the fullRemotePath option. 23 | 24 | 25 | Basic example usage:
26 |
27 | // In the github.com/docker/docker.git project with git SHA = deadbeef:
28 | s3Archive(sourcePath: "bundles/")
29 | 
30 |
This will put the contents of the bundles directory at s3://docker-ci-artifacts/docker/docker/deadbeef/
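Another sketch, using the documented options to add a path component and tune the sync (the values are illustrative):

```groovy
// Archive bundles under an extra "nightly/" path component and skip log files
s3Archive(sourcePath: 'bundles/', path: 'nightly/', s3SyncArgs: '--exclude "*.log"')
```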
31 | -------------------------------------------------------------------------------- /vars/s3Fetch.groovy: -------------------------------------------------------------------------------- 1 | def call(options=[:]) { 2 | def aws = new com.docker.utilities.AWSSync() 3 | aws.s3Download(options) 4 | } 5 | -------------------------------------------------------------------------------- /vars/s3Fetch.txt: -------------------------------------------------------------------------------- 1 |
 2 | s3Fetch(
 3 |     destinationPath = '.',
 4 |     bucket = "docker-ci-artifacts",
 5 |     region = "us-west-2", // NOTE: this default is only provided when bucket is "docker-ci-artifacts"
 6 |     credentials = "ci@docker-qa.aws", // Credentials ID of an AWS Credentials item.
 7 |     project = null, // determined by SCM url. e.g., docker/docker for github.com/docker/docker.git
 8 |     path = "", // additional partial path to append to remote path
 9 |     ref = gitCommit(), // Use this to override the SHA if necessary.
10 |     includeRef = true, // by default, the SCM ref is part of the remote destination. set to false to disable this
11 |     fullRemotePath = null, // Use this to set your own destination. this overrides: bucket, project, path, ref, and includeRef.
12 |     s3SyncArgs = null, // Set additional arguments for `aws s3 sync` command. See: http://docs.aws.amazon.com/cli/latest/reference/s3/sync.html
13 | )
14 | 
15 | 16 |
Fetch files from S3. 17 | 18 | destinationPath is recommended. By default, files will be downloaded from "${bucket}/${project}/${ref}/${path}/". 19 | Each component of the path can be controlled via the options shown above, or the entire path can be 20 | provided with the fullRemotePath option. 21 | 22 | 23 | Basic example usage:
24 |
25 | // In the github.com/docker/docker.git project with git SHA = deadbeef:
26 | s3Fetch(destinationPath: "bundles/")
27 | 
28 |
This will put the contents of s3://docker-ci-artifacts/docker/docker/deadbeef/ in the bundles directory.
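By default a failed download is logged and ignored (see AWSSync.s3Download above). A sketch of fetching a sub-path and failing the build when it is missing:

```groovy
// Fail the build if the expected bundles are not present in S3
s3Fetch(destinationPath: 'bundles/', path: 'bundles/', required: true)
```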
29 | -------------------------------------------------------------------------------- /vars/withChownWorkspace.groovy: -------------------------------------------------------------------------------- 1 | def call(Closure body=null) { 2 | def retVal 3 | try { 4 | if (body) { retVal = body() } 5 | } finally { 6 | try { 7 | echo "chowning workspace" 8 | def arch = sh(script: "uname -m", returnStdout: true).trim() 9 | def image = "busybox" 10 | if (arch.startsWith("arm")) { 11 | image = "armhf/busybox" 12 | } else if (arch == "aarch64" ) { 13 | image = "arm64v8/busybox" 14 | } else if (arch == "ppc64le" || arch == "s390x") { 15 | image = "${arch}/busybox" 16 | } 17 | sh "docker run --rm -v \$(pwd):/workspace ${image} chown -R \"\$(id -u):\$(id -g)\" /workspace" 18 | } catch (Exception e) { 19 | println e 20 | } 21 | } 22 | retVal 23 | } 24 | -------------------------------------------------------------------------------- /vars/withChownWorkspace.txt: -------------------------------------------------------------------------------- 1 | withChownWorkspace { ... } 2 | 3 |
4 | Performs the tasks specified in the given closure and chowns the workspace afterwards. 5 | It requires Docker and is unlikely to work on a non-Linux host. 6 | The exact mechanism is as follows (an architecture-specific busybox image is substituted on arm, aarch64, ppc64le and s390x nodes): 7 |
docker run --rm -v $(pwd):/workspace busybox chown -R "$(id -u):$(id -g)" /workspace
8 |
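A sketch of the intended pattern: run container work that may leave root-owned files behind, and let the wrapper restore ownership so later checkout or deleteDir steps do not fail (the image and command are illustrative):

```groovy
wrappedNode(label: 'docker') {
  checkout scm
  withChownWorkspace {
    sh 'docker run --rm -v "$(pwd):/work" -w /work busybox sh -c "touch output.txt"'
  }
}
```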
9 | -------------------------------------------------------------------------------- /vars/withTool.groovy: -------------------------------------------------------------------------------- 1 | import hudson.tools.ToolInstallation 2 | 3 | def call(tools, Closure body=null) { 4 | if (! (tools instanceof List)) { 5 | tools = [tools] 6 | } 7 | def toolEnv = [] 8 | def pathEnv = [] 9 | def toolNames = [] 10 | 11 | for (int i = 0; i < tools.size(); i++) { 12 | def toolString = tools[i] 13 | 14 | def match = (toolString =~ /^([^@]+)(?:@(.+))?$/) 15 | if (!match) { continue; } 16 | 17 | def toolName = match[0][1] 18 | def toolVersion = match[0][2] 19 | match = null 20 | 21 | toolNames << toolName 22 | 23 | for (int j = 0; j < ToolInstallation.all().size(); j++) { 24 | def installations = ToolInstallation.all()[j].installations 25 | for (int k = 0; k < installations.size(); k++) { 26 | def toolInstallation = installations[k] 27 | // This is not the tool we're looking for 28 | if (toolInstallation.name != toolName) { continue; } 29 | // We found our tool and it doesn't have different versions 30 | if (!toolInstallation.toolVersion) { 31 | if (toolVersion) { 32 | echo "Tool installer: '${toolName}' will be installed but versions are not configured so the version string '${toolVersion}' is being ignored." 33 | toolVersion = null 34 | } 35 | break 36 | } 37 | 38 | if (!toolVersion) { 39 | // Versions are enabled but toolVersion is not set. use default 40 | toolVersion = toolInstallation.toolVersion.versionsListSource.defaultValue 41 | } else { 42 | // Let's check if the request version exists 43 | def versionSource = toolInstallation.toolVersion.versionsListSource 44 | def availableVersions = versionSource.value.split(versionSource.multiSelectDelimiter) 45 | if (!availableVersions.contains(toolVersion)) { 46 | throw new Exception("Tool installer: '${toolName}' has no configuration for version '${toolVersion}'") 47 | } 48 | } 49 | 50 | if (toolInstallation.hasAdditionalVariables()) { 51 | def extraVars = toolInstallation.additionalVariables.split("\n") 52 | for (int l = 0; l < extraVars.size(); l++) { 53 | def extraVar = extraVars[l].trim() 54 | if (extraVar.size() == 0) { continue; } 55 | if (!extraVar.contains('=')) { 56 | echo "Ignoring invalid extra variable for '${toolName}': '${extraVar}'" 57 | continue 58 | } 59 | toolEnv.add(extraVar) 60 | } 61 | } 62 | toolInstallation = null 63 | } 64 | } 65 | // Still possible that we don't have a version 66 | if (toolVersion) { 67 | toolEnv << toolName.replaceAll(/\W/, '_').toUpperCase() + "_VERSION=${toolVersion}" 68 | } 69 | } 70 | withEnv(toolEnv) { 71 | for (i = 0; i < toolNames.size(); i++) { 72 | def toolName = toolNames[i] 73 | pathEnv << tool(toolName) 74 | } 75 | pathEnv << env.PATH 76 | withEnv(["PATH=${pathEnv.join(":")}"]) { 77 | if (body) { body() } 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /vars/withTool.txt: -------------------------------------------------------------------------------- 1 | withTool("tool-name") { ... }
2 | withTool("tool-name@1.0") { ... }
3 | withTool(["tool-one@1.0", "tool-two"]) { ... }
4 | 5 |
6 | Install one or more pre-configured tools with specified versions and execute 7 | the given closure with PATH configured to use the installed tools. 8 | Without a version specified, the default version will be used. Caution: a tool's default version may change over time. 9 | Please contact your Jenkins administrator if you need additional tools or versions made available for installation. 10 |
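For a concrete example, this repository's own Docker.groovy helper uses withTool to put several utilities on PATH for the duration of a block; the body shown here is illustrative:

```groovy
withTool(['jo', 'jq', 'git-appraise']) {
  sh 'jq --version'   // the requested tools are installed and prepended to PATH inside this block
}
```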
11 | -------------------------------------------------------------------------------- /vars/withVpn.groovy: -------------------------------------------------------------------------------- 1 | def call(vpnHost, Closure body=null) { 2 | def vpnImage = docker.image("dckr/vpn-client") 3 | def vpnContainerName = "${env.BUILD_TAG}-vpn-client" 4 | withDockerRegistry(credentialsId: "dockerbuildbot-index.docker.io") { 5 | vpnImage.pull() 6 | } 7 | try { 8 | // Start VPN client for stage 9 | withCredentials([[ 10 | usernameVariable: "VPN_USERNAME", 11 | passwordVariable: "VPN_PASSWORD", 12 | credentialsId: "jenkins.${vpnHost}", 13 | $class: "UsernamePasswordMultiBinding" 14 | ]]) { 15 | sh """ 16 | docker run \\ 17 | -d \\ 18 | --name "${vpnContainerName}" \\ 19 | --restart=always \\ 20 | --cap-add NET_ADMIN \\ 21 | --device /dev/net/tun \\ 22 | -e REMOTE_VPN=${vpnHost} \\ 23 | -e VPN_USERNAME \\ 24 | -e VPN_PASSWORD \\ 25 | --net=host \\ 26 | ${vpnImage.imageName()} 27 | """ 28 | } 29 | 30 | if (body) { body() } 31 | 32 | } finally { 33 | sh "docker rm -f ${vpnContainerName} ||:" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /vars/withVpn.txt: -------------------------------------------------------------------------------- 1 | withVpn("vpn-host.example.org") { ... } 2 | 3 |
4 | Starts a VPN container, runs the given closure, and removes the VPN container. Connects using credentials 5 | with ID matching "jenkins.$HOST" (e.g. "jenkins.vpn-host.example.org"). Please contact your 6 | Jenkins administrator if you need credentials added to the system for your project. 7 |
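A usage sketch, assuming a "jenkins.vpn-host.example.org" username/password credential exists and the node can run the VPN container (the host and command are illustrative):

```groovy
wrappedNode(label: 'docker') {
  withVpn('vpn-host.example.org') {
    sh 'curl -sf http://internal.example.org/healthz'   // traffic now flows through the VPN container
  }
}
```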
8 | -------------------------------------------------------------------------------- /vars/wrappedNode.groovy: -------------------------------------------------------------------------------- 1 | def call(Map vars, Closure body=null) { 2 | vars = vars ?: [:] 3 | node(vars.get("label", null)) { 4 | withDockerRegistry(url: vars.get("registryUrl", "https://index.docker.io/v1/"), credentialsId: vars.get("registryCreds", "dockerbuildbot-index.docker.io")) { 5 | wrap([$class: 'TimestamperBuildWrapper']) { 6 | wrap([$class: 'AnsiColorBuildWrapper']) { 7 | if (vars.get('cleanWorkspace', false)) { 8 | // NOTE: `withChownWorkspace` uses docker. If our `label` doesn't have docker 9 | // or is misconfigured, these operations will fail and the exception will be 10 | // propagated. 11 | withChownWorkspace { echo "cleanWorkspace: Ensuring workspace is owned by ${env.USER}" } 12 | echo "Removing all docker containers" 13 | try { 14 | sh("for cid in \$(docker container ls -aq); do docker container rm -vf \$cid; done") 15 | } catch (Exception e) { 16 | println(e) 17 | } 18 | echo "Docker containers have been removed" 19 | 20 | echo "cleanWorkspace: Removing existing workspace" 21 | deleteDir() 22 | echo "cleanWorkspace: Workspace is clean." 23 | } 24 | if (body) { body() } 25 | } 26 | } 27 | } 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /vars/wrappedNode.txt: -------------------------------------------------------------------------------- 1 | wrappedNode(label: null, registryCreds: "dockerbuildbot-index.docker.io", cleanWorkspace: false) { ... } 2 | 3 |
4 | Runs the given closure on a node matching label with the following behaviors: it logs in to the Docker registry using the registryCreds credentials, wraps the build in the Timestamper and AnsiColor wrappers, and, when cleanWorkspace is true, chowns the workspace, removes all Docker containers on the node, and deletes the workspace before running the closure. 11 |
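A minimal sketch of the usual entry point for jobs built on this library (the label and steps are illustrative):

```groovy
wrappedNode(label: 'docker', cleanWorkspace: true) {
  checkout scm
  sh 'make test'
}
```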
12 | --------------------------------------------------------------------------------