├── .gitignore ├── .taskkey ├── .vscode └── settings.json ├── DockerIntegration_ExtensionForVisualStudioTeamServices_Pre-ReleaseEULA.docx ├── LICENSE ├── README.md ├── SECURITY.md ├── ThirdPartyNotices.txt ├── gulpfile.js ├── package.json ├── pytests.sh ├── src ├── docker.md ├── images │ ├── DockerBuild.png │ ├── DockerCommand.png │ ├── DockerComposeBuild.png │ ├── DockerComposeCommand.png │ ├── DockerComposeConfig.png │ ├── DockerComposeLock.png │ ├── DockerComposePush.png │ ├── DockerComposeRun.png │ ├── DockerComposeUp.png │ ├── DockerDeployAcsDcos.png │ ├── DockerHost.png │ ├── DockerPush.png │ ├── DockerRegistry.png │ ├── DockerRun.png │ ├── DockerRunWorkload.png │ ├── ServicesTab.png │ ├── docker_logo.png │ └── docker_logo_large.png ├── tasks │ ├── docker │ │ ├── docker.ts │ │ ├── dockerBuild.ts │ │ ├── dockerCommand.ts │ │ ├── dockerConnection.ts │ │ ├── dockerImageUtils.ts │ │ ├── dockerPush.ts │ │ ├── dockerRun.ts │ │ ├── gitUtils.ts │ │ ├── icon.png │ │ ├── sourceUtils.ts │ │ └── task.json │ ├── dockerCompose │ │ ├── dockerCompose.ts │ │ ├── dockerComposeBuild.ts │ │ ├── dockerComposeCommand.ts │ │ ├── dockerComposeConfig.ts │ │ ├── dockerComposeConnection.ts │ │ ├── dockerComposeDigests.ts │ │ ├── dockerComposeLock.ts │ │ ├── dockerComposePush.ts │ │ ├── dockerComposeRun.ts │ │ ├── dockerComposeUp.ts │ │ ├── dockerConnection.ts │ │ ├── dockerImageUtils.ts │ │ ├── gitUtils.ts │ │ ├── icon.png │ │ ├── sourceUtils.ts │ │ └── task.json │ └── dockerDeploy │ │ ├── acs-dcos │ │ ├── Dockerfile.task │ │ ├── acsclient.py │ │ ├── acsinfo.py │ │ ├── conf │ │ │ ├── exhibitor-data.json │ │ │ └── external-nginx-lb.json │ │ ├── createmarathon.py │ │ ├── dockercomposeparser.py │ │ ├── dockerregistry.py │ │ ├── exhibitor.py │ │ ├── healthcheck.py │ │ ├── hexifier.py │ │ ├── marathon.py │ │ ├── marathon_deployments.py │ │ ├── mesos.py │ │ ├── mesos_task.py │ │ ├── nginx.py │ │ ├── portmappings.py │ │ ├── requirements.txt │ │ ├── serviceparser.py │ │ ├── test_acsclient.py │ 
│ ├── test_compose_1.yml │ │ ├── test_compose_1_expected.json │ │ ├── test_deployment_monitor.py │ │ ├── test_dockercomposeparser.py │ │ ├── test_healthcheck.py │ │ ├── test_hexifier.py │ │ ├── test_marathon_event.py │ │ ├── test_mesos.py │ │ ├── test_mesos_task.py │ │ ├── test_nginx.py │ │ ├── test_portmappings.py │ │ └── test_serviceparser.py │ │ ├── acs-kubernetes │ │ ├── Dockerfile.task │ │ ├── acsclient.py │ │ ├── clusterinfo.py │ │ ├── deploy.py │ │ ├── dockercomposeparser.py │ │ ├── groupinfo.py │ │ ├── ingress │ │ │ ├── default-backend-svc.json │ │ │ ├── default-backend.json │ │ │ ├── nginx-ingress-lb-svc.json │ │ │ └── nginx-ingress-lb.json │ │ ├── ingress_controller.py │ │ ├── kubernetes.py │ │ ├── portparser.py │ │ ├── registryinfo.py │ │ ├── requirements.txt │ │ ├── serviceparser.py │ │ └── test_serviceparser.py │ │ ├── dockerComposeConnection.ts │ │ ├── dockerConnection.ts │ │ ├── dockerDeploy.ts │ │ ├── dockerDeployAcsDcos.ts │ │ ├── dockerDeployAcsKube.ts │ │ ├── dockerImageUtils.ts │ │ ├── icon.png │ │ └── task.json └── vss-extension.json ├── tests └── tasks │ └── docker │ └── dockerTests.ts ├── tsconfig.json ├── tsd.json ├── tslint.json └── typings ├── assertion-error └── assertion-error.d.ts ├── chai └── chai.d.ts ├── del └── del.d.ts ├── glob └── glob.d.ts ├── js-yaml └── js-yaml.d.ts ├── minimatch └── minimatch.d.ts ├── mocha └── mocha.d.ts ├── node └── node.d.ts ├── shelljs └── shelljs.d.ts ├── sinon-chai └── sinon-chai.d.ts ├── sinon └── sinon.d.ts └── tsd.d.ts /.gitignore: -------------------------------------------------------------------------------- 1 | # Logs 2 | logs 3 | *.log 4 | npm-debug.log* 5 | 6 | # Runtime data 7 | pids 8 | *.pid 9 | *.seed 10 | 11 | # Python 12 | *.pyc 13 | *.coverage 14 | 15 | # Directory for instrumented libs generated by jscoverage/JSCover 16 | lib-cov 17 | 18 | # Coverage directory used by tools like istanbul 19 | coverage 20 | 21 | # Grunt intermediate storage 
(http://gruntjs.com/creating-plugins#storing-task-files) 22 | .grunt 23 | 24 | # node-waf configuration 25 | .lock-wscript 26 | 27 | # Compiled binary addons (http://nodejs.org/api/addons.html) 28 | build/Release 29 | 30 | # Dependency directory 31 | node_modules 32 | 33 | # Optional npm cache directory 34 | .npm 35 | 36 | # Optional REPL history 37 | .node_repl_history 38 | 39 | # Application Specific 40 | _build -------------------------------------------------------------------------------- /.taskkey: -------------------------------------------------------------------------------- 1 | f9ff5ff0-8fe3-11e6-a4d4-33808cce0bd6 -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "typescript.tsdk": "./node_modules/typescript/lib" 3 | } -------------------------------------------------------------------------------- /DockerIntegration_ExtensionForVisualStudioTeamServices_Pre-ReleaseEULA.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/DockerIntegration_ExtensionForVisualStudioTeamServices_Pre-ReleaseEULA.docx -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) Microsoft Corporation 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy 5 | of this software and associated documentation files (the "Software"), to deal 6 | in the Software without restriction, including without limitation the rights 7 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the Software is 9 | furnished to do so, subject to the following conditions: 10 
| 11 | The above copyright notice and this permission notice shall be included in all 12 | copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 17 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 19 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 20 | SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # vsts-docker 2 | This is the source code repository of Docker extension for Visual Studio Team Services. This extension contains VSTS build tasks to work with Docker. 3 | 4 | ## Working with this repo 5 | 6 | ### Implementation details 7 | * Task and corresponding tests are in TypeScript. 8 | * Lint is the static analysis tool. 9 | * Istanbul is the code coverage tool. 10 | * Mocha is the testing framework. 11 | * Chai, Sinon and Sinon-Chai are used for assertions. 
12 | 13 | ### Commands 14 | (assuming node is installed) 15 | 16 | Once: 17 | ```bash 18 | $ npm install 19 | $ npm install gulp -g 20 | $ npm install tfx-cli -g 21 | ``` 22 | 23 | Build: 24 | ```bash 25 | $ gulp build 26 | ``` 27 | 28 | Test: 29 | ```bash 30 | $ gulp test 31 | ``` 32 | 33 | Package (vsix will be generated at _build/package): 34 | ```bash 35 | $ gulp package 36 | ``` 37 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Security 4 | 5 | Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). 6 | 7 | If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. 8 | 9 | ## Reporting Security Issues 10 | 11 | **Please do not report security vulnerabilities through public GitHub issues.** 12 | 13 | Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). 14 | 15 | If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). 16 | 17 | You should receive a response within 24 hours. 
If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 18 | 19 | Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: 20 | 21 | * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 22 | * Full paths of source file(s) related to the manifestation of the issue 23 | * The location of the affected source code (tag/branch/commit or direct URL) 24 | * Any special configuration required to reproduce the issue 25 | * Step-by-step instructions to reproduce the issue 26 | * Proof-of-concept or exploit code (if possible) 27 | * Impact of the issue, including how an attacker might exploit the issue 28 | 29 | This information will help us triage your report more quickly. 30 | 31 | If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. 32 | 33 | ## Preferred Languages 34 | 35 | We prefer all communications to be in English. 36 | 37 | ## Policy 38 | 39 | Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). 
40 | 41 | 42 | -------------------------------------------------------------------------------- /gulpfile.js: -------------------------------------------------------------------------------- 1 | var del = require("del"); 2 | var fs = require('fs'); 3 | var gulp = require("gulp"); 4 | var istanbul = require("gulp-istanbul"); 5 | var mocha = require("gulp-mocha"); 6 | var path = require("path"); 7 | var shell = require("shelljs"); 8 | var tsb = require("gulp-tsb"); 9 | var tslint = require("gulp-tslint"); 10 | 11 | var buildDirectory = "_build"; 12 | var srcBuildDirectory = "_build/src"; 13 | var codeCoverageDirectory = buildDirectory + "/codeCoverage"; 14 | var packageDirectory = buildDirectory + "/package"; 15 | var sourcePaths = { 16 | typescriptFiles: "src/**/*.ts", 17 | copyFiles: ["src/**/*", "!src/**/*.ts"], 18 | lcaFiles: ["ThirdPartyNotices.txt", "DockerIntegration_ExtensionForVisualStudioTeamServices_Pre-ReleaseEULA.docx"], 19 | tasksPath: "src/tasks" 20 | }; 21 | var testPaths = { 22 | typescriptFiles: "tests/**/*.ts", 23 | compiledTestFiles: buildDirectory + "/tests/**/*Tests.js" 24 | }; 25 | var packageManifestFile = "vss-extension.json"; 26 | var nodeModulesDirectory = "node_modules"; 27 | 28 | var compilation = tsb.create({ 29 | target: 'es5', 30 | module: 'commonjs', 31 | declaration: false, 32 | verbose: false 33 | }); 34 | 35 | gulp.task("clean", function () { 36 | return del([buildDirectory]); 37 | }); 38 | 39 | gulp.task("lint", ["clean"], function () { 40 | return gulp.src([sourcePaths.typescriptFiles, testPaths.typescriptFiles]) 41 | .pipe(tslint()) 42 | .pipe(tslint.report("verbose")); 43 | }); 44 | 45 | gulp.task("compile", ["lint"], function () { 46 | return gulp.src([sourcePaths.typescriptFiles, testPaths.typescriptFiles], { base: "." 
}) 47 | .pipe(compilation()) 48 | .pipe(gulp.dest(buildDirectory)) 49 | .pipe(istanbul({ includeUntested: true })) 50 | .pipe(istanbul.hookRequire()); 51 | }); 52 | 53 | gulp.task("build", ["compile"], function () { 54 | return gulp.src(sourcePaths.copyFiles.concat(sourcePaths.lcaFiles), { base: "." }) 55 | .pipe(gulp.dest(buildDirectory)); 56 | }); 57 | 58 | gulp.task("test", ["build"], function () { 59 | shell.chmod(755, 'pytests.sh'); 60 | shell.exec('./pytests.sh'); 61 | shell.rm('./r.sh'); 62 | 63 | return gulp.src(testPaths.compiledTestFiles, { read: false }) 64 | .pipe(mocha()) 65 | .pipe(istanbul.writeReports({ dir: codeCoverageDirectory })) 66 | .pipe(istanbul.enforceThresholds({ thresholds: { global: 100 } })); 67 | }); 68 | 69 | gulp.task("default", ["build"]); 70 | 71 | gulp.task("package", ["build"], function () { 72 | getNodeDependencies(function () { 73 | copyLcaFiles(); 74 | copyNodeModulesToTasks(); 75 | createVsixPackage(); 76 | }); 77 | }); 78 | 79 | var getNodeDependencies = function (callback) { 80 | del(packageDirectory); 81 | shell.mkdir("-p", path.join(packageDirectory, nodeModulesDirectory)); 82 | shell.cp("-f", "package.json", packageDirectory); 83 | shell.pushd(packageDirectory); 84 | 85 | var npmPath = shell.which("npm"); 86 | var npmInstallCommand = '"' + npmPath + '" install --production'; 87 | executeCommand(npmInstallCommand, function () { 88 | shell.popd(); 89 | callback(); 90 | }); 91 | } 92 | 93 | var copyLcaFiles = function () { 94 | gulp.src(sourcePaths.lcaFiles, { base: "." }) 95 | .pipe(gulp.dest(srcBuildDirectory)); 96 | 97 | fs.readdirSync(sourcePaths.tasksPath).forEach(function (taskName) { 98 | var taskpath = path.join(buildDirectory, sourcePaths.tasksPath, taskName); 99 | gulp.src(sourcePaths.lcaFiles, { base: "." 
}) 100 | .pipe(gulp.dest(taskpath)); 101 | }) 102 | } 103 | 104 | var copyNodeModulesToTasks = function () { 105 | fs.readdirSync(sourcePaths.tasksPath).forEach(function (taskName) { 106 | var taskpath = path.join(buildDirectory, sourcePaths.tasksPath, taskName); 107 | del(path.join(taskpath, nodeModulesDirectory)); 108 | shell.cp("-rf", path.join(packageDirectory, nodeModulesDirectory), taskpath); 109 | }); 110 | } 111 | 112 | var createVsixPackage = function () { 113 | var packagingCmd = "tfx extension create --manifest-globs " + packageManifestFile + " --root " + srcBuildDirectory + " --output-path " + packageDirectory; 114 | executeCommand(packagingCmd, function () { }); 115 | } 116 | 117 | var executeCommand = function (cmd, callback) { 118 | shell.exec(cmd, { silent: true }, function (code, output) { 119 | if (code != 0) { 120 | console.error("command failed: " + cmd + "\nManually execute to debug"); 121 | } 122 | else { 123 | callback(); 124 | } 125 | }); 126 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "chai": "^3.5.0", 4 | "gulp": "^3.9.0", 5 | "gulp-istanbul": "^0.10.3", 6 | "gulp-mocha": "^2.2.0", 7 | "gulp-tsb": "^1.10.1", 8 | "gulp-tslint": "^4.3.1", 9 | "shelljs": "^0.6.0", 10 | "sinon": "^1.17.3", 11 | "sinon-chai": "^2.8.0", 12 | "tslint": "^3.3.0", 13 | "typescript": "^1.7.5" 14 | }, 15 | "dependencies": { 16 | "del": "^2.2.0", 17 | "js-yaml": "^3.6.1", 18 | "vsts-task-lib": "^0.9.20" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /pytests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | cat << EOF > r.sh 5 | { 6 | #!/bin/bash 7 | set -e 8 | python -m pip install requests pyyaml mock paramiko sshtunnel coverage sseclient 9 | coverage run 
--source=src/tasks/dockerDeploy/acs-dcos -m unittest discover -s src/tasks/dockerDeploy/acs-dcos 10 | coverage run --source=src/tasks/dockerDeploy/acs-kubernetes -m unittest discover -s src/tasks/dockerDeploy/acs-kubernetes 11 | coverage report -m 12 | } 13 | EOF 14 | chmod +x r.sh 15 | docker run --rm -v "$PWD":/tests -w /tests python:2.7 /bin/bash -c /tests/r.sh 16 | exit $? -------------------------------------------------------------------------------- /src/images/DockerBuild.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerBuild.png -------------------------------------------------------------------------------- /src/images/DockerCommand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerCommand.png -------------------------------------------------------------------------------- /src/images/DockerComposeBuild.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeBuild.png -------------------------------------------------------------------------------- /src/images/DockerComposeCommand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeCommand.png -------------------------------------------------------------------------------- /src/images/DockerComposeConfig.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeConfig.png -------------------------------------------------------------------------------- /src/images/DockerComposeLock.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeLock.png -------------------------------------------------------------------------------- /src/images/DockerComposePush.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposePush.png -------------------------------------------------------------------------------- /src/images/DockerComposeRun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeRun.png -------------------------------------------------------------------------------- /src/images/DockerComposeUp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerComposeUp.png -------------------------------------------------------------------------------- /src/images/DockerDeployAcsDcos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerDeployAcsDcos.png -------------------------------------------------------------------------------- /src/images/DockerHost.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerHost.png -------------------------------------------------------------------------------- /src/images/DockerPush.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerPush.png -------------------------------------------------------------------------------- /src/images/DockerRegistry.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerRegistry.png -------------------------------------------------------------------------------- /src/images/DockerRun.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerRun.png -------------------------------------------------------------------------------- /src/images/DockerRunWorkload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/DockerRunWorkload.png -------------------------------------------------------------------------------- /src/images/ServicesTab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/ServicesTab.png -------------------------------------------------------------------------------- /src/images/docker_logo.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/docker_logo.png -------------------------------------------------------------------------------- /src/images/docker_logo_large.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/images/docker_logo_large.png -------------------------------------------------------------------------------- /src/tasks/docker/docker.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerConnection from "./dockerConnection"; 5 | 6 | // Change to any specified working directory 7 | tl.cd(tl.getInput("cwd")); 8 | 9 | // Connect to any specified Docker host and/or registry 10 | var connection = new DockerConnection(); 11 | connection.open(tl.getInput("dockerHostEndpoint"), tl.getInput("dockerRegistryEndpoint")); 12 | 13 | // Run the specified action 14 | var action = tl.getInput("action", true); 15 | /* tslint:disable:no-var-requires */ 16 | require({ 17 | "Build an image": "./dockerBuild", 18 | "Push an image": "./dockerPush", 19 | "Run an image": "./dockerRun", 20 | "Run a Docker command": "./dockerCommand" 21 | }[action]).run(connection) 22 | /* tslint:enable:no-var-requires */ 23 | .fin(function cleanup() { 24 | connection.close(); 25 | }) 26 | .then(function success() { 27 | tl.setResult(tl.TaskResult.Succeeded, ""); 28 | }, function failure(err) { 29 | tl.setResult(tl.TaskResult.Failed, err.message); 30 | }) 31 | .done(); 32 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerBuild.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as path from "path"; 4 | import * as tl from "vsts-task-lib/task"; 5 | import 
DockerConnection from "./dockerConnection"; 6 | import * as sourceUtils from "./sourceUtils"; 7 | import * as imageUtils from "./dockerImageUtils"; 8 | 9 | export function run(connection: DockerConnection): any { 10 | var command = connection.createCommand(); 11 | command.arg("build"); 12 | 13 | var dockerFile = tl.globFirst(tl.getInput("dockerFile", true)); 14 | if (!dockerFile) { 15 | throw new Error("No Docker file matching " + tl.getInput("dockerFile") + " was found."); 16 | } 17 | command.arg(["-f", dockerFile]); 18 | 19 | tl.getDelimitedInput("buildArguments", "\n").forEach(buildArgument => { 20 | command.arg(["--build-arg", buildArgument]); 21 | }); 22 | 23 | var imageName = tl.getInput("imageName", true); 24 | var qualifyImageName = tl.getBoolInput("qualifyImageName"); 25 | if (qualifyImageName) { 26 | imageName = connection.qualifyImageName(imageName); 27 | } 28 | command.arg(["-t", imageName]); 29 | 30 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 31 | 32 | tl.getDelimitedInput("additionalImageTags", "\n").forEach(tag => { 33 | command.arg(["-t", baseImageName + ":" + tag]); 34 | }); 35 | 36 | var includeSourceTags = tl.getBoolInput("includeSourceTags"); 37 | if (includeSourceTags) { 38 | sourceUtils.getSourceTags().forEach(tag => { 39 | command.arg(["-t", baseImageName + ":" + tag]); 40 | }); 41 | } 42 | 43 | var includeLatestTag = tl.getBoolInput("includeLatestTag"); 44 | if (baseImageName !== imageName && includeLatestTag) { 45 | command.arg(["-t", baseImageName]); 46 | } 47 | 48 | var context: string; 49 | var defaultContext = tl.getBoolInput("defaultContext"); 50 | if (defaultContext) { 51 | context = path.dirname(dockerFile); 52 | } else { 53 | context = tl.getPathInput("context"); 54 | } 55 | command.arg(context); 56 | 57 | return connection.execCommand(command); 58 | } 59 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerCommand.ts: 
-------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerConnection from "./dockerConnection"; 5 | 6 | export function run(connection: DockerConnection): any { 7 | var command = connection.createCommand(); 8 | command.line(tl.getInput("customCommand", true)); 9 | return connection.execCommand(command); 10 | } 11 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerConnection.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as url from "url"; 7 | import * as tl from "vsts-task-lib/task"; 8 | import * as tr from "vsts-task-lib/toolrunner"; 9 | import * as imageUtils from "./dockerImageUtils"; 10 | 11 | export default class DockerConnection { 12 | private dockerPath: string; 13 | protected hostUrl: string; 14 | protected certsDir: string; 15 | private caPath: string; 16 | private certPath: string; 17 | private keyPath: string; 18 | private registryAuth: { [key: string]: string }; 19 | private registryHost: string; 20 | 21 | constructor() { 22 | this.dockerPath = tl.which("docker", true); 23 | } 24 | 25 | public createCommand(): tr.ToolRunner { 26 | var command = tl.tool(this.dockerPath); 27 | if (this.hostUrl) { 28 | command.arg(["-H", this.hostUrl]); 29 | command.arg("--tls"); 30 | command.arg("--tlscacert='" + this.caPath + "'"); 31 | command.arg("--tlscert='" + this.certPath + "'"); 32 | command.arg("--tlskey='" + this.keyPath + "'"); 33 | } 34 | return command; 35 | } 36 | 37 | public execCommand(command: tr.ToolRunner, options?: tr.IExecOptions) { 38 | var errlines = []; 39 | command.on("errline", line => { 40 | errlines.push(line); 41 | }); 42 | return command.exec(options).fail(error => { 43 | errlines.forEach(line => tl.error(line)); 44 
| throw error; 45 | }); 46 | } 47 | 48 | public open(hostEndpoint?: string, registryEndpoint?: string): void { 49 | if (hostEndpoint) { 50 | this.hostUrl = tl.getEndpointUrl(hostEndpoint, false); 51 | if (this.hostUrl.charAt(this.hostUrl.length - 1) == "/") { 52 | this.hostUrl = this.hostUrl.substring(0, this.hostUrl.length - 1); 53 | } 54 | 55 | this.certsDir = path.join("", ".dockercerts"); 56 | if (!fs.existsSync(this.certsDir)) { 57 | fs.mkdirSync(this.certsDir); 58 | } 59 | 60 | var authDetails = tl.getEndpointAuthorization(hostEndpoint, false).parameters; 61 | 62 | this.caPath = path.join(this.certsDir, "ca.pem"); 63 | fs.writeFileSync(this.caPath, authDetails["cacert"]); 64 | 65 | this.certPath = path.join(this.certsDir, "cert.pem"); 66 | fs.writeFileSync(this.certPath, authDetails["cert"]); 67 | 68 | this.keyPath = path.join(this.certsDir, "key.pem"); 69 | fs.writeFileSync(this.keyPath, authDetails["key"]); 70 | } 71 | 72 | if (registryEndpoint) { 73 | var command = this.createCommand(); 74 | this.registryAuth = tl.getEndpointAuthorization(registryEndpoint, true).parameters; 75 | if (this.registryAuth) { 76 | command.arg("login"); 77 | command.arg(["-u", this.registryAuth["username"]]); 78 | command.arg(["-p", this.registryAuth["password"]]); 79 | command.arg(this.registryAuth["registry"]); 80 | command.execSync(); 81 | this.registryHost = this.registryAuth["registry"]; 82 | } 83 | } 84 | } 85 | 86 | public qualifyImageName(imageName: string) { 87 | if (!imageUtils.hasRegistryComponent(imageName) && this.registryAuth) { 88 | var regUrl = url.parse(this.registryAuth["registry"]), 89 | hostname = !regUrl.slashes ? 
regUrl.href : regUrl.host; 90 | if (hostname.toLowerCase() !== "index.docker.io") { 91 | imageName = hostname + "/" + imageName; 92 | } 93 | } 94 | return imageName; 95 | } 96 | 97 | public close(): void { 98 | if (this.registryHost) { 99 | var command = this.createCommand(); 100 | command.arg("logout"); 101 | command.arg(this.registryHost); 102 | command.execSync(); 103 | } 104 | if (this.certsDir && fs.existsSync(this.certsDir)) { 105 | del.sync(this.certsDir); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerImageUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | export function hasRegistryComponent(imageName: string): boolean { 4 | var periodIndex = imageName.indexOf("."), 5 | colonIndex = imageName.indexOf(":"), 6 | slashIndex = imageName.indexOf("/"); 7 | return ((periodIndex > 0 && periodIndex < slashIndex) || 8 | (colonIndex > 0 && colonIndex < slashIndex)); 9 | } 10 | 11 | export function imageNameWithoutTag(imageName: string): string { 12 | var endIndex = 0; 13 | if (hasRegistryComponent(imageName)) { 14 | // Contains a registry component that may include ":", so omit 15 | // this part of the name from the main delimiter determination 16 | endIndex = imageName.indexOf("/"); 17 | } 18 | endIndex = imageName.indexOf(":", endIndex); 19 | return endIndex < 0 ? 
imageName : imageName.substr(0, endIndex); 20 | } 21 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerPush.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as fs from "fs"; 4 | import * as tl from "vsts-task-lib/task"; 5 | import DockerConnection from "./dockerConnection"; 6 | import * as sourceUtils from "./sourceUtils"; 7 | import * as imageUtils from "./dockerImageUtils"; 8 | 9 | function dockerPush(connection: DockerConnection, imageName: string, imageDigestFile?: string): any { 10 | var command = connection.createCommand(); 11 | command.arg("push"); 12 | command.arg(imageName); 13 | 14 | if (!imageDigestFile) { 15 | return connection.execCommand(command); 16 | } 17 | 18 | var output = ""; 19 | command.on("stdout", data => { 20 | output += data; 21 | }); 22 | 23 | return connection.execCommand(command).then(() => { 24 | // Parse the output to find the repository digest 25 | var imageDigest = output.match(/^[^:]*: digest: ([^ ]*) size: \d*$/m)[1]; 26 | if (imageDigest) { 27 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 28 | fs.writeFileSync(imageDigestFile, baseImageName + "@" + imageDigest); 29 | } 30 | }); 31 | } 32 | 33 | export function run(connection: DockerConnection): any { 34 | var images = []; 35 | var imageName = tl.getInput("imageName", true); 36 | var qualifyImageName = tl.getBoolInput("qualifyImageName"); 37 | if (qualifyImageName) { 38 | imageName = connection.qualifyImageName(imageName); 39 | } 40 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 41 | 42 | if (baseImageName === imageName) { 43 | images.push(imageName + ":latest"); 44 | } else { 45 | images.push(imageName); 46 | } 47 | 48 | tl.getDelimitedInput("additionalImageTags", "\n").forEach(tag => { 49 | images.push(baseImageName + ":" + tag); 50 | }); 51 | 52 | var includeSourceTags = tl.getBoolInput("includeSourceTags"); 53 | if 
(includeSourceTags) { 54 | sourceUtils.getSourceTags().forEach(tag => { 55 | images.push(baseImageName + ":" + tag); 56 | }); 57 | } 58 | 59 | var includeLatestTag = tl.getBoolInput("includeLatestTag"); 60 | if (baseImageName !== imageName && includeLatestTag) { 61 | images.push(baseImageName + ":latest"); 62 | } 63 | 64 | var imageDigestFile: string; 65 | if (tl.filePathSupplied("imageDigestFile")) { 66 | imageDigestFile = tl.getPathInput("imageDigestFile"); 67 | } 68 | 69 | var promise = dockerPush(connection, images.shift(), imageDigestFile); 70 | images.forEach(imageName => { 71 | promise = promise.then(() => dockerPush(connection, imageName)); 72 | }); 73 | 74 | return promise; 75 | } 76 | -------------------------------------------------------------------------------- /src/tasks/docker/dockerRun.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as path from "path"; 4 | import * as tl from "vsts-task-lib/task"; 5 | import DockerConnection from "./dockerConnection"; 6 | 7 | export function run(connection: DockerConnection): any { 8 | var command = connection.createCommand(); 9 | command.arg("run"); 10 | 11 | var detached = tl.getBoolInput("detached"); 12 | if (detached) { 13 | command.arg("-d"); 14 | } 15 | 16 | var entrypoint = tl.getInput("entrypoint"); 17 | if (entrypoint) { 18 | command.arg(["--entrypoint", entrypoint]); 19 | } 20 | 21 | tl.getDelimitedInput("envVars", "\n").forEach(envVar => { 22 | command.arg(["-e", envVar]); 23 | }); 24 | 25 | var containerName = tl.getInput("containerName"); 26 | if (containerName) { 27 | command.arg(["--name", containerName]); 28 | } 29 | 30 | tl.getDelimitedInput("ports", "\n").forEach(port => { 31 | command.arg(["-p", port]); 32 | }); 33 | 34 | if (!detached) { 35 | command.arg("--rm"); 36 | } else { 37 | var restartPolicy = { 38 | no: "no", 39 | onFailure: "on-failure", 40 | always: "always", 41 | unlessStopped: "unless-stopped" 42 | 
}[tl.getInput("restartPolicy")]; 43 | if (restartPolicy) { 44 | if (restartPolicy === "on-failure") { 45 | var restartMaxRetries = tl.getInput("restartMaxRetries"); 46 | if (restartMaxRetries) { 47 | var restartMaxRetriesNum = parseInt(restartMaxRetries, 10); 48 | if (isNaN(restartMaxRetriesNum)) { 49 | throw new Error("Maximum Restart Retries is not a number."); 50 | } 51 | restartPolicy += ":" + restartMaxRetriesNum; 52 | } 53 | } 54 | command.arg(["--restart", restartPolicy]); 55 | } 56 | } 57 | 58 | tl.getDelimitedInput("volumes", "\n").forEach(volume => { 59 | command.arg(["-v", volume]); 60 | }); 61 | 62 | var workDir = tl.getInput("workDir"); 63 | if (workDir) { 64 | command.arg(["-w", workDir]); 65 | } 66 | 67 | var imageName = tl.getInput("imageName", true); 68 | var qualifyImageName = tl.getBoolInput("qualifyImageName"); 69 | if (qualifyImageName) { 70 | imageName = connection.qualifyImageName(imageName); 71 | } 72 | command.arg(imageName); 73 | 74 | var containerCommand = tl.getInput("containerCommand"); 75 | if (containerCommand) { 76 | command.line(containerCommand); 77 | } 78 | 79 | return connection.execCommand(command); 80 | } 81 | -------------------------------------------------------------------------------- /src/tasks/docker/gitUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as cp from "child_process"; 4 | import * as tl from "vsts-task-lib/task"; 5 | 6 | export function tagsAt(commit: string): string[] { 7 | var git = tl.which("git", true); 8 | var args = ["tag", "--points-at", commit]; 9 | var gitDir = tl.getVariable("Build.Repository.LocalPath"); 10 | console.log("[command]" + git + " " + args.join(" ")); 11 | var result = (cp.execFileSync(git, args, { 12 | encoding: "utf8", 13 | cwd: gitDir 14 | }) as string).trim(); 15 | console.log(result); 16 | return result.length ? 
result.split("\n") : []; 17 | } 18 | -------------------------------------------------------------------------------- /src/tasks/docker/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/tasks/docker/icon.png -------------------------------------------------------------------------------- /src/tasks/docker/sourceUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import * as gitUtils from "./gitUtils"; 5 | 6 | export function getSourceTags(): string[] { 7 | var tags: string[]; 8 | 9 | var sourceProvider = tl.getVariable("Build.Repository.Provider"); 10 | if (!sourceProvider) { 11 | tl.warning("Cannot retrieve source tags because Build.Repository.Provider is not set."); 12 | return []; 13 | } 14 | if (sourceProvider === "TfsVersionControl") { 15 | // TFVC has no concept of source tags 16 | return []; 17 | } 18 | 19 | var sourceVersion = tl.getVariable("Build.SourceVersion"); 20 | if (!sourceVersion) { 21 | tl.warning("Cannot retrieve source tags because Build.SourceVersion is not set."); 22 | return []; 23 | } 24 | 25 | switch (sourceProvider) { 26 | case "TfsGit": 27 | case "GitHub": 28 | case "Git": 29 | tags = gitUtils.tagsAt(sourceVersion); 30 | break; 31 | case "Subversion": 32 | // TODO: support subversion tags 33 | tl.warning("Retrieving Subversion tags is not currently supported."); 34 | break; 35 | } 36 | 37 | return tags || []; 38 | } 39 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerCompose.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | 6 | // Change to any specified 
working directory 7 | tl.cd(tl.getInput("cwd")); 8 | 9 | var dockerComposeFile = tl.getInput("dockerComposeFile", true); 10 | var nopIfNoDockerComposeFile = tl.getBoolInput("nopIfNoDockerComposeFile"); 11 | if (nopIfNoDockerComposeFile && !tl.globFirst(dockerComposeFile)) { 12 | console.log("No Docker Compose file matching " + dockerComposeFile + " was found."); 13 | tl.setResult(tl.TaskResult.Succeeded, ""); 14 | } else { 15 | // Connect to any specified Docker host and/or registry 16 | var connection = new DockerComposeConnection(); 17 | connection.open(tl.getInput("dockerHostEndpoint"), tl.getInput("dockerRegistryEndpoint")) 18 | .then(function runAction() { 19 | // Run the specified action 20 | var action = tl.getInput("action", true); 21 | /* tslint:disable:no-var-requires */ 22 | return require({ 23 | "Build services": "./dockerComposeBuild", 24 | "Push services": "./dockerComposePush", 25 | "Run services": "./dockerComposeUp", 26 | "Run a specific service": "./dockerComposeRun", 27 | "Lock services": "./dockerComposeLock", 28 | "Write service image digests": "./dockerComposeDigests", 29 | "Combine configuration": "./dockerComposeConfig", 30 | "Run a Docker Compose command": "./dockerComposeCommand" 31 | }[action]).run(connection); 32 | /* tslint:enable:no-var-requires */ 33 | }) 34 | .fin(function cleanup() { 35 | connection.close(); 36 | }) 37 | .then(function success() { 38 | tl.setResult(tl.TaskResult.Succeeded, ""); 39 | }, function failure(err) { 40 | tl.setResult(tl.TaskResult.Failed, err.message); 41 | }) 42 | .done(); 43 | } 44 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeBuild.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | import * as sourceUtils from "./sourceUtils"; 6 | import * as imageUtils from 
"./dockerImageUtils"; 7 | 8 | function dockerTag(connection: DockerComposeConnection, source: string, target: string) { 9 | var command = connection.createCommand(); 10 | command.arg("tag"); 11 | command.arg(source); 12 | command.arg(target); 13 | return connection.execCommand(command); 14 | } 15 | 16 | function addTag(promise: any, connection: DockerComposeConnection, source: string, target: string) { 17 | if (!promise) { 18 | return dockerTag(connection, source, target); 19 | } else { 20 | return promise.then(() => dockerTag(connection, source, target)); 21 | } 22 | } 23 | 24 | function addOtherTags(connection: DockerComposeConnection, imageName: string): any { 25 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 26 | 27 | function addAdditionalTags() { 28 | var promise: any; 29 | tl.getDelimitedInput("additionalImageTags", "\n").forEach(tag => { 30 | promise = addTag(promise, connection, imageName, baseImageName + ":" + tag); 31 | }); 32 | return promise; 33 | } 34 | 35 | function addSourceTags() { 36 | var promise: any; 37 | var includeSourceTags = tl.getBoolInput("includeSourceTags"); 38 | if (includeSourceTags) { 39 | sourceUtils.getSourceTags().forEach(tag => { 40 | promise = addTag(promise, connection, imageName, baseImageName + ":" + tag); 41 | }); 42 | } 43 | return promise; 44 | } 45 | 46 | function addLatestTag() { 47 | var includeLatestTag = tl.getBoolInput("includeLatestTag"); 48 | if (baseImageName !== imageName && includeLatestTag) { 49 | return dockerTag(connection, imageName, baseImageName); 50 | } 51 | } 52 | 53 | var promise = addAdditionalTags(); 54 | promise = !promise ? addSourceTags() : promise.then(addSourceTags); 55 | promise = !promise ? 
addLatestTag() : promise.then(addLatestTag); 56 | 57 | return promise; 58 | } 59 | 60 | export function run(connection: DockerComposeConnection): any { 61 | var command = connection.createComposeCommand(); 62 | command.arg("build"); 63 | return connection.execCommand(command) 64 | .then(() => connection.getImages(true)) 65 | .then(images => { 66 | var promise: any; 67 | Object.keys(images).map(serviceName => images[serviceName]).forEach(imageName => { 68 | if (!promise) { 69 | promise = addOtherTags(connection, imageName); 70 | } else { 71 | promise = promise.then(() => addOtherTags(connection, imageName)); 72 | } 73 | }); 74 | return promise; 75 | }); 76 | } 77 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeCommand.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | 6 | export function run(connection: DockerComposeConnection): any { 7 | var command = connection.createComposeCommand(); 8 | command.line(tl.getInput("dockerComposeCommand", true)); 9 | return connection.execCommand(command); 10 | } 11 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeConfig.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as fs from "fs"; 4 | import * as tl from "vsts-task-lib/task"; 5 | import * as yaml from "js-yaml"; 6 | import DockerComposeConnection from "./dockerComposeConnection"; 7 | 8 | export function run(connection: DockerComposeConnection, imageDigestComposeFile?: string): any { 9 | return connection.getCombinedConfig(imageDigestComposeFile).then(output => { 10 | var removeBuildOptions = tl.getBoolInput("removeBuildOptions"); 11 | if (removeBuildOptions) { 12 | var doc = 
yaml.safeLoad(output); 13 | for (var serviceName in doc.services || {}) { 14 | delete doc.services[serviceName].build; 15 | } 16 | output = yaml.safeDump(doc, {lineWidth: -1} as any); 17 | } 18 | 19 | var baseResolveDir = tl.getPathInput("baseResolveDirectory"); 20 | if (baseResolveDir) { 21 | // This just searches the output string and replaces all 22 | // occurrences of the base resolve directory. This isn't 23 | // precisely accurate but is a good enough solution. 24 | var replaced = output; 25 | do { 26 | output = replaced; 27 | replaced = output.replace(baseResolveDir, "."); 28 | } while (replaced !== output); 29 | } 30 | 31 | var outputDockerComposeFile = tl.getPathInput("outputDockerComposeFile", true); 32 | 33 | fs.writeFileSync(outputDockerComposeFile, output); 34 | }); 35 | } 36 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeConnection.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as tl from "vsts-task-lib/task"; 7 | import * as tr from "vsts-task-lib/toolrunner"; 8 | import * as yaml from "js-yaml"; 9 | import DockerConnection from "./dockerConnection"; 10 | 11 | export default class DockerComposeConnection extends DockerConnection { 12 | private dockerComposePath: string; 13 | private dockerComposeFile: string; 14 | private dockerComposeVersion: string; 15 | private additionalDockerComposeFiles: string[]; 16 | private requireAdditionalDockerComposeFiles: boolean; 17 | private projectName: string; 18 | private finalComposeFile: string; 19 | 20 | constructor() { 21 | super(); 22 | this.dockerComposePath = tl.which("docker-compose", true); 23 | this.dockerComposeFile = tl.globFirst(tl.getInput("dockerComposeFile", true)); 24 | if (!this.dockerComposeFile) { 25 | throw new Error("No Docker Compose file matching " + 
tl.getInput("dockerComposeFile") + " was found."); 26 | } 27 | this.dockerComposeVersion = "2"; 28 | this.additionalDockerComposeFiles = tl.getDelimitedInput("additionalDockerComposeFiles", "\n"); 29 | this.requireAdditionalDockerComposeFiles = tl.getBoolInput("requireAdditionalDockerComposeFiles"); 30 | this.projectName = tl.getInput("projectName"); 31 | } 32 | 33 | public open(hostEndpoint?: string, registryEndpoint?: string): any { 34 | super.open(hostEndpoint, registryEndpoint); 35 | 36 | if (this.hostUrl) { 37 | process.env["DOCKER_HOST"] = this.hostUrl; 38 | process.env["DOCKER_TLS_VERIFY"] = 1; 39 | process.env["DOCKER_CERT_PATH"] = this.certsDir; 40 | } 41 | 42 | tl.getDelimitedInput("dockerComposeFileArgs", "\n").forEach(envVar => { 43 | var tokens = envVar.split("="); 44 | if (tokens.length < 2) { 45 | throw new Error("Environment variable '" + envVar + "' is invalid."); 46 | } 47 | process.env[tokens[0].trim()] = tokens.slice(1).join("=").trim(); 48 | }); 49 | 50 | return this.getImages(true).then(images => { 51 | var qualifyImageNames = tl.getBoolInput("qualifyImageNames"); 52 | if (!qualifyImageNames) { 53 | return; 54 | } 55 | var agentDirectory = tl.getVariable("Agent.HomeDirectory"); 56 | this.finalComposeFile = path.join(agentDirectory, ".docker-compose." 
+ Date.now() + ".yml"); 57 | var services = {}; 58 | if (qualifyImageNames) { 59 | for (var serviceName in images) { 60 | images[serviceName] = this.qualifyImageName(images[serviceName]); 61 | } 62 | } 63 | for (var serviceName in images) { 64 | services[serviceName] = { 65 | image: images[serviceName] 66 | }; 67 | } 68 | fs.writeFileSync(this.finalComposeFile, yaml.safeDump({ 69 | version: this.dockerComposeVersion, 70 | services: services 71 | }, { lineWidth: -1 } as any)); 72 | }); 73 | } 74 | 75 | public createComposeCommand(): tr.ToolRunner { 76 | var command = tl.tool(this.dockerComposePath); 77 | 78 | command.arg(["-f", this.dockerComposeFile]); 79 | 80 | var basePath = path.dirname(this.dockerComposeFile); 81 | this.additionalDockerComposeFiles.forEach(file => { 82 | // If the path is relative, resolve it 83 | if (!path.isAbsolute(file)) { 84 | file = path.join(basePath, file); 85 | } 86 | if (this.requireAdditionalDockerComposeFiles || tl.exist(file)) { 87 | command.arg(["-f", file]); 88 | } 89 | }); 90 | if (this.finalComposeFile) { 91 | command.arg(["-f", this.finalComposeFile]); 92 | } 93 | 94 | if (this.projectName) { 95 | command.arg(["-p", this.projectName]); 96 | } 97 | 98 | return command; 99 | } 100 | 101 | public getCombinedConfig(imageDigestComposeFile?: string): any { 102 | var command = this.createComposeCommand(); 103 | if (imageDigestComposeFile) { 104 | command.arg(["-f", imageDigestComposeFile]); 105 | } 106 | command.arg("config"); 107 | var result = ""; 108 | command.on("stdout", data => { 109 | result += data; 110 | }); 111 | command.on("errline", line => { 112 | tl.error(line); 113 | }); 114 | return command.exec({ silent: true } as any).then(() => result); 115 | } 116 | 117 | public getImages(builtOnly?: boolean): any { 118 | return this.getCombinedConfig().then(input => { 119 | var doc = yaml.safeLoad(input); 120 | if (doc.version) { 121 | this.dockerComposeVersion = doc.version; 122 | } 123 | var projectName = this.projectName; 124 
| if (!projectName) { 125 | projectName = path.basename(path.dirname(this.dockerComposeFile)); 126 | } 127 | var images: any = {}; 128 | for (var serviceName in doc.services || {}) { 129 | var service = doc.services[serviceName]; 130 | var image = service.image; 131 | if (!image) { 132 | image = projectName.toLowerCase().replace(/[^0-9a-z]/g, "") + "_" + serviceName; 133 | } 134 | if (!builtOnly || service.build) { 135 | images[serviceName] = image; 136 | } 137 | } 138 | return images; 139 | }); 140 | } 141 | 142 | public getVersion(): string { 143 | return this.dockerComposeVersion; 144 | } 145 | 146 | public close(): void { 147 | if (this.finalComposeFile && tl.exist(this.finalComposeFile)) { 148 | del.sync(this.finalComposeFile, { force: true }); 149 | } 150 | super.close(); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeDigests.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as fs from "fs"; 4 | import * as tl from "vsts-task-lib/task"; 5 | import * as yaml from "js-yaml"; 6 | import DockerComposeConnection from "./dockerComposeConnection"; 7 | import * as imageUtils from "./dockerImageUtils"; 8 | 9 | function dockerPull(connection: DockerComposeConnection, imageName: string, imageDigests: any, serviceName: string) { 10 | var command = connection.createCommand(); 11 | command.arg("pull"); 12 | command.arg(imageName); 13 | 14 | var output = ""; 15 | command.on("stdout", data => { 16 | output += data; 17 | }); 18 | 19 | return connection.execCommand(command).then(() => { 20 | // Parse the output to find the repository digest 21 | var imageDigest = output.match(/^Digest: (.*)$/m)[1]; 22 | if (imageDigest) { 23 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 24 | imageDigests[serviceName] = baseImageName + "@" + imageDigest; 25 | } 26 | }); 27 | } 28 | 29 | function 
writeImageDigestComposeFile(version: string, imageDigests: any, imageDigestComposeFile: string): void { 30 | var services = {}; 31 | Object.keys(imageDigests).forEach(serviceName => { 32 | services[serviceName] = { 33 | image: imageDigests[serviceName] 34 | }; 35 | }); 36 | fs.writeFileSync(imageDigestComposeFile, yaml.safeDump({ 37 | version: version, 38 | services: services 39 | }, { lineWidth: -1 } as any)); 40 | } 41 | 42 | export function createImageDigestComposeFile(connection: DockerComposeConnection, imageDigestComposeFile: string) { 43 | return connection.getImages().then(images => { 44 | var promise: any; 45 | var version = connection.getVersion(); 46 | var imageDigests = {}; 47 | Object.keys(images).forEach(serviceName => { 48 | (imageName => { 49 | if (!promise) { 50 | promise = dockerPull(connection, imageName, imageDigests, serviceName); 51 | } else { 52 | promise = promise.then(() => dockerPull(connection, imageName, imageDigests, serviceName)); 53 | } 54 | })(images[serviceName]); 55 | }); 56 | if (!promise) { 57 | writeImageDigestComposeFile(version, imageDigests, imageDigestComposeFile); 58 | } else { 59 | return promise.then(() => writeImageDigestComposeFile(version, imageDigests, imageDigestComposeFile)); 60 | } 61 | }); 62 | } 63 | 64 | export function run(connection: DockerComposeConnection): any { 65 | var imageDigestComposeFile = tl.getPathInput("imageDigestComposeFile", true); 66 | return createImageDigestComposeFile(connection, imageDigestComposeFile); 67 | } 68 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeLock.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as path from "path"; 5 | import * as tl from "vsts-task-lib/task"; 6 | import DockerComposeConnection from "./dockerComposeConnection"; 7 | import { createImageDigestComposeFile } from 
"./dockerComposeDigests"; 8 | import { run as runDockerComposeConfig } from "./dockerComposeConfig"; 9 | 10 | export function run(connection: DockerComposeConnection): any { 11 | var agentDirectory = tl.getVariable("Agent.HomeDirectory"), 12 | imageDigestComposeFile = path.join(agentDirectory, ".docker-compose.images" + Date.now() + ".yml"); 13 | return createImageDigestComposeFile(connection, imageDigestComposeFile) 14 | .then(() => runDockerComposeConfig(connection, imageDigestComposeFile)) 15 | .fin(() => { 16 | if (tl.exist(imageDigestComposeFile)) { 17 | del.sync(imageDigestComposeFile, { force: true }); 18 | } 19 | }); 20 | } 21 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposePush.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | import * as sourceUtils from "./sourceUtils"; 6 | import * as imageUtils from "./dockerImageUtils"; 7 | 8 | function dockerPush(connection: DockerComposeConnection, imageName: string) { 9 | var command = connection.createCommand(); 10 | command.arg("push"); 11 | command.arg(imageName); 12 | return connection.execCommand(command); 13 | } 14 | 15 | function pushTag(promise: any, connection: DockerComposeConnection, imageName: string) { 16 | if (!promise) { 17 | return dockerPush(connection, imageName); 18 | } else { 19 | return promise.then(() => dockerPush(connection, imageName)); 20 | } 21 | } 22 | 23 | function pushTags(connection: DockerComposeConnection, imageName: string): any { 24 | var baseImageName = imageUtils.imageNameWithoutTag(imageName); 25 | var builtImageName = imageName + (baseImageName === imageName ? 
":latest" : ""); 26 | return dockerPush(connection, builtImageName) 27 | .then(function pushAdditionalTags() { 28 | var promise: any; 29 | tl.getDelimitedInput("additionalImageTags", "\n").forEach(tag => { 30 | promise = pushTag(promise, connection, baseImageName + ":" + tag); 31 | }); 32 | return promise; 33 | }) 34 | .then(function pushSourceTags() { 35 | var promise: any; 36 | var includeSourceTags = tl.getBoolInput("includeSourceTags"); 37 | if (includeSourceTags) { 38 | sourceUtils.getSourceTags().forEach(tag => { 39 | promise = pushTag(promise, connection, baseImageName + ":" + tag); 40 | }); 41 | } 42 | return promise; 43 | }) 44 | .then(function pushLatestTag() { 45 | var includeLatestTag = tl.getBoolInput("includeLatestTag"); 46 | if (baseImageName !== imageName && includeLatestTag) { 47 | return dockerPush(connection, baseImageName + ":latest"); 48 | } 49 | }); 50 | } 51 | 52 | export function run(connection: DockerComposeConnection): any { 53 | return connection.getImages(true) 54 | .then(images => { 55 | var promise: any; 56 | Object.keys(images).forEach(serviceName => { 57 | (imageName => { 58 | if (!promise) { 59 | promise = pushTags(connection, imageName); 60 | } else { 61 | promise = promise.then(() => pushTags(connection, imageName)); 62 | } 63 | })(images[serviceName]); 64 | }); 65 | return promise; 66 | }); 67 | } 68 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeRun.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | 6 | export function run(connection: DockerComposeConnection): any { 7 | var command = connection.createComposeCommand(); 8 | command.arg("run"); 9 | 10 | var detached = tl.getBoolInput("detached"); 11 | if (detached) { 12 | command.arg("-d"); 13 | } 14 | 15 | var entrypoint = 
tl.getInput("entrypoint"); 16 | if (entrypoint) { 17 | command.arg(["--entrypoint", entrypoint]); 18 | } 19 | 20 | var containerName = tl.getInput("containerName"); 21 | if (containerName) { 22 | command.arg(["--name", containerName]); 23 | } 24 | 25 | tl.getDelimitedInput("ports", "\n").forEach(port => { 26 | command.arg(["-p", port]); 27 | }); 28 | 29 | if (!detached) { 30 | command.arg("--rm"); 31 | } 32 | 33 | command.arg("-T"); 34 | 35 | var workDir = tl.getInput("workDir"); 36 | if (workDir) { 37 | command.arg(["-w", workDir]); 38 | } 39 | 40 | var serviceName = tl.getInput("serviceName", true); 41 | command.arg(serviceName); 42 | 43 | var containerCommand = tl.getInput("containerCommand"); 44 | if (containerCommand) { 45 | command.line(containerCommand); 46 | } 47 | 48 | var promise = connection.execCommand(command); 49 | 50 | if (!detached) { 51 | promise = promise.fin(() => { 52 | var downCommand = connection.createComposeCommand(); 53 | downCommand.arg("down"); 54 | return connection.execCommand(downCommand); 55 | }); 56 | } 57 | 58 | return promise; 59 | } 60 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerComposeUp.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import DockerComposeConnection from "./dockerComposeConnection"; 5 | 6 | export function run(connection: DockerComposeConnection): any { 7 | var command = connection.createComposeCommand(); 8 | command.arg("up"); 9 | 10 | var detached = tl.getBoolInput("detached"); 11 | if (detached) { 12 | command.arg("-d"); 13 | } 14 | 15 | var buildImages = tl.getBoolInput("buildImages"); 16 | if (buildImages) { 17 | command.arg("--build"); 18 | } 19 | 20 | var abortOnContainerExit = tl.getBoolInput("abortOnContainerExit"); 21 | if (!detached && abortOnContainerExit) { 22 | command.arg("--abort-on-container-exit"); 23 | } 24 | 25 | 
return connection.execCommand(command); 26 | } 27 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerConnection.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as url from "url"; 7 | import * as tl from "vsts-task-lib/task"; 8 | import * as tr from "vsts-task-lib/toolrunner"; 9 | import * as imageUtils from "./dockerImageUtils"; 10 | 11 | export default class DockerConnection { 12 | private dockerPath: string; 13 | protected hostUrl: string; 14 | protected certsDir: string; 15 | private caPath: string; 16 | private certPath: string; 17 | private keyPath: string; 18 | private registryAuth: { [key: string]: string }; 19 | private registryHost: string; 20 | 21 | constructor() { 22 | this.dockerPath = tl.which("docker", true); 23 | } 24 | 25 | public createCommand(): tr.ToolRunner { 26 | var command = tl.tool(this.dockerPath); 27 | if (this.hostUrl) { 28 | command.arg(["-H", this.hostUrl]); 29 | command.arg("--tls"); 30 | command.arg("--tlscacert='" + this.caPath + "'"); 31 | command.arg("--tlscert='" + this.certPath + "'"); 32 | command.arg("--tlskey='" + this.keyPath + "'"); 33 | } 34 | return command; 35 | } 36 | 37 | public execCommand(command: tr.ToolRunner, options?: tr.IExecOptions) { 38 | var errlines = []; 39 | command.on("errline", line => { 40 | errlines.push(line); 41 | }); 42 | return command.exec(options).fail(error => { 43 | errlines.forEach(line => tl.error(line)); 44 | throw error; 45 | }); 46 | } 47 | 48 | public open(hostEndpoint?: string, registryEndpoint?: string): void { 49 | if (hostEndpoint) { 50 | this.hostUrl = tl.getEndpointUrl(hostEndpoint, false); 51 | if (this.hostUrl.charAt(this.hostUrl.length - 1) == "/") { 52 | this.hostUrl = this.hostUrl.substring(0, this.hostUrl.length - 1); 53 | } 54 | 55 | this.certsDir = 
path.join("", ".dockercerts"); 56 | if (!fs.existsSync(this.certsDir)) { 57 | fs.mkdirSync(this.certsDir); 58 | } 59 | 60 | var authDetails = tl.getEndpointAuthorization(hostEndpoint, false).parameters; 61 | 62 | this.caPath = path.join(this.certsDir, "ca.pem"); 63 | fs.writeFileSync(this.caPath, authDetails["cacert"]); 64 | 65 | this.certPath = path.join(this.certsDir, "cert.pem"); 66 | fs.writeFileSync(this.certPath, authDetails["cert"]); 67 | 68 | this.keyPath = path.join(this.certsDir, "key.pem"); 69 | fs.writeFileSync(this.keyPath, authDetails["key"]); 70 | } 71 | 72 | if (registryEndpoint) { 73 | var command = this.createCommand(); 74 | this.registryAuth = tl.getEndpointAuthorization(registryEndpoint, true).parameters; 75 | if (this.registryAuth) { 76 | command.arg("login"); 77 | command.arg(["-u", this.registryAuth["username"]]); 78 | command.arg(["-p", this.registryAuth["password"]]); 79 | command.arg(this.registryAuth["registry"]); 80 | command.execSync(); 81 | this.registryHost = this.registryAuth["registry"]; 82 | } 83 | } 84 | } 85 | 86 | public qualifyImageName(imageName: string) { 87 | if (!imageUtils.hasRegistryComponent(imageName) && this.registryAuth) { 88 | var regUrl = url.parse(this.registryAuth["registry"]), 89 | hostname = !regUrl.slashes ? 
regUrl.href : regUrl.host; 90 | if (hostname.toLowerCase() !== "index.docker.io") { 91 | imageName = hostname + "/" + imageName; 92 | } 93 | } 94 | return imageName; 95 | } 96 | 97 | public close(): void { 98 | if (this.registryHost) { 99 | var command = this.createCommand(); 100 | command.arg("logout"); 101 | command.arg(this.registryHost); 102 | command.execSync(); 103 | } 104 | if (this.certsDir && fs.existsSync(this.certsDir)) { 105 | del.sync(this.certsDir); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/dockerImageUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | export function hasRegistryComponent(imageName: string): boolean { 4 | var periodIndex = imageName.indexOf("."), 5 | colonIndex = imageName.indexOf(":"), 6 | slashIndex = imageName.indexOf("/"); 7 | return ((periodIndex > 0 && periodIndex < slashIndex) || 8 | (colonIndex > 0 && colonIndex < slashIndex)); 9 | } 10 | 11 | export function imageNameWithoutTag(imageName: string): string { 12 | var endIndex = 0; 13 | if (hasRegistryComponent(imageName)) { 14 | // Contains a registry component that may include ":", so omit 15 | // this part of the name from the main delimiter determination 16 | endIndex = imageName.indexOf("/"); 17 | } 18 | endIndex = imageName.indexOf(":", endIndex); 19 | return endIndex < 0 ? 
imageName : imageName.substr(0, endIndex); 20 | } 21 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/gitUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as cp from "child_process"; 4 | import * as tl from "vsts-task-lib/task"; 5 | 6 | export function tagsAt(commit: string): string[] { 7 | var git = tl.which("git", true); 8 | var args = ["tag", "--points-at", commit]; 9 | var gitDir = tl.getVariable("Build.Repository.LocalPath"); 10 | console.log("[command]" + git + " " + args.join(" ")); 11 | var result = (cp.execFileSync(git, args, { 12 | encoding: "utf8", 13 | cwd: gitDir 14 | }) as string).trim(); 15 | console.log(result); 16 | return result.length ? result.split("\n") : []; 17 | } 18 | -------------------------------------------------------------------------------- /src/tasks/dockerCompose/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/tasks/dockerCompose/icon.png -------------------------------------------------------------------------------- /src/tasks/dockerCompose/sourceUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | import * as gitUtils from "./gitUtils"; 5 | 6 | export function getSourceTags(): string[] { 7 | var tags: string[]; 8 | 9 | var sourceProvider = tl.getVariable("Build.Repository.Provider"); 10 | if (!sourceProvider) { 11 | tl.warning("Cannot retrieve source tags because Build.Repository.Provider is not set."); 12 | return []; 13 | } 14 | if (sourceProvider === "TfsVersionControl") { 15 | // TFVC has no concept of source tags 16 | return []; 17 | } 18 | 19 | var sourceVersion = tl.getVariable("Build.SourceVersion"); 20 | if (!sourceVersion) { 21 | 
tl.warning("Cannot retrieve source tags because Build.SourceVersion is not set."); 22 | return []; 23 | } 24 | 25 | switch (sourceProvider) { 26 | case "TfsGit": 27 | case "GitHub": 28 | case "Git": 29 | tags = gitUtils.tagsAt(sourceVersion); 30 | break; 31 | case "Subversion": 32 | // TODO: support subversion tags 33 | tl.warning("Retrieving Subversion tags is not currently supported."); 34 | break; 35 | } 36 | 37 | return tags || []; 38 | } 39 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/Dockerfile.task: -------------------------------------------------------------------------------- 1 | FROM python:2.7-onbuild 2 | ENTRYPOINT [ "python" ] 3 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/acsclient.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import socket 4 | import subprocess 5 | import time 6 | from StringIO import StringIO 7 | 8 | import paramiko 9 | import requests 10 | from sshtunnel import SSHTunnelForwarder 11 | 12 | 13 | class ACSClient(object): 14 | """ 15 | Class for connecting to the ACS cluster and making requests 16 | """ 17 | current_tunnel = () 18 | # Max wait time (seconds) for tunnel to be established 19 | max_wait_time = 5 * 60 20 | 21 | def __init__(self, acs_info): 22 | self.acs_info = acs_info 23 | self.tunnel_server = None 24 | self.is_direct = False 25 | self.is_running = False 26 | 27 | # If master_url is provided, we have a direct connection 28 | if self.acs_info.master_url: 29 | logging.debug('Using Direct connection') 30 | self.is_direct = True 31 | else: 32 | logging.debug('Using SSH connection') 33 | 34 | def shutdown(self): 35 | """ 36 | Stops the tunnel if its started 37 | """ 38 | if self.current_tunnel and self.is_running: 39 | logging.debug('Stopping SSH tunnel') 40 | self.current_tunnel[0].stop() 41 | self.is_running 
class ACSClient(object):
    """
    Class for connecting to the ACS cluster and making requests.

    Requests either go directly to a master URL or through an SSH
    tunnel established with the cluster's management node.
    """
    # Kept for backward compatibility; instances get their own value in
    # __init__ so tunnel state is never shared between clients.
    current_tunnel = ()
    # Max wait time (seconds) for tunnel to be established
    max_wait_time = 5 * 60

    def __init__(self, acs_info):
        """
        :param acs_info: AcsInfo-like object with cluster connection settings
        """
        self.acs_info = acs_info
        self.tunnel_server = None
        self.is_direct = False
        self.is_running = False
        # BUGFIX: was only a class attribute, so tunnel state leaked
        # across ACSClient instances; make it per-instance.
        self.current_tunnel = ()

        # If master_url is provided, we have a direct connection
        if self.acs_info.master_url:
            logging.debug('Using Direct connection')
            self.is_direct = True
        else:
            logging.debug('Using SSH connection')

    def shutdown(self):
        """
        Stops the tunnel if its started
        """
        if self.current_tunnel and self.is_running:
            logging.debug('Stopping SSH tunnel')
            self.current_tunnel[0].stop()
            self.is_running = False

    def _wait_for_tunnel(self, start_time, url):
        """
        Waits until the SSH tunnel is available and
        we can start sending requests through it

        :raises Exception: if the tunnel does not come up within max_wait_time
        """
        succeeded = False
        while not time.time() - start_time > self.max_wait_time:
            try:
                response = requests.get(url)
                if response.status_code == 200:
                    succeeded = True
                    self.is_running = True
                    break
                # BUGFIX: a reachable-but-not-ready endpoint used to spin
                # in a busy loop; back off before retrying.
                time.sleep(5)
            except Exception:
                time.sleep(5)

        if not succeeded:
            raise Exception(
                'Could not establish connection to "{}".'.format(
                    self.acs_info.host))

    def _get_private_key(self):
        """
        Creates an RSAKey instance from provided private key string
        and password
        """
        if not self.acs_info.private_key:
            raise Exception('Private key was not provided')
        private_key_file = StringIO()
        private_key_file.write(self.acs_info.private_key)
        private_key_file.seek(0)
        return paramiko.RSAKey.from_private_key(private_key_file, self.acs_info.password)

    def ensure_dcos_version(self):
        """
        Ensures min DC/OS version is installed on the cluster

        :raises ValueError: when the cluster's DC/OS version is too old
        """
        min_dcos_version_str = '1.8.4'
        # Explicit lists so the element-wise comparison also works on Python 3
        min_dcos_version_tuple = [int(part) for part in min_dcos_version_str.split('.')]
        path = '/dcos-metadata/dcos-version.json'
        version_json = self.get_request(path).json()

        if 'version' not in version_json:
            # BUGFIX: the %s placeholder was never formatted (args were
            # passed to Exception instead of being interpolated).
            raise Exception(
                'Could not determine DC/OS version from {}'.format(path))

        version_str = version_json['version']
        logging.info('Found DC/OS version %s', version_str)
        version_tuple = [int(part) for part in version_str.split('.')]
        if version_tuple < min_dcos_version_tuple:
            err_msg = ('DC/OS version %s is not supported. Only DC/OS version '
                       '"%s" or higher is supported' % (version_str, min_dcos_version_str))
            logging.error(err_msg)
            raise ValueError(err_msg)
        return True

    def _setup_tunnel_server(self, server_port):
        """
        Gets the local port to access the tunnel running on
        the server_port
        """
        if self.is_direct:
            return server_port

        if not self.current_tunnel:
            logging.debug('Create a new SSH tunnel')
            local_port = self.get_available_local_port()
            log = logging.getLogger()
            previous_log_level = log.level
            log.setLevel(logging.INFO)

            forwarder = SSHTunnelForwarder(
                ssh_address_or_host=(self.acs_info.host, int(self.acs_info.port)),
                ssh_username=self.acs_info.username,
                ssh_pkey=self._get_private_key(),
                remote_bind_address=('localhost', server_port),
                local_bind_address=('0.0.0.0', int(local_port)),
                logger=log)
            forwarder.start()

            start_time = time.time()
            url = 'http://127.0.0.1:{}/'.format(str(local_port))
            self._wait_for_tunnel(start_time, url)

            self.current_tunnel = (forwarder, int(local_port))
            log.setLevel(previous_log_level)

        return self.current_tunnel[1]

    def create_request_url(self, path, port):
        """
        Creates the request URL from provided path. Depending on which
        connection type was picked, it will create an SSH tunnel
        """
        local_port = self._setup_tunnel_server(port)
        if self.is_direct:
            url = '{}:{}/{}'.format(self.acs_info.master_url, local_port, path)
        else:
            url = 'http://127.0.0.1:{}/{}'.format(str(local_port), path)
        return url

    def make_request(self, path, method, data=None, port=80, **kwargs):
        """
        Makes an HTTP request with specified method

        :raises Exception: on an unknown method or an error (>= 400) response
        """
        url = self.create_request_url(path, port)
        logging.debug('%s: %s (DATA=%s)', method, url, data)

        if not hasattr(requests, method):
            raise Exception('Invalid method {}'.format(method))

        method_to_call = getattr(requests, method)
        headers = {'content-type': 'application/json'}

        if not data:
            response = method_to_call(
                url, headers=headers, **kwargs)
        else:
            response = method_to_call(
                url, data, headers=headers, **kwargs)

        # BUGFIX: was '> 400', which silently treated 400 Bad Request as
        # success; also the message placeholders were never interpolated.
        if response.status_code >= 400:
            raise Exception(
                'Call to "{}" failed with: {}'.format(url, response.text))
        return response

    def get_request(self, path):
        """
        Makes a GET request to an endpoint (localhost:80 on the cluster)
        :param path: Path part of the URL to make the request to
        :type path: String
        """
        return self.make_request(path, 'get')

    def delete_request(self, path):
        """
        Makes a DELETE request to an endpoint (localhost:80 on the cluster)
        :param path: Path part of the URL to make the request to
        :type path: String
        """
        return self.make_request(path, 'delete')

    def post_request(self, path, post_data):
        """
        Makes a POST request to an endpoint (localhost:80 on the cluster)
        :param path: Path part of the URL to make the request to
        :type path: String
        """
        return self.make_request(path, 'post', data=post_data)

    def put_request(self, path, put_data=None, **kwargs):
        """
        Makes a PUT request to Marathon endpoint (localhost:80 on the cluster)
        :param path: Path part of the URL to make the request to
        :type path: String
        """
        return self.make_request(path, 'put', data=put_data, **kwargs)

    def get_available_local_port(self):
        """
        Gets a random, available local port
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('', 0))
        sock.listen(1)
        port = sock.getsockname()[1]
        sock.close()
        return port
class AcsInfo(object):
    """
    Simple value object describing how to reach the ACS cluster.
    """
    def __init__(self, host, port, username, password, private_key, master_url):
        # SSH endpoint of the cluster's management node
        self.host = host
        self.port = port
        # Credentials used for the SSH connection
        self.username = username
        self.password = password
        self.private_key = private_key
        # When set, requests go directly to this URL instead of over SSH
        self.master_url = master_url
1024;\n}\nhttp {\n upstream backend {\n server leader.mesos;\n }\n server {\n listen 80;\n location = / {\n proxy_pass http://backend/exhibitor/exhibitor/v1/cluster/status;\n }\n location ~ /_/(.*)$ {\n internal;\n proxy_pass http://backend/exhibitor/exhibitor/v1/explorer/node-data?key=/\\$1;\n }\n location ~ /(.*)$ {\n content_by_lua_block {\n local cjson = require \"cjson\"\n local resp = ngx.location.capture(\"/_/\" .. ngx.var[1])\n local bytes = cjson.decode(resp.body).bytes\n bytes = string.gsub(bytes, \" \", \"\")\n if string.len(bytes) == 0 then\n ngx.status = 404\n ngx.exit(404)\n end\n for i = 1, string.len(bytes), 2 do\n ngx.print(string.char(tonumber(string.sub(bytes, i, i + 1), 16)))\n end\n }\n }\n }\n}", 27 | "healthChecks": [ 28 | { 29 | "path": "/", 30 | "protocol": "HTTP", 31 | "portIndex": 0, 32 | "gracePeriodSeconds": 300, 33 | "intervalSeconds": 5, 34 | "timeoutSeconds": 20, 35 | "maxConsecutiveFailures": 3, 36 | "ignoreHttp1xx": false 37 | } 38 | ] 39 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/conf/external-nginx-lb.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "/external-nginx-lb", 3 | "cpus": 0.1, 4 | "mem": 128, 5 | "instances": 1, 6 | "acceptedResourceRoles": [ 7 | "slave_public" 8 | ], 9 | "container": { 10 | "type": "DOCKER", 11 | "docker": { 12 | "image": "nginx:alpine", 13 | "network": "HOST" 14 | } 15 | }, 16 | "ports": [ 17 | 80, 18 | 443 19 | ], 20 | "requirePorts": true, 21 | "cmd": "cat << EOF > /etc/nginx/nginx.conf && exec nginx -g 'daemon off;'\nworker_processes 1;\nevents {\n worker_connections 1024;\n}\nhttp {\n server {\n listen 80;\n resolver $(echo $(cat /etc/resolv.conf | grep ^nameserver\\ | cut -d ' ' -f 2));\n location / {\n proxy_set_header Host \\$host;\n proxy_pass http://\\$host.${GROUP:-external}.marathon.l4lb.thisdcos.directory\\$request_uri;\n }\n }\n}", 22 | "healthChecks": [ 23 | { 
import argparse
import logging
import sys
import traceback


class VstsLogFormatter(logging.Formatter):
    """
    Formatter that prefixes records with VSTS logging commands so
    errors, warnings and debug lines are highlighted in build output.
    """
    error_format = logging.Formatter('##[error]%(message)s')
    warning_format = logging.Formatter('##[warning]%(message)s')
    debug_format = logging.Formatter('##[debug]%(message)s')
    default_format = logging.Formatter('%(message)s')

    def format(self, record):
        # Pick the VSTS prefix matching the record's severity.
        if record.levelno == logging.ERROR:
            return self.error_format.format(record)
        elif record.levelno == logging.WARNING:
            return self.warning_format.format(record)
        elif record.levelno == logging.DEBUG:
            return self.debug_format.format(record)
        return self.default_format.format(record)


def get_arg_parser():
    """
    Sets up the argument parser
    """
    parser = argparse.ArgumentParser(
        description='Translate docker-compose.yml file to marathon.json file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('--compose-file',
                        help='[required] Docker-compose.yml file')
    parser.add_argument('--dcos-master-url',
                        help='DC/OS master URL')

    parser.add_argument('--group-name',
                        help='[required] Application group name')
    parser.add_argument('--group-qualifier',
                        help='[required] Application group qualifier')
    parser.add_argument('--group-version',
                        help='[required] Application group version')
    parser.add_argument('--minimum-health-capacity', type=int,
                        help='[required] Minimum health capacity')

    parser.add_argument('--registry-host',
                        help='Registry host (e.g. myregistry.azurecr-test.io:1234)')
    parser.add_argument('--registry-username',
                        help='Registry username')
    parser.add_argument('--registry-password',
                        help='Registry password')

    parser.add_argument('--acs-host',
                        help='ACS host')
    # BUGFIX: help text previously said 'ACS username' (copy/paste error)
    parser.add_argument('--acs-port',
                        help='ACS port')
    parser.add_argument('--acs-username',
                        help='ACS username')
    parser.add_argument('--acs-password',
                        help='ACS password')
    parser.add_argument('--acs-private-key',
                        help='ACS private key')

    parser.add_argument('--verbose',
                        help='Turn on verbose logging',
                        action='store_true')
    return parser


def process_arguments():
    """
    Makes sure required arguments are provided
    """
    arg_parser = get_arg_parser()
    args = arg_parser.parse_args()

    if args.compose_file is None:
        arg_parser.error('argument --compose-file is required')
    if args.group_name is None:
        arg_parser.error('argument --group-name is required')
    if args.group_qualifier is None:
        arg_parser.error('argument --group-qualifier is required')
    if args.group_version is None:
        arg_parser.error('argument --group-version is required')
    if args.minimum_health_capacity is None:
        arg_parser.error('argument --minimum-health-capacity is required')
    return args


def init_logger(verbose):
    """
    Initializes the logger and sets the custom formatter for VSTS
    """
    logging_level = logging.DEBUG if verbose else logging.INFO
    vsts_formatter = VstsLogFormatter()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(vsts_formatter)
    logging.root.name = 'ACS-Deploy'
    logging.root.setLevel(logging_level)
    logging.root.addHandler(stream_handler)

    # Don't show INFO log messages from requests library
    logging.getLogger("requests").setLevel(logging.WARNING)


if __name__ == '__main__':
    # Imported here rather than at module level so this file can be
    # imported (e.g. by tests) without the deployment dependencies.
    import dockercomposeparser

    arguments = process_arguments()
    init_logger(arguments.verbose)
    try:
        with dockercomposeparser.DockerComposeParser(
            arguments.compose_file, arguments.dcos_master_url, arguments.acs_host,
            arguments.acs_port, arguments.acs_username, arguments.acs_password,
            arguments.acs_private_key, arguments.group_name, arguments.group_qualifier,
            arguments.group_version, arguments.registry_host, arguments.registry_username,
            arguments.registry_password, arguments.minimum_health_capacity,
            check_dcos_version=True) as compose_parser:
            compose_parser.deploy()
        sys.exit(0)
    except Exception as deployment_exc:
        logging.error('Error occurred during deployment: %s', deployment_exc)
        sys.exit(1)
class DockerRegistry(object):
    """
    Class for working with Docker registry
    """
    def __init__(self, registry_host, registry_username, registry_password, marathon_helper):
        self.registry_host = registry_host
        self.registry_username = registry_username
        self.registry_password = registry_password
        self.marathon_helper = marathon_helper
        self.exhibitor_helper = Exhibitor(marathon_helper)

    def get_registry_auth_url(self):
        """
        Handles creating the exhibitor-data service, docker.tar.gz and returns
        the URL to the docker.tar.gz that can be set as a URI on marathon app
        """
        # If registry_host is not set, we assume we don't need the auth URL
        if not self.registry_host:
            return None

        # Make sure the exhibitor-data helper app is deployed first
        self.marathon_helper.ensure_exists(Exhibitor.APP_ID, Exhibitor.JSON_FILE)

        packer = hexifier.DockerAuthConfigHexifier(
            self.registry_host, self.registry_username, self.registry_password)
        hex_payload = packer.hexify()
        endpoint = 'registries/{}'.format(packer.get_auth_file_path())
        return self.exhibitor_helper.upload(hex_payload, endpoint)
self.marathon_helper.ensure_exists(Exhibitor.APP_ID, Exhibitor.JSON_FILE) 26 | auth_config_hexifier = hexifier.DockerAuthConfigHexifier( 27 | self.registry_host, self.registry_username, self.registry_password) 28 | 29 | hex_string = auth_config_hexifier.hexify() 30 | endpoint = 'registries/{}'.format(auth_config_hexifier.get_auth_file_path()) 31 | return self.exhibitor_helper.upload(hex_string, endpoint) 32 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/exhibitor.py: -------------------------------------------------------------------------------- 1 | 2 | class Exhibitor(object): 3 | """ 4 | Functionality for interacting with exhbitior 5 | """ 6 | APP_ID = '/exhibitor-data' 7 | HOST_NAME = 'exhibitor-data.marathon.l4lb.thisdcos.directory' 8 | JSON_FILE = 'conf/exhibitor-data.json' 9 | 10 | def __init__(self, marathon_helper): 11 | self.marathon_helper = marathon_helper 12 | 13 | def upload(self, hex_string, endpoint): 14 | """ 15 | Uploads a hexified string to provided exhibitor endpoint 16 | and returns the full URL to it 17 | """ 18 | self.marathon_helper.put_request( 19 | endpoint, 20 | put_data=hex_string, 21 | endpoint='/exhibitor/exhibitor/v1/explorer/znode') 22 | 23 | return 'http://{}/{}'.format( 24 | Exhibitor.HOST_NAME, endpoint) 25 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/healthcheck.py: -------------------------------------------------------------------------------- 1 | 2 | import json 3 | 4 | class HealthCheck(object): 5 | PATH_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.path' 6 | PORT_INDEX_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.portindex' 7 | COMMAND_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.command' 8 | HEALTH_CHECK_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck' 9 | HEALTH_CHECKS_LABEL = 'com.microsoft.acs.dcos.marathon.healthchecks' 10 | 11 | def __init__(self, 
class HealthCheck(object):
    """
    Derives a Marathon health-check configuration from docker-compose labels.
    Labels may arrive as a mapping (name -> value) or a list of
    'name=value' strings.
    """
    PATH_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.path'
    PORT_INDEX_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.portindex'
    COMMAND_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck.command'
    HEALTH_CHECK_LABEL = 'com.microsoft.acs.dcos.marathon.healthcheck'
    HEALTH_CHECKS_LABEL = 'com.microsoft.acs.dcos.marathon.healthchecks'

    def __init__(self, labels):
        if labels is None:
            raise ValueError('Labels cannot be empty')
        self.labels = labels

    @staticmethod
    def get_default_health_check_config():
        """
        Gets the default (TCP) healthcheck config for Marathon
        """
        return {
            'portIndex': 0,
            'protocol': 'TCP',
            'gracePeriodSeconds': 300,
            'intervalSeconds': 5,
            'timeoutSeconds': 20,
            'maxConsecutiveFailures': 3
        }

    def _label_exists(self, name):
        """
        Checks if label exists and returns True/False
        """
        for entry in self.labels:
            # 'name=value' entries are matched on the name part only
            key = entry.split('=')[0] if '=' in entry else entry
            if key.lower() == name:
                return True
        return False

    def _get_label_value(self, name):
        """
        Gets the label value or None if label doesn't exist
        """
        for entry in self.labels:
            if '=' in entry:
                parts = entry.split('=')
                if parts[0].lower() == name:
                    return parts[1]
            elif entry.lower() == name:
                # Mapping form: look the value up by its key
                return self.labels[entry]
        return None

    def _set_path_if_exists(self, healthcheck_json):
        """
        Sets the path in health check
        """
        if self._label_exists(self.PATH_LABEL):
            healthcheck_json['path'] = self._get_label_value(self.PATH_LABEL)
            healthcheck_json['protocol'] = 'HTTP'
        return healthcheck_json

    def _set_port_index_if_exists(self, healthcheck_json):
        """
        Sets the port index in health check
        """
        if self._label_exists(self.PORT_INDEX_LABEL):
            healthcheck_json['portIndex'] = self._get_label_value(self.PORT_INDEX_LABEL)
            healthcheck_json['protocol'] = 'HTTP'
        return healthcheck_json

    def _set_command_if_exists(self, healthcheck_json):
        """
        Sets the command in healthcheck
        """
        if self._label_exists(self.COMMAND_LABEL):
            healthcheck_json['command'] = {'value': self._get_label_value(self.COMMAND_LABEL)}
            healthcheck_json['protocol'] = 'COMMAND'
        return healthcheck_json

    def get_health_check_config(self):
        """
        Gets the health check config, or None when no health-check
        labels are present.
        """
        healthcheck = None

        if self._label_exists(self.HEALTH_CHECK_LABEL) or \
           self._label_exists(self.PATH_LABEL) or \
           self._label_exists(self.PORT_INDEX_LABEL):
            healthcheck = HealthCheck.get_default_health_check_config()
            healthcheck = self._set_path_if_exists(healthcheck)
            healthcheck = self._set_port_index_if_exists(healthcheck)
        elif self._label_exists(self.COMMAND_LABEL):
            healthcheck = HealthCheck.get_default_health_check_config()
            healthcheck = self._set_command_if_exists(healthcheck)
        elif self._label_exists(self.HEALTH_CHECKS_LABEL):
            healthcheck = json.loads(self._get_label_value(self.HEALTH_CHECKS_LABEL))

        # Marathon expects a list of health checks
        if healthcheck is not None and not isinstance(healthcheck, list):
            healthcheck = [healthcheck]

        return healthcheck
class DockerAuthConfigHexifier(object):
    """
    Creates a hex representation of docker.tar.gz file
    that contains config.json with auth information
    """
    CONFIG_FILE_NAME = 'config.json'

    def __init__(self, registry_host, registry_username, registry_password):
        """
        :raises ValueError: when any registry setting is missing
        """
        self.registry_host = registry_host
        self.registry_username = registry_username
        self.registry_password = registry_password

        # Fail fast on missing settings
        if not self.registry_host:
            raise ValueError('registry_host not set')
        if not self.registry_username:
            raise ValueError('registry_username not set')
        if not self.registry_password:
            raise ValueError('registry_password not set')

    def get_auth_file_path(self):
        """
        Gets the path to the auth file in Exhibitor (e.g. 'hostname/username.tar.gz')
        """
        return '{}/{}'.format(self.registry_host, self._get_auth_filename())

    @classmethod
    def hexify_file(cls, file_name):
        """
        Creates a hex representation of a file and returns it as
        a string
        """
        with open(file_name, 'rb') as binary_file:
            return binascii.hexlify(bytearray(binary_file.read()))

    def hexify(self):
        """
        Create a hex representation of the docker.tar.gz file
        """
        return DockerAuthConfigHexifier.hexify_file(self._create_temp_auth_file())

    def _get_auth_filename(self):
        """
        Gets the name of the .tar.gz file
        """
        if not self.registry_username:
            raise ValueError('registry_username not set')
        return '{}.tar.gz'.format(self.registry_username)

    def _get_config_filepath(self):
        """
        Writes config.json into a fresh temp directory and returns its path.
        """
        root_path = tempfile.mkdtemp()
        config_filepath = os.path.join(root_path, self.CONFIG_FILE_NAME)
        with open(config_filepath, 'w') as config_file:
            json.dump(self._create_config_contents(), config_file)
        return config_filepath

    def _create_temp_auth_file(self):
        """
        Creates a temporary auth file (.tar.gz) with config.json in .docker folder
        """
        config_filepath = self._get_config_filepath()
        auth_file_path = os.path.join(
            os.path.dirname(config_filepath), self._get_auth_filename())

        with tarfile.open(auth_file_path, 'w:gz') as tar:
            tar.add(config_filepath, os.path.join('.docker', self.CONFIG_FILE_NAME))
        return auth_file_path

    def _create_config_contents(self):
        """
        Creates the config.json for docker auth.
        NOTE(review): b64encode on a str is Python 2 specific; under
        Python 3 the credentials would need encoding to bytes first.
        """
        auth_token = base64.b64encode(
            self.registry_username + ':' + self.registry_password)
        return {
            "auths": {
                self.registry_host: {
                    "auth": auth_token
                }
            }
        }
class MarathonEvent(object):
    """
    Represents a single event from Marathon's event stream.
    """
    def __init__(self, data):
        # Raw JSON payload of the event
        self.data = data

    def _get_event_type(self):
        """
        Gets the event type, or 'UNKNOWN' when the payload has none.
        """
        return self.data.get('eventType', 'UNKNOWN')

    def app_id(self):
        """
        Gets the appId
        """
        return self.data['appId']

    def task_id(self):
        """
        Gets the taskId
        """
        return self.data['taskId']

    def slave_id(self):
        """
        Gets the slaveId
        """
        return self.data['slaveId']

    def _get_task_status(self):
        """
        Gets the task status
        """
        return self.data['taskStatus']

    def is_status_update(self):
        """
        True if event represents a status update
        """
        return self._get_event_type() == 'status_update_event'

    def is_group_change_success(self):
        """
        True if event represents a group change success
        """
        return self._get_event_type() == 'group_change_success'

    def is_app_terminated(self):
        """
        True if event represents an app terminated event
        """
        return self._get_event_type() == 'app_terminated_event'

    def is_task_failed(self):
        """
        True if task is failed, false otherwise
        """
        return self._get_task_status() == 'TASK_FAILED'

    def is_task_staging(self):
        """
        True if task is staging, false otherwise
        """
        return self._get_task_status() == 'TASK_STAGING'

    def is_task_running(self):
        """
        True if task is running, false otherwise
        """
        return self._get_task_status() == 'TASK_RUNNING'

    def is_task_killed(self):
        """
        True if task is killed, false otherwise
        """
        return self._get_task_status() == 'TASK_KILLED'

    def is_task_killing(self):
        """
        True if task is being killed, false otherwise
        """
        return self._get_task_status() == 'TASK_KILLING'

    def is_task_finished(self):
        """
        True if task is finished, false otherwise
        """
        return self._get_task_status() == 'TASK_FINISHED'

    def is_deployment_succeeded(self):
        """
        True if event represents a successful deployment
        """
        return self._get_event_type() == 'deployment_success'

    def is_deployment_failed(self):
        """
        True if event represents a failed deployment
        """
        return self._get_event_type() == 'deployment_failed'

    def status(self):
        """
        Builds a human-readable status line for the event; empty string
        for event types that are not reported.
        """
        if self.is_task_running():
            return 'Service "{}" task is running'.format(self.app_id())
        if self.is_task_staging():
            return 'Service "{}" task is being staged'.format(self.app_id())
        if self.is_task_failed():
            return 'Service "{}" task has failed: {}'.format(
                self.app_id(), self.data['message'])
        if self.is_task_killed():
            return 'Service "{}" task was killed: {}'.format(
                self.app_id(), self.data['message'])
        if self.is_task_killing():
            if self.data['message'].strip() == '':
                return 'Service "{}" task is being killed.'.format(self.app_id())
            return 'Service "{}" task is being killed: {}'.format(
                self.app_id(), self.data['message'])
        if self.is_task_finished():
            if self.data['message'].strip() == '':
                return 'Service "{}" task is finished.'.format(self.app_id())
            return 'Service "{}" task is finished: {}'.format(
                self.app_id(), self.data['message'])
        if self.is_app_terminated():
            return 'Service "{}" was terminated.'.format(self.app_id())
        return ""
class DeploymentMonitor(object):
    """
    Watches Marathon's event stream on a background thread and tracks
    whether a deployment (identified by its deployment id) succeeded
    for the given set of app ids.
    """
    def __init__(self, marathon, app_ids, deployment_id, log_failures=True):
        self._log_failures = log_failures
        self._marathon = marathon
        self._deployment_succeeded = False
        self._app_ids = app_ids
        self._deployment_id = deployment_id
        self.stopped = False
        # Background consumer of the Marathon event stream
        self._thread = threading.Thread(
            target=DeploymentMonitor._process_events, args=(self,))

    def start(self):
        """
        Starts the deployment monitor
        """
        self._thread.daemon = True
        self._thread.start()

    def deployment_succeeded(self):
        """
        True if deployment succeeded, false otherwise
        """
        return self._deployment_succeeded

    def _process_events(self):
        """
        Reads the event stream from Marathon and handles events
        """
        for event in self._get_event_stream():
            try:
                self._log_event(event)
            except:
                # A failure to log one event must not kill the monitor
                pass

    def _log_event(self, event):
        """
        Logs events from Marathon
        """
        if event.is_status_update() or event.is_app_terminated():
            if event.app_id() not in self._app_ids:
                return
            logging.info(event.status())
            if self._log_failures and (event.is_task_failed() or event.is_task_killed()):
                self._log_stderr(event)
        elif event.is_deployment_succeeded():
            if event.data['id'] == self._deployment_id:
                self._deployment_succeeded = True

    def _log_stderr(self, event):
        """
        Logs the stderr of the failed event
        """
        failed_task = self._marathon.mesos.get_task(
            event.task_id(), event.slave_id())
        stderr = self._marathon.mesos.get_task_log_file(failed_task, 'stderr')
        logging.error(stderr)

    def _get_event_stream(self):
        """
        Generator yielding MarathonEvent objects parsed from the
        Marathon /events SSE feed; stops once self.stopped is set.
        """
        events_url = self._marathon.get_url('service/marathon/v2/events')
        for msg in sseclient.SSEClient(events_url):
            if self.stopped:
                break
            try:
                payload = json.loads(msg.data)
            except ValueError:
                logging.debug('Failed to parse event: %s', msg.data)
                continue
            yield MarathonEvent(payload)
response.raise_for_status() 32 | 33 | all_slaves = response.json() 34 | return [slave['id'] for slave in all_slaves['slaves']] 35 | 36 | def _get_slave_state(self, slave_id): 37 | """ 38 | Gets the state.json for specified slave 39 | """ 40 | slave_state_response = self._get_request( 41 | 'slave', '{}/state.json'.format(slave_id)) 42 | slave_state_response.raise_for_status() 43 | 44 | slave_state_json = slave_state_response.json() 45 | return slave_state_json 46 | 47 | def get_task(self, task_id, slave_id=None): 48 | """ 49 | Go through all frameworks and executors and get all tasks that 50 | start with the service_id. Returns the latest task with information 51 | needed to get the files from the sandbox 52 | """ 53 | framework_name = 'marathon' 54 | slave_ids = [slave_id] 55 | if not slave_id: 56 | slave_ids = self._get_slave_ids() 57 | 58 | found_tasks = [] 59 | 60 | for slave_id in slave_ids: 61 | slave_state_json = self._get_slave_state(slave_id) 62 | 63 | # Get all 'marathon' frameworks 64 | marathon_frameworks = [] 65 | marathon_frameworks.extend( 66 | [f for f in slave_state_json['frameworks'] if f['name'] == framework_name]) 67 | marathon_frameworks.extend( 68 | [f for f in slave_state_json['completed_frameworks'] if f['name'] == framework_name]) 69 | 70 | # Get all executors and completed executors where 'id' of the task 71 | # starts with the service_id 72 | executors = [] 73 | for framework in marathon_frameworks: 74 | executors.extend( 75 | [e for e in framework['executors'] if e['id'] == task_id]) 76 | executors.extend( 77 | [e for e in framework['completed_executors'] if e['id'] == task_id]) 78 | 79 | for executor in executors: 80 | for task in executor['tasks']: 81 | found_tasks.append(MesosTask(task, executor['directory'])) 82 | 83 | for task in executor['completed_tasks']: 84 | found_tasks.append(MesosTask(task, executor['directory'])) 85 | 86 | # Sort the tasks, so the newest are on top 87 | found_tasks.sort(key=lambda task: task.timestamp, 
reverse=True) 88 | if len(found_tasks) == 0: 89 | return None 90 | 91 | return found_tasks[0] 92 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/mesos_task.py: -------------------------------------------------------------------------------- 1 | 2 | class MesosTask(object): 3 | """ 4 | Class represents a Mesos task 5 | """ 6 | def __init__(self, task, directory): 7 | if not 'id' in task: 8 | raise ValueError('Task is missing "id" ') 9 | if not 'slave_id' in task: 10 | raise ValueError('Task is missing "slave_id"') 11 | if not 'framework_id' in task: 12 | raise ValueError('Task is missing "framework_id"') 13 | if not 'state' in task: 14 | raise ValueError('Task is missing "state"') 15 | if not 'statuses' in task: 16 | raise ValueError('Task is missing "statuses"') 17 | 18 | self.task_id = task['id'] 19 | self.slave_id = task['slave_id'] 20 | self.framework_id = task['framework_id'] 21 | self.directory = directory 22 | self.state = task['state'] 23 | 24 | statuses = [ts for ts in task['statuses']] 25 | if len(statuses) == 0: 26 | timestamp = -1 27 | else: 28 | statuses.sort(key=lambda s: s['timestamp'], reverse=True) 29 | timestamp = statuses[0]['timestamp'] 30 | self.timestamp = timestamp 31 | 32 | def get_sandbox_download_path(self, filename): 33 | """ 34 | Gets the path for downloading a file from sandbox 35 | """ 36 | if not filename: 37 | raise ValueError('Filename is not set') 38 | 39 | url_template = '{}/files/download?path={}/{}' 40 | return url_template.format( 41 | self.slave_id, self.directory, filename) 42 | 43 | def is_failed(self): 44 | """ 45 | Returns True if task failed, False otherwise 46 | """ 47 | return self.state == 'TASK_FAILED' 48 | 49 | def is_killed(self): 50 | """ 51 | Returns True if task is killed or being killed, false otherwise 52 | """ 53 | return self.state == 'TASK_KILLED' or self.state == 'TASK_KILLING' 54 | 55 | def __str__(self): 56 | return 
'task_id:{}\nslave_id:{}\nframework_id:{}\nstate:{}\ndirectory:{}\ntimestamp:{}\n'.format( 57 | self.task_id, self.slave_id, self.framework_id, self.state, self.directory,self.timestamp) 58 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/nginx.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from hexifier import DockerAuthConfigHexifier 4 | from exhibitor import Exhibitor 5 | 6 | 7 | class LoadBalancerApp(object): 8 | """ 9 | NGINX load balancer functionality 10 | """ 11 | APP_ID = '/external-nginx-lb' 12 | JSON_FILE = 'conf/external-nginx-lb.json' 13 | 14 | def __init__(self, marathon_helper): 15 | self.marathon_helper = marathon_helper 16 | 17 | def ensure_exists(self, compose_data): 18 | """ 19 | Checks if compose file has label that requires NGINX 20 | to be install and ensures it is installed 21 | """ 22 | for _, service_info in compose_data['services'].items(): 23 | if self._has_external_label(service_info): 24 | self._install() 25 | break 26 | 27 | def _has_external_label(self, service_info): 28 | """ 29 | Checks if the service has a vhost label set 30 | """ 31 | if 'labels' in service_info: 32 | for label in service_info['labels']: 33 | if label.startswith('com.microsoft.acs.dcos.marathon.vhost'): 34 | return True 35 | return False 36 | 37 | def _install(self): 38 | """ 39 | Installs NGINX load balancer. Checks if NGINX is not installed yet 40 | then deploys the nginx.conf template first, and deploys the NGINX app. 
41 | """ 42 | if not self.marathon_helper.app_exists(LoadBalancerApp.APP_ID): 43 | self.marathon_helper.ensure_exists(LoadBalancerApp.APP_ID, LoadBalancerApp.JSON_FILE) 44 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml==3.12 2 | requests==2.12.3 3 | sshtunnel==0.1.1 4 | sseclient==0.0.14 5 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_compose_1.yml: -------------------------------------------------------------------------------- 1 | networks: {} 2 | services: 3 | service-a: 4 | build: 5 | context: ./service-a 6 | environment: 7 | - RACK_ENV=development 8 | image: registry.marathon.mesos:5000/peterjausovecsampleapp_service-a@sha256:8ae590a69fc4cbb77b43c1523cb0044b766f58cf5de50f4180b80d4f01381931 9 | links: 10 | - service-b 11 | ports: 12 | - 8080:80 13 | expose: 14 | - 9090 15 | command: [bundle, exec, thin, -p, 3000] 16 | labels: 17 | - "NAME1=value2" 18 | - "com.microsoft.azure.acs.dcos.marathon.healthcheck=true" 19 | entrypoint: 20 | - php 21 | - -d 22 | - zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so 23 | - -d 24 | - memory_limit=-1 25 | - vendor/bin/phpunit 26 | service-b: 27 | build: 28 | context: ./service-b 29 | environment: 30 | APPINSIGHTS_INSTRUMENTATIONKEY: null 31 | expose: 32 | - 80 33 | labels: 34 | first: 123 35 | second: 456 36 | third: 987 37 | image: registry.marathon.mesos:5000/peterjausovecsampleapp_service-b@sha256:b0228dde5727dbcd375aec304cd481d6b6a8010ced609e21f093e848432df750 38 | version: '2.0' 39 | volumes: {} -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_compose_1_expected.json: -------------------------------------------------------------------------------- 1 | { 2 | "apps": [ 3 | { 
4 | "ports": [], 5 | "instances": 0, 6 | "dependencies": [], 7 | "container": { 8 | "docker": { 9 | "portMappings": [ 10 | { 11 | "labels": {}, 12 | "protocol": "tcp", 13 | "containerPort": 0, 14 | "hostPort": 0 15 | } 16 | ], 17 | "image": "registry.marathon.mesos:5000/peterjausovecsampleapp_service-b@sha256:b0228dde5727dbcd375aec304cd481d6b6a8010ced609e21f093e848432df750", 18 | "network": "BRIDGE", 19 | "parameters": [] 20 | } 21 | }, 22 | "env": { 23 | "APPINSIGHTS_INSTRUMENTATIONKEY": "None" 24 | }, 25 | "mem": 256, 26 | "labels": { 27 | "second": 456, 28 | "third": 987, 29 | "first": 123 30 | }, 31 | "id": "mygroup/service-b", 32 | "cpus": 0.1 33 | }, 34 | { 35 | "mem": 256, 36 | "labels": { 37 | "com.microsoft.azure.acs.dcos.marathon.healthcheck": "true", 38 | "NAME1": "value2" 39 | }, 40 | "cpus": 0.1, 41 | "instances": 0, 42 | "dependencies": [], 43 | "healthChecks": [ 44 | { 45 | "portIndex": 0, 46 | "protocol": "TCP", 47 | "timeoutSeconds": 20, 48 | "intervalSeconds": 5, 49 | "gracePeriodSeconds": 300, 50 | "maxConsecutiveFailures": 3 51 | } 52 | ], 53 | "id": "mygroup/service-a", 54 | "container": { 55 | "docker": { 56 | "portMappings": [ 57 | { 58 | "labels": {}, 59 | "protocol": "tcp", 60 | "containerPort": 0, 61 | "hostPort": 0 62 | } 63 | ], 64 | "image": "registry.marathon.mesos:5000/peterjausovecsampleapp_service-a@sha256:8ae590a69fc4cbb77b43c1523cb0044b766f58cf5de50f4180b80d4f01381931", 65 | "network": "BRIDGE", 66 | "parameters": [ 67 | { 68 | "value": "php -d zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-20100525/xdebug.so -d memory_limit=-1 vendor/bin/phpunit", 69 | "key": "entrypoint" 70 | } 71 | ] 72 | } 73 | }, 74 | "cmd": "bundle exec thin -p 3000", 75 | "env": { 76 | "RACK_ENV": "development" 77 | }, 78 | "ports": [] 79 | } 80 | ], 81 | "id": "mygroup" 82 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_deployment_monitor.py: 
-------------------------------------------------------------------------------- 1 | 2 | import unittest 3 | from mock import Mock, patch 4 | 5 | from marathon_deployments import DeploymentMonitor, MarathonEvent 6 | 7 | 8 | class DeploymentMonitorTests(unittest.TestCase): 9 | 10 | @patch('marathon.Marathon') 11 | def test_not_none(self, mock_marathon): 12 | m = DeploymentMonitor(mock_marathon, [], '') 13 | self.assertIsNotNone(m) 14 | 15 | @patch('marathon.Marathon') 16 | def test_start_called(self, mock_marathon): 17 | m = DeploymentMonitor(mock_marathon, [], '') 18 | m._thread = Mock() 19 | m.start() 20 | self.assertTrue(m._thread.start.called) 21 | self.assertTrue(m._thread.daemon) 22 | 23 | @patch('marathon.Marathon') 24 | def test_stop_called(self, mock_marathon): 25 | m = DeploymentMonitor(mock_marathon, [], '') 26 | m._thread = Mock() 27 | m.stop() 28 | self.assertTrue(m._stop_event.isSet()) 29 | self.assertTrue(m._thread.stop.called) 30 | 31 | @patch('marathon.Marathon') 32 | def test_is_running(self, mock_marathon): 33 | m = DeploymentMonitor(mock_marathon, [], '') 34 | self.assertTrue(m.is_running()) 35 | 36 | @patch('marathon.Marathon') 37 | def test_is_running_false(self, mock_marathon): 38 | m = DeploymentMonitor(mock_marathon, [], '') 39 | m._thread = Mock() 40 | m.stop() 41 | self.assertFalse(m.is_running()) 42 | 43 | @patch('marathon.Marathon') 44 | def test_handle_event_status_update(self, mock_marathon): 45 | app_ids = ['app_1', 'app_2'] 46 | m = DeploymentMonitor(mock_marathon, app_ids, '') 47 | m._thread = Mock() 48 | 49 | ev = MarathonEvent({'appId': 'app_1', 'taskStatus': 'some_status', 'eventType': 'status_update_event'}) 50 | m._handle_event(ev) 51 | self.assertFalse(m._deployment_failed) 52 | self.assertFalse(m._deployment_succeeded) 53 | 54 | @patch('marathon.Marathon') 55 | def test_handle_event_status_update_failed(self, mock_marathon): 56 | app_ids = ['app_1', 'app_2'] 57 | m = DeploymentMonitor(mock_marathon, app_ids, '') 58 | 
m._thread = Mock() 59 | 60 | ev = MarathonEvent({'appId': 'app_1', 'taskStatus': 'TASK_FAILED', 'eventType': 'status_update_event', 'message': 'somemessage'}) 61 | m._handle_event(ev) 62 | self.assertTrue(m._deployment_failed) 63 | self.assertTrue(m._stop_event.isSet()) 64 | self.assertTrue(m._thread.stop.called) 65 | self.assertIsNotNone(m._failed_event) 66 | self.assertEqual(ev, m._failed_event) 67 | self.assertFalse(m._deployment_succeeded) 68 | 69 | @patch('marathon.Marathon') 70 | def test_handle_event_deployment_succeeded(self, mock_marathon): 71 | app_ids = ['app_1', 'app_2'] 72 | m = DeploymentMonitor(mock_marathon, app_ids, 'deployment_id') 73 | m._thread = Mock() 74 | 75 | ev = MarathonEvent({'id': 'deployment_id', 'appId': 'app_1', 'taskStatus': 'TASK_FAILED', 'eventType': 'deployment_success', 'message': 'somemessage'}) 76 | m._handle_event(ev) 77 | self.assertFalse(m._deployment_failed) 78 | self.assertTrue(m._stop_event.isSet()) 79 | self.assertTrue(m._thread.stop.called) 80 | self.assertIsNone(m._failed_event) 81 | self.assertTrue(m._deployment_succeeded) 82 | 83 | @patch('marathon.Marathon') 84 | def test_handle_event_app_not_in_list(self, mock_marathon): 85 | app_ids = ['app_X', 'app_Y'] 86 | m = DeploymentMonitor(mock_marathon, app_ids, '') 87 | m._thread = Mock() 88 | 89 | ev = MarathonEvent({'appId': 'app_1', 'taskStatus': 'TASK_FAILED', 'eventType': 'status_update_event', 'message': 'somemessage'}) 90 | m._handle_event(ev) 91 | self.assertFalse(m._deployment_failed) 92 | self.assertFalse(m._deployment_succeeded) 93 | self.assertFalse(m._stop_event.isSet()) 94 | self.assertFalse(m._thread.stop.called) 95 | self.assertIsNone(m._failed_event) 96 | 97 | @patch('marathon.Marathon') 98 | def test_handle_event_deployment_not_in_list(self, mock_marathon): 99 | app_ids = ['app_X', 'app_Y'] 100 | m = DeploymentMonitor(mock_marathon, app_ids, 'MY_DEPLOYMENT') 101 | m._thread = Mock() 102 | 103 | ev = MarathonEvent({'id': 'another_deply_id', 'appId': 
'app_1', 'taskStatus': 'TASK_FAILED', 'eventType': 'deployment_success', 'message': 'somemessage'}) 104 | m._handle_event(ev) 105 | self.assertFalse(m._deployment_failed) 106 | self.assertFalse(m._deployment_succeeded) 107 | self.assertFalse(m._stop_event.isSet()) 108 | self.assertFalse(m._thread.stop.called) 109 | self.assertIsNone(m._failed_event) -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_hexifier.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import unittest 3 | 4 | import hexifier 5 | 6 | 7 | class DockerAuthConfigHexifierTests(unittest.TestCase): 8 | def test_not_none(self): 9 | h = hexifier.DockerAuthConfigHexifier('myhost', 'myusername', 'mypassword') 10 | self.assertIsNotNone(h) 11 | 12 | def test_values_set(self): 13 | h = hexifier.DockerAuthConfigHexifier('myhost', 'myusername', 'mypassword') 14 | self.assertEquals('myhost', h.registry_host) 15 | self.assertEquals('myusername', h.registry_username) 16 | self.assertEquals('mypassword', h.registry_password) 17 | 18 | def test_missing_host(self): 19 | self.assertRaises(ValueError, hexifier.DockerAuthConfigHexifier, None, 'user', 'pass') 20 | 21 | def test_missing_user(self): 22 | self.assertRaises(ValueError, hexifier.DockerAuthConfigHexifier, 'host', None, 'pass') 23 | 24 | def test_missing_pass(self): 25 | self.assertRaises(ValueError, hexifier.DockerAuthConfigHexifier, 'host', 'user', None) 26 | 27 | def test_auth_file_path(self): 28 | h = hexifier.DockerAuthConfigHexifier('myhost', 'myusername', 'mypassword') 29 | self.assertEquals('myhost/myusername.tar.gz', h.get_auth_file_path()) 30 | 31 | def test_auth_file_name(self): 32 | h = hexifier.DockerAuthConfigHexifier('myhost', 'myusername', 'mypassword') 33 | self.assertEquals('myusername.tar.gz', h._get_auth_filename()) 34 | 35 | def test_create_config_contents(self): 36 | expected = { 37 | "auths": { 38 | 'myhost': { 39 
| "auth": base64.b64encode('myusername:mypassword') 40 | } 41 | } 42 | } 43 | h = hexifier.DockerAuthConfigHexifier('myhost', 'myusername', 'mypassword') 44 | self.assertEquals(expected, h._create_config_contents()) 45 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_marathon_event.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from marathon_deployments import MarathonEvent 4 | 5 | 6 | class MarathonEventTest(unittest.TestCase): 7 | def test_not_none(self): 8 | m = MarathonEvent({}) 9 | self.assertIsNotNone(m) 10 | 11 | def test_get_event_type(self): 12 | m = MarathonEvent({'eventType': 'SomeEvent'}) 13 | self.assertEqual(m._get_event_type(), 'SomeEvent') 14 | 15 | def test_get_event_type_unknown(self): 16 | m = MarathonEvent({'blah': 'SomeEvent'}) 17 | self.assertEqual(m._get_event_type(), 'UNKNOWN') 18 | 19 | def test_get_app_id(self): 20 | m = MarathonEvent({'appId': 'appid'}) 21 | self.assertEqual(m.app_id(), 'appid') 22 | 23 | def test_get_app_id_missing(self): 24 | m = MarathonEvent({'blah': 'appid'}) 25 | self.assertRaises(KeyError, m.app_id) 26 | 27 | def test_get_task_id(self): 28 | m = MarathonEvent({'taskId': 'taskid'}) 29 | self.assertEqual(m.task_id(), 'taskid') 30 | 31 | def test_get_task_id_missing(self): 32 | m = MarathonEvent({'blah': 'blah'}) 33 | self.assertRaises(KeyError, m.task_id) 34 | 35 | def test_get_slave_id(self): 36 | m = MarathonEvent({'slaveId': 'slaveid'}) 37 | self.assertEqual(m.slave_id(), 'slaveid') 38 | 39 | def test_get_slave_id_missing(self): 40 | m = MarathonEvent({'blah': 'blah'}) 41 | self.assertRaises(KeyError, m.slave_id) 42 | 43 | def test_get_task_status(self): 44 | m = MarathonEvent({'taskStatus': 'status'}) 45 | self.assertEqual(m._get_task_status(), 'status') 46 | 47 | def test_get_task_status_missing(self): 48 | m = MarathonEvent({'blah': 'blah'}) 49 | self.assertRaises(KeyError, 
m._get_task_status) 50 | 51 | def test_is_status_update_true(self): 52 | m = MarathonEvent({'eventType': 'status_update_event'}) 53 | self.assertTrue(m.is_status_update()) 54 | 55 | def test_is_status_update_false(self): 56 | m = MarathonEvent({'eventType': 'BLAH'}) 57 | self.assertFalse(m.is_status_update()) 58 | 59 | def test_is_deployment_succeeded_true(self): 60 | m = MarathonEvent({'eventType': 'deployment_success'}) 61 | self.assertTrue(m.is_deployment_succeeded()) 62 | 63 | def test_is_deployment_succeeded_false(self): 64 | m = MarathonEvent({'eventType': 'BLAH'}) 65 | self.assertFalse(m.is_deployment_succeeded()) 66 | 67 | def test_is_task_failed_true(self): 68 | m = MarathonEvent({'taskStatus': 'TASK_FAILED'}) 69 | self.assertTrue(m.is_task_failed()) 70 | 71 | def test_is_task_failed_false(self): 72 | m = MarathonEvent({'taskStatus': 'BLAH'}) 73 | self.assertFalse(m.is_task_failed()) 74 | 75 | def test_is_task_staging_true(self): 76 | m = MarathonEvent({'taskStatus': 'TASK_STAGING'}) 77 | self.assertTrue(m.is_task_staging()) 78 | 79 | def test_is_task_staging_false(self): 80 | m = MarathonEvent({'taskStatus': 'BLAH'}) 81 | self.assertFalse(m.is_task_staging()) 82 | 83 | def test_is_task_killed_true(self): 84 | m = MarathonEvent({'taskStatus': 'TASK_KILLED'}) 85 | self.assertTrue(m.is_task_killed()) 86 | 87 | def test_is_task_killed_false(self): 88 | m = MarathonEvent({'taskStatus': 'BLAH'}) 89 | self.assertFalse(m.is_task_killed()) 90 | 91 | def test_is_task_running_true(self): 92 | m = MarathonEvent({'taskStatus': 'TASK_RUNNING'}) 93 | self.assertTrue(m.is_task_running()) 94 | 95 | def test_is_task_running_false(self): 96 | m = MarathonEvent({'taskStatus': 'BLAH'}) 97 | self.assertFalse(m.is_task_running()) 98 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_mesos.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mock 
import Mock, patch 3 | from mesos import Mesos 4 | 5 | def mocked_requests_get(*args, **kwargs): 6 | class MockResponse: 7 | def __init__(self, json_data, status_code): 8 | self.json_data = json_data 9 | self.status_code = status_code 10 | 11 | def json(self): 12 | return self.json_data 13 | 14 | def raise_for_status(self): 15 | if self.status_code >= 400: 16 | raise Exception('raise_for_status exception') 17 | else: 18 | pass 19 | if args[0].startswith('mesos/slaves/state.json'): 20 | return MockResponse({'slaves': [{'id': 'slave_1'}, {'id': 'slave_2'}]}, 200) 21 | elif args[0].startswith('slave/slave_1/state.json'): 22 | state = { 23 | 'frameworks': [], 24 | 'completed_frameworks': [{ 25 | 'id': 'framework_id', 26 | 'name': 'marathon', 27 | 'executors': [], 28 | 'completed_executors': [{ 29 | 'id': 'service_id', 30 | 'directory': 'completed_executor_1_directory', 31 | 'tasks':[], 32 | 'queued_tasks': [], 33 | 'completed_tasks': [{ 34 | 'id': 'service_id', 35 | 'framework_id': 'framework_id', 36 | 'slave_id': 'slave_id', 37 | 'state': 'TASK_STATE', 38 | 'statuses': [{ 39 | 'state': 'TASK_STATE_STATUS', 40 | 'timestamp': 1 41 | }] 42 | }] 43 | }] 44 | }] 45 | } 46 | return MockResponse(state, 200) 47 | elif args[0].startswith('slave/slave_2/state.json'): 48 | state = { 49 | 'frameworks': [], 50 | 'completed_frameworks': [{ 51 | 'id': 'framework_id_2', 52 | 'name': 'marathon', 53 | 'executors': [], 54 | 'completed_executors': [{ 55 | 'id': 'service_id', 56 | 'directory': 'completed_executor_2_directory', 57 | 'tasks':[], 58 | 'queued_tasks': [], 59 | 'completed_tasks': [{ 60 | 'id': 'service_id', 61 | 'framework_id': 'framework_id_2', 62 | 'slave_id': 'slave_id', 63 | 'state': 'TASK_STATE', 64 | 'statuses': [{ 65 | 'state': 'TASK_STATE_STATUS', 66 | 'timestamp': 2 67 | }] 68 | }] 69 | }] 70 | }] 71 | } 72 | return MockResponse(state, 200) 73 | elif args[0].startswith('save/404/state.json'): 74 | return MockResponse({}, 404) 75 | 76 | return MockResponse({}, 404) 77 
| 78 | 79 | class MesosTest(unittest.TestCase): 80 | @patch('acsclient.ACSClient') 81 | def test_not_none(self, mock_acs_client): 82 | m = Mesos(mock_acs_client) 83 | self.assertIsNotNone(m) 84 | 85 | @patch('acsclient.ACSClient') 86 | def test_get_request(self, mock_acs_client): 87 | m = Mesos(mock_acs_client) 88 | m._get_request('endpoint', 'path') 89 | mock_acs_client.make_request.assert_called_with('endpoint/path', 'get', port=80) 90 | 91 | @patch('acsclient.ACSClient') 92 | def test_get_slave_ids(self, mock_acs_client): 93 | mock_acs_client.make_request.side_effect = mocked_requests_get 94 | m = Mesos(mock_acs_client) 95 | actual = m._get_slave_ids() 96 | self.assertEqual(actual, ['slave_1', 'slave_2']) 97 | 98 | @patch('acsclient.ACSClient') 99 | def test_get_slave_state(self, mock_acs_client): 100 | mock_acs_client.make_request.side_effect = mocked_requests_get 101 | m = Mesos(mock_acs_client) 102 | actual = m._get_slave_state('slave_1') 103 | expected = state = { 104 | 'frameworks': [], 105 | 'completed_frameworks': [{ 106 | 'id': 'framework_id', 107 | 'name': 'marathon', 108 | 'executors': [], 109 | 'completed_executors': [{ 110 | 'id': 'service_id', 111 | 'directory': 'completed_executor_1_directory', 112 | 'tasks':[], 113 | 'queued_tasks': [], 114 | 'completed_tasks': [{ 115 | 'id': 'service_id', 116 | 'framework_id': 'framework_id', 117 | 'slave_id': 'slave_id', 118 | 'state': 'TASK_STATE', 119 | 'statuses': [{ 120 | 'state': 'TASK_STATE_STATUS', 121 | 'timestamp': 1 122 | }] 123 | }] 124 | }] 125 | }] 126 | } 127 | self.assertEqual(actual, expected) 128 | 129 | @patch('acsclient.ACSClient') 130 | def test_get_slave_state_404(self, mock_acs_client): 131 | mock_acs_client.make_request.side_effect = mocked_requests_get 132 | m = Mesos(mock_acs_client) 133 | self.assertRaises(Exception, m._get_slave_state, '404') 134 | 135 | @patch('acsclient.ACSClient') 136 | def test_get_latest_task(self, mock_acs_client): 137 | mock_acs_client.make_request.side_effect = 
mocked_requests_get 138 | m = Mesos(mock_acs_client) 139 | actual = m.get_task('service_id') 140 | self.assertEqual(actual.task_id, 'service_id') 141 | self.assertEqual(actual.slave_id, 'slave_id') 142 | self.assertEqual(actual.framework_id, 'framework_id_2') 143 | self.assertEqual(actual.state, 'TASK_STATE') 144 | self.assertEqual(actual.directory, 'completed_executor_2_directory') 145 | self.assertEqual(actual.timestamp, 2) 146 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_mesos_task.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from mesos_task import MesosTask 3 | 4 | class MesosTaskTest(unittest.TestCase): 5 | def test_not_none(self): 6 | base_task = { 7 | 'id': 'mytask_id', 8 | 'slave_id': 'myslave_id', 9 | 'framework_id': 'myframework_id', 10 | 'state': 'mystate', 11 | 'statuses': [] 12 | } 13 | 14 | task = MesosTask(base_task, 'directory') 15 | self.assertIsNotNone(task) 16 | 17 | def test_missing_id(self): 18 | base_task = { 19 | 'slave_id': 'myslave_id', 20 | 'framework_id': 'myframework_id', 21 | 'state': 'mystate', 22 | 'statuses': [] 23 | } 24 | self.assertRaises(ValueError, MesosTask, base_task, 'directory') 25 | 26 | def test_missing_slave_id(self): 27 | base_task = { 28 | 'id': 'mytask_id', 29 | 'framework_id': 'myframework_id', 30 | 'state': 'mystate', 31 | 'statuses': [] 32 | } 33 | self.assertRaises(ValueError, MesosTask, base_task, 'directory') 34 | 35 | def test_missing_framework_id(self): 36 | base_task = { 37 | 'id': 'mytask_id', 38 | 'slave_id': 'myslave_id', 39 | 'state': 'mystate', 40 | 'statuses': [] 41 | } 42 | self.assertRaises(ValueError, MesosTask, base_task, 'directory') 43 | 44 | def test_missing_state(self): 45 | base_task = { 46 | 'id': 'mytask_id', 47 | 'slave_id': 'myslave_id', 48 | 'framework_id': 'myframework_id', 49 | 'statuses': [] 50 | } 51 | self.assertRaises(ValueError, MesosTask, 
base_task, 'directory') 52 | 53 | def test_missing_statuses(self): 54 | base_task = { 55 | 'id': 'mytask_id', 56 | 'slave_id': 'myslave_id', 57 | 'framework_id': 'myframework_id', 58 | 'state': 'mystate' 59 | } 60 | self.assertRaises(ValueError, MesosTask, base_task, 'directory') 61 | 62 | def test_values_set(self): 63 | base_task = { 64 | 'id': 'mytask_id', 65 | 'slave_id': 'myslave_id', 66 | 'framework_id': 'myframework_id', 67 | 'state': 'mystate', 68 | 'statuses': [] 69 | } 70 | 71 | task = MesosTask(base_task, 'directory') 72 | self.assertEqual(task.task_id, 'mytask_id') 73 | self.assertEqual(task.slave_id, 'myslave_id') 74 | self.assertEqual(task.framework_id, 'myframework_id') 75 | self.assertEqual(task.state, 'mystate') 76 | self.assertEqual(task.timestamp, -1) 77 | 78 | def test_sandbox_path(self): 79 | base_task = { 80 | 'id': 'mytask_id', 81 | 'slave_id': 'myslave_id', 82 | 'framework_id': 'myframework_id', 83 | 'state': 'mystate', 84 | 'statuses': [] 85 | } 86 | task = MesosTask(base_task, 'directory') 87 | expected = 'myslave_id/files/download?path=directory/myfile' 88 | actual = task.get_sandbox_download_path('myfile') 89 | 90 | self.assertEqual(actual, expected) 91 | 92 | def test_sandbox_path_empty_filename(self): 93 | base_task = { 94 | 'id': 'mytask_id', 95 | 'slave_id': 'myslave_id', 96 | 'framework_id': 'myframework_id', 97 | 'state': 'mystate', 98 | 'statuses': [] 99 | } 100 | task = MesosTask(base_task, 'directory') 101 | self.assertRaises(ValueError, task.get_sandbox_download_path, None) 102 | 103 | def test_is_failed(self): 104 | base_task = { 105 | 'id': 'mytask_id', 106 | 'slave_id': 'myslave_id', 107 | 'framework_id': 'myframework_id', 108 | 'state': 'TASK_FAILED', 109 | 'statuses': [] 110 | } 111 | task = MesosTask(base_task, 'directory') 112 | self.assertTrue(task.is_failed()) 113 | 114 | def test_is_failed_false(self): 115 | base_task = { 116 | 'id': 'mytask_id', 117 | 'slave_id': 'myslave_id', 118 | 'framework_id': 'myframework_id', 
119 | 'state': 'TASK_SOMETHING', 120 | 'statuses': [] 121 | } 122 | task = MesosTask(base_task, 'directory') 123 | self.assertFalse(task.is_failed()) 124 | 125 | def test_is_killed(self): 126 | base_task = { 127 | 'id': 'mytask_id', 128 | 'slave_id': 'myslave_id', 129 | 'framework_id': 'myframework_id', 130 | 'state': 'TASK_KILLED', 131 | 'statuses': [] 132 | } 133 | task = MesosTask(base_task, 'directory') 134 | self.assertTrue(task.is_killed()) 135 | 136 | def test_is_killing(self): 137 | base_task = { 138 | 'id': 'mytask_id', 139 | 'slave_id': 'myslave_id', 140 | 'framework_id': 'myframework_id', 141 | 'state': 'TASK_KILLING', 142 | 'statuses': [] 143 | } 144 | task = MesosTask(base_task, 'directory') 145 | self.assertTrue(task.is_killed()) 146 | 147 | def test_is_killed_false(self): 148 | base_task = { 149 | 'id': 'mytask_id', 150 | 'slave_id': 'myslave_id', 151 | 'framework_id': 'myframework_id', 152 | 'state': 'TASK_FALSE', 153 | 'statuses': [] 154 | } 155 | task = MesosTask(base_task, 'directory') 156 | self.assertFalse(task.is_killed()) -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-dcos/test_nginx.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import mock 4 | 5 | from marathon import Marathon 6 | from nginx import LoadBalancerApp 7 | 8 | 9 | class TestLoadBalancerApp(unittest.TestCase): 10 | 11 | @mock.patch.object(LoadBalancerApp, '_install') 12 | def test_ensure_exists(self, mock_install): 13 | marathon_helper_mock = mock.Mock(Marathon) 14 | compose_data = {'services': {'service-b': {'labels': {'com.microsoft.acs.dcos.marathon.vhost': 'www.contoso.com:80'}}}} 15 | a = LoadBalancerApp(marathon_helper_mock) 16 | a.ensure_exists(compose_data) 17 | self.assertTrue(mock_install.called) 18 | 19 | @mock.patch.object(LoadBalancerApp, '_install') 20 | def test_ensure_exists_not_called(self, mock_install): 21 | marathon_helper_mock = 
mock.Mock(Marathon) 22 | compose_data = {'services': {'service-b': {'labels': {'mylabel': 'www.contoso.com:80'}}}} 23 | a = LoadBalancerApp(marathon_helper_mock) 24 | a.ensure_exists(compose_data) 25 | self.assertFalse(marathon_helper_mock.ensure_exists.called) 26 | self.assertFalse(mock_install.called) 27 | 28 | @mock.patch.object(LoadBalancerApp, '_install') 29 | def test_ensure_exists_vhosts(self, mock_install): 30 | marathon_helper_mock = mock.Mock(Marathon) 31 | compose_data = {'services': {'service-b': {'labels': {'com.microsoft.acs.dcos.marathon.vhost': 'www.contoso.com:80'}}}} 32 | a = LoadBalancerApp(marathon_helper_mock) 33 | a.ensure_exists(compose_data) 34 | self.assertTrue(mock_install.called) 35 | 36 | @mock.patch.object(LoadBalancerApp, '_install') 37 | def test_ensure_exists_install_called_once(self, mock_install): 38 | marathon_helper_mock = mock.Mock(Marathon) 39 | compose_data = {'services': {'service-b': {'labels': {'mylabel': 123, 'com.microsoft.acs.dcos.marathon.vhost': 'www.contoso.com:80', 'com.microsoft.acs.dcos.marathon.vhost': 'api.contoso.com:81'}}}} 40 | a = LoadBalancerApp(marathon_helper_mock) 41 | a.ensure_exists(compose_data) 42 | self.assertEquals(1, mock_install.call_count) 43 | 44 | def test_has_external_label_true(self): 45 | marathon_helper_mock = mock.Mock(Marathon) 46 | service_info = {'labels': {'mylabel': 123, 'com.microsoft.acs.dcos.marathon.vhost': 'www.contoso.com:80', 'com.microsoft.acs.dcos.marathon.vhost': 'api.contoso.com:81'}} 47 | a = LoadBalancerApp(marathon_helper_mock) 48 | self.assertTrue(a._has_external_label(service_info)) 49 | 50 | def test_has_external_label_false(self): 51 | marathon_helper_mock = mock.Mock(Marathon) 52 | compose_data = {'labels': {'mylabel': 123}} 53 | a = LoadBalancerApp(marathon_helper_mock) 54 | self.assertFalse(a._has_external_label(compose_data)) 55 | -------------------------------------------------------------------------------- 
/src/tasks/dockerDeploy/acs-kubernetes/Dockerfile.task: -------------------------------------------------------------------------------- 1 | FROM python:2.7-onbuild 2 | ENTRYPOINT [ "python" ] 3 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/acsclient.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | import socket 4 | import subprocess 5 | import time 6 | from StringIO import StringIO 7 | 8 | import paramiko 9 | import requests 10 | from sshtunnel import SSHTunnelForwarder 11 | 12 | 13 | class ACSClient(object): 14 | """ 15 | Class for connecting to the ACS cluster and making requests 16 | """ 17 | current_tunnel = () 18 | # Max wait time (seconds) for tunnel to be established 19 | max_wait_time = 5 * 60 20 | 21 | def __init__(self, cluster_info): 22 | self.cluster_info = cluster_info 23 | self.tunnel_server = None 24 | self.is_direct = False 25 | self.is_running = False 26 | 27 | # If master_url is provided, we have a direct connection 28 | if self.cluster_info.api_endpoint: 29 | logging.debug('Using Direct connection') 30 | self.is_direct = True 31 | else: 32 | logging.debug('Using SSH connection') 33 | 34 | def shutdown(self): 35 | """ 36 | Stops the tunnel if its started 37 | """ 38 | if self.current_tunnel and self.is_running: 39 | logging.debug('Stopping SSH tunnel') 40 | self.current_tunnel[0].stop() 41 | self.is_running = False 42 | 43 | def _wait_for_tunnel(self, start_time, url): 44 | """ 45 | Waits until the SSH tunnel is available and 46 | we can start sending requests through it 47 | """ 48 | succeeded = False 49 | while not time.time() - start_time > self.max_wait_time: 50 | try: 51 | response = requests.get(url) 52 | if response.status_code == 200: 53 | succeeded = True 54 | self.is_running = True 55 | break 56 | except: 57 | time.sleep(5) 58 | 59 | if not succeeded: 60 | raise Exception( 61 | 'Could not 
establish connection to "{}".'.format( 62 | self.cluster_info.host)) 63 | 64 | def _get_private_key(self): 65 | """ 66 | Creates an RSAKey instance from provided private key string 67 | and password 68 | """ 69 | if not self.cluster_info.private_key: 70 | raise Exception('Private key was not provided') 71 | private_key_file = StringIO() 72 | private_key_file.write(self.cluster_info.private_key) 73 | private_key_file.seek(0) 74 | return paramiko.RSAKey.from_private_key(private_key_file, self.cluster_info.password) 75 | 76 | def _setup_tunnel_server(self): 77 | """ 78 | Gets the local port to access the tunnel 79 | """ 80 | if not self.current_tunnel: 81 | logging.debug('Create a new SSH tunnel') 82 | local_port = self.get_available_local_port() 83 | log = logging.getLogger() 84 | previous_log_level = log.level 85 | log.setLevel(logging.INFO) 86 | 87 | forwarder = SSHTunnelForwarder( 88 | ssh_address_or_host=(self.cluster_info.host, 89 | int(self.cluster_info.port)), 90 | ssh_username=self.cluster_info.username, 91 | ssh_pkey=self._get_private_key(), 92 | remote_bind_address=( 93 | 'localhost', self.cluster_info.get_api_endpoint_port()), 94 | local_bind_address=('0.0.0.0', int(local_port)), 95 | logger=log) 96 | forwarder.start() 97 | 98 | start_time = time.time() 99 | url = 'http://127.0.0.1:{}'.format(str(local_port)) 100 | self._wait_for_tunnel(start_time, url) 101 | 102 | self.current_tunnel = (forwarder, int(local_port)) 103 | log.setLevel(previous_log_level) 104 | 105 | return self.current_tunnel[1] 106 | 107 | def create_request_url(self, path): 108 | """ 109 | Creates the request URL from provided path. 
Depending on which 110 | connection type was picked, it will create an SSH tunnel 111 | """ 112 | if self.is_direct: 113 | raise NotImplementedError("Direct connection is not implemented yet") 114 | else: 115 | local_port = self._setup_tunnel_server() 116 | url = 'http://127.0.0.1:{}/{}'.format(str(local_port), path) 117 | return url 118 | 119 | def make_request(self, path, method, data=None, port=None, **kwargs): 120 | """ 121 | Makes an HTTP request with specified method 122 | """ 123 | url = self.create_request_url(path) 124 | logging.debug('%s: %s (DATA=%s)', method, url, data) 125 | 126 | if not hasattr(requests, method): 127 | raise Exception('Invalid method {}'.format(method)) 128 | 129 | method_to_call = getattr(requests, method) 130 | headers = { 131 | 'Content-type': 'application/json', 132 | } 133 | 134 | if not data: 135 | response = method_to_call( 136 | url, headers=headers, **kwargs) 137 | else: 138 | response = method_to_call( 139 | url, data, headers=headers, **kwargs) 140 | return response 141 | 142 | def get_request(self, path): 143 | """ 144 | Makes a GET request to an endpoint on the cluster 145 | :param path: Path part of the URL to make the request to 146 | :type path: String 147 | """ 148 | return self.make_request(path, 'get') 149 | 150 | def delete_request(self, path): 151 | """ 152 | Makes a DELETE request to an endpoint on the cluster 153 | :param path: Path part of the URL to make the request to 154 | :type path: String 155 | """ 156 | return self.make_request(path, 'delete') 157 | 158 | def post_request(self, path, post_data): 159 | """ 160 | Makes a POST request to an endpoint on the cluster 161 | :param path: Path part of the URL to make the request to 162 | :type path: String 163 | """ 164 | return self.make_request(path, 'post', data=post_data) 165 | 166 | def put_request(self, path, put_data=None, **kwargs): 167 | """ 168 | Makes a POST request to an endpoint on the cluster) 169 | :param path: Path part of the URL to make the 
request to 170 | :type path: String 171 | """ 172 | return self.make_request(path, 'put', data=put_data, **kwargs) 173 | 174 | def get_available_local_port(self): 175 | """ 176 | Gets a random, available local port 177 | """ 178 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 179 | sock.bind(('', 0)) 180 | sock.listen(1) 181 | port = sock.getsockname()[1] 182 | sock.close() 183 | return port 184 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/clusterinfo.py: -------------------------------------------------------------------------------- 1 | class ClusterInfo(object): 2 | """ 3 | Holds info about the ACS cluster 4 | """ 5 | def __init__(self, host, port, username, password, private_key, api_endpoint, orchestrator): 6 | self.host = host 7 | self.port = port 8 | self.username = username 9 | self.password = password 10 | self.private_key = private_key 11 | self.api_endpoint = api_endpoint 12 | self.orchestrator = orchestrator 13 | 14 | def get_api_endpoint_port(self): 15 | """ 16 | Gets the API endpoint port based on the orchestrator type 17 | """ 18 | if self.orchestrator.lower() == 'kubernetes': 19 | return 8080 20 | elif self.orchestrator.lower() == 'dcos': 21 | return 80 22 | else: 23 | raise ValueError('Invalid orchestrator type') 24 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/deploy.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import sys 4 | import traceback 5 | 6 | import dockercomposeparser 7 | from clusterinfo import ClusterInfo 8 | from registryinfo import RegistryInfo 9 | from groupinfo import GroupInfo 10 | 11 | 12 | class VstsLogFormatter(logging.Formatter): 13 | error_format = logging.Formatter('##[error]%(message)s') 14 | warning_format = logging.Formatter('##[warning]%(message)s') 15 | debug_format = 
logging.Formatter('##[debug]%(message)s') 16 | default_format = logging.Formatter('%(message)s') 17 | 18 | def format(self, record): 19 | if record.levelno == logging.ERROR: 20 | return self.error_format.format(record) 21 | elif record.levelno == logging.WARNING: 22 | return self.warning_format.format(record) 23 | elif record.levelno == logging.DEBUG: 24 | return self.debug_format.format(record) 25 | return self.default_format.format(record) 26 | 27 | 28 | def get_arg_parser(): 29 | """ 30 | Sets up the argument parser 31 | """ 32 | parser = argparse.ArgumentParser( 33 | description='Translate docker-compose.yml file to marathon.json file', 34 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 35 | 36 | parser.add_argument('--compose-file', 37 | help='[required] Docker-compose.yml file') 38 | parser.add_argument('--api-endpoint-url', 39 | help='API endpoint URL') 40 | parser.add_argument('--orchestrator', 41 | help='Orchestrator type (DCOS or Kubernetes)') 42 | 43 | parser.add_argument('--group-name', 44 | help='[required] Application group name') 45 | parser.add_argument('--group-qualifier', 46 | help='[required] Application group qualifier') 47 | parser.add_argument('--group-version', 48 | help='[required] Application group version') 49 | parser.add_argument('--deploy-ingress-controller', 50 | help='[required] Should Ingress controller be deployed or not', 51 | dest='deploy_ingress_controller', action='store_true') 52 | 53 | parser.add_argument('--registry-host', 54 | help='Registry host (e.g. 
myregistry.azurecr-test.io:1234)') 55 | parser.add_argument('--registry-username', 56 | help='Registry username') 57 | parser.add_argument('--registry-password', 58 | help='Registry password') 59 | 60 | parser.add_argument('--acs-host', 61 | help='ACS host') 62 | parser.add_argument('--acs-port', 63 | help='ACS username') 64 | parser.add_argument('--acs-username', 65 | help='ACS username') 66 | parser.add_argument('--acs-password', 67 | help='ACS password') 68 | parser.add_argument('--acs-private-key', 69 | help='ACS private key') 70 | 71 | parser.add_argument('--verbose', 72 | help='Turn on verbose logging', 73 | action='store_true') 74 | return parser 75 | 76 | 77 | def process_arguments(): 78 | """ 79 | Makes sure required arguments are provided 80 | """ 81 | arg_parser = get_arg_parser() 82 | args = arg_parser.parse_args() 83 | 84 | if args.compose_file is None: 85 | arg_parser.error('argument --compose-file is required') 86 | if args.group_name is None: 87 | arg_parser.error('argument --group-name is required') 88 | if args.group_qualifier is None: 89 | arg_parser.error('argument --group-qualifier is required') 90 | if args.group_version is None: 91 | arg_parser.error('argument --group-version is required') 92 | return args 93 | 94 | 95 | def init_logger(verbose): 96 | """ 97 | Initializes the logger and sets the custom formatter for VSTS 98 | """ 99 | logging_level = logging.DEBUG if verbose else logging.INFO 100 | vsts_formatter = VstsLogFormatter() 101 | stream_handler = logging.StreamHandler() 102 | stream_handler.setFormatter(vsts_formatter) 103 | logging.root.name = 'ACS-Deploy' 104 | logging.root.setLevel(logging_level) 105 | logging.root.addHandler(stream_handler) 106 | 107 | # Don't show INFO log messages from requests library 108 | logging.getLogger("requests").setLevel(logging.WARNING) 109 | 110 | if __name__ == '__main__': 111 | arguments = process_arguments() 112 | init_logger(arguments.verbose) 113 | 114 | cluster_info = ClusterInfo( 115 | 
arguments.acs_host, arguments.acs_port, arguments.acs_username, arguments.acs_password, 116 | arguments.acs_private_key, arguments.api_endpoint_url, arguments.orchestrator) 117 | 118 | registry_info = RegistryInfo( 119 | arguments.registry_host, arguments.registry_username, arguments.registry_password) 120 | 121 | group_info = GroupInfo(arguments.group_name, 122 | arguments.group_qualifier, arguments.group_version) 123 | 124 | try: 125 | with dockercomposeparser.DockerComposeParser( 126 | arguments.compose_file, cluster_info, registry_info, group_info, arguments.deploy_ingress_controller) as compose_parser: 127 | compose_parser.deploy() 128 | sys.exit(0) 129 | except Exception as deployment_exc: 130 | import traceback 131 | traceback.print_exc() 132 | logging.error('Error occurred during deployment: \n%s', deployment_exc) 133 | sys.exit(1) 134 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/groupinfo.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | 3 | class GroupInfo(object): 4 | """ 5 | Holds info about the deployment 6 | """ 7 | def __init__(self, group_name, group_qualifier, group_version): 8 | self.name = group_name 9 | self.qualifier = group_qualifier 10 | self.version = group_version 11 | 12 | def _get_hash(self, input_string): 13 | """ 14 | Gets the hashed string 15 | """ 16 | hash_value = hashlib.sha1(input_string) 17 | digest = hash_value.hexdigest() 18 | return digest 19 | 20 | def get_id(self, include_version=True): 21 | """ 22 | Gets the group id. 23 | .. 
24 | """ 25 | hash_qualifier = self._get_hash(self.qualifier)[:8] 26 | 27 | if include_version: 28 | return '{}_{}_{}'.format(self.name, hash_qualifier, self.version) 29 | 30 | return '{}_{}'.format(self.name, hash_qualifier) 31 | 32 | def get_namespace(self): 33 | """ 34 | Gets the value used for service namespace 35 | """ 36 | return '{}-{}'.format(self.name, self.version) 37 | 38 | def get_version(self): 39 | """ 40 | Gets the group version 41 | """ 42 | return self.version 43 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/ingress/default-backend-svc.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Service", 3 | "spec": { 4 | "ports": [ 5 | { 6 | "targetPort": 8080, 7 | "protocol": "TCP", 8 | "port": 80, 9 | "name": "default-http-backend" 10 | } 11 | ], 12 | "selector": { 13 | "app": "default-http-backend" 14 | } 15 | }, 16 | "apiVersion": "v1", 17 | "metadata": { 18 | "name": "default-http-backend", 19 | "labels": { 20 | "app": "default-http-backend" 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/ingress/default-backend.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "extensions/v1beta1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "name": "default-http-backend" 6 | }, 7 | "spec": { 8 | "replicas": 1, 9 | "selector": { 10 | "matchLabels": { 11 | "app": "default-http-backend" 12 | } 13 | }, 14 | "template": { 15 | "metadata": { 16 | "labels": { 17 | "app": "default-http-backend" 18 | } 19 | }, 20 | "spec": { 21 | "terminationGracePeriodSeconds": 60, 22 | "containers": [ 23 | { 24 | "name": "default-http-backend", 25 | "image": "gcr.io/google_containers/defaultbackend:1.0", 26 | "livenessProbe": { 27 | "httpGet": { 28 | "path": "/healthz", 29 | "port": 8080, 30 | "scheme": 
"HTTP" 31 | }, 32 | "initialDelaySeconds": 30, 33 | "timeoutSeconds": 5 34 | }, 35 | "ports": [ 36 | { 37 | "containerPort": 8080 38 | } 39 | ], 40 | "resources": { 41 | "limits": { 42 | "cpu": "10m", 43 | "memory": "20Mi" 44 | }, 45 | "requests": { 46 | "cpu": "10m", 47 | "memory": "20Mi" 48 | } 49 | } 50 | } 51 | ] 52 | } 53 | } 54 | } 55 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/ingress/nginx-ingress-lb-svc.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Service", 3 | "spec": { 4 | "ports": [ 5 | { 6 | "targetPort": 80, 7 | "protocol": "TCP", 8 | "port": 80, 9 | "name": "http" 10 | }, 11 | { 12 | "targetPort": 443, 13 | "protocol": "TCP", 14 | "port": 443, 15 | "name": "https" 16 | } 17 | ], 18 | "selector": { 19 | "k8s-app": "nginx-ingress-controller" 20 | }, 21 | "type": "LoadBalancer" 22 | }, 23 | "apiVersion": "v1", 24 | "metadata": { 25 | "name": "nginx-ingress-controller", 26 | "labels": { 27 | "k8s-app": "nginx-ingress-controller" 28 | } 29 | } 30 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/ingress/nginx-ingress-lb.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "extensions/v1beta1", 3 | "kind": "Deployment", 4 | "metadata": { 5 | "name": "nginx-ingress-controller", 6 | "labels": { 7 | "k8s-app": "nginx-ingress-controller" 8 | }, 9 | "namespace": "default" 10 | }, 11 | "spec": { 12 | "replicas": 1, 13 | "template": { 14 | "metadata": { 15 | "labels": { 16 | "k8s-app": "nginx-ingress-controller" 17 | } 18 | }, 19 | "spec": { 20 | "terminationGracePeriodSeconds": 60, 21 | "containers": [ 22 | { 23 | "image": "gcr.io/google_containers/nginx-ingress-controller:0.8.3", 24 | "name": "nginx-ingress-controller", 25 | "imagePullPolicy": "Always", 26 | "readinessProbe": { 27 | "httpGet": 
{ 28 | "path": "/healthz", 29 | "port": 10254, 30 | "scheme": "HTTP" 31 | } 32 | }, 33 | "livenessProbe": { 34 | "httpGet": { 35 | "path": "/healthz", 36 | "port": 10254, 37 | "scheme": "HTTP" 38 | }, 39 | "initialDelaySeconds": 10, 40 | "timeoutSeconds": 1 41 | }, 42 | "ports": [ 43 | { 44 | "containerPort": 80, 45 | "hostPort": 80 46 | }, 47 | { 48 | "containerPort": 443, 49 | "hostPort": 443 50 | } 51 | ], 52 | "env": [ 53 | { 54 | "name": "POD_NAME", 55 | "valueFrom": { 56 | "fieldRef": { 57 | "fieldPath": "metadata.name" 58 | } 59 | } 60 | }, 61 | { 62 | "name": "POD_NAMESPACE", 63 | "valueFrom": { 64 | "fieldRef": { 65 | "fieldPath": "metadata.namespace" 66 | } 67 | } 68 | } 69 | ], 70 | "args": [ 71 | "/nginx-ingress-controller", 72 | "--default-backend-service=$(POD_NAMESPACE)/default-http-backend" 73 | ] 74 | } 75 | ] 76 | } 77 | } 78 | } 79 | } -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/ingress_controller.py: -------------------------------------------------------------------------------- 1 | 2 | import os 3 | import json 4 | import logging 5 | import time 6 | 7 | 8 | class IngressController(object): 9 | """ 10 | This class takes care of deploying Nginx Ingress load balancer 11 | and default backend for the load balancer 12 | 13 | Deploying this will expose an IP on Azure load balancer. 
14 | """ 15 | 16 | # Sadly, it can take 5 minutes or more for 17 | # Azure to open up a port on LB 18 | external_ip_max_wait_time = 10 * 60 19 | 20 | DEFAULT_NAMESPACE = 'default' 21 | DEFAULT_BACKEND_DEPLOYMENT_FILE = 'ingress/default-backend.json' 22 | DEFAULT_BACKEND_SERVICE_FILE = 'ingress/default-backend-svc.json' 23 | NGINX_INGRESS_DEPLOYMENT_FILE = 'ingress/nginx-ingress-lb.json' 24 | NGINX_INGRESS_SERVICE_FILE = 'ingress/nginx-ingress-lb-svc.json' 25 | 26 | DEFAULT_BACKEND_NAME = 'default-http-backend' 27 | NGINX_INGRESS_LB_NAME = 'nginx-ingress-controller' 28 | 29 | def __init__(self, kubernetes): 30 | self.kubernetes = kubernetes 31 | 32 | def deploy(self, wait_for_external_ip=False): 33 | """ 34 | Deploys the default backend and Nginx Ingress load balacer 35 | if needed 36 | """ 37 | start_timestamp = time.time() 38 | logging.info('Deploying default backend') 39 | self._ensure_default_backend() 40 | logging.info('Deploying Nginx Ingress Load balancer') 41 | self._ensure_nginx_ingress_lb() 42 | 43 | if wait_for_external_ip: 44 | self._wait_for_external_ip(start_timestamp) 45 | 46 | def get_external_ip(self): 47 | """ 48 | Gets the ExternalIP where the Nginx loadbalacer is exposed on 49 | """ 50 | service = self.kubernetes.get_service( 51 | IngressController.NGINX_INGRESS_LB_NAME, IngressController.DEFAULT_NAMESPACE) 52 | external_ip = None 53 | 54 | try: 55 | external_ip = service['status']['loadBalancer']['ingress'][0]['ip'] 56 | except KeyError: 57 | logging.debug('Error getting [status][loadBalancer][ingress]') 58 | return None 59 | 60 | return external_ip 61 | 62 | def _wait_for_external_ip(self, start_timestamp): 63 | """ 64 | Waits for the external IP to become active 65 | """ 66 | ip_obtained = False 67 | timeout_exceeded = False 68 | 69 | logging.info('Waiting for ExternalIP') 70 | while not ip_obtained: 71 | if self._wait_time_exceeded(self.external_ip_max_wait_time, start_timestamp): 72 | timeout_exceeded = True 73 | break 74 | external_ip = 
self.get_external_ip() 75 | 76 | if external_ip: 77 | ip_obtained = True 78 | break 79 | time.sleep(1) 80 | 81 | if timeout_exceeded: 82 | raise Exception('Timeout exceeded waiting for ExternalIP') 83 | 84 | if ip_obtained: 85 | logging.info('ExternalIP obtained') 86 | 87 | def _ensure_default_backend(self): 88 | """ 89 | Ensures default backed deployment and 90 | service are deployed 91 | """ 92 | self._ensure_service(IngressController.DEFAULT_BACKEND_NAME, 93 | IngressController.DEFAULT_NAMESPACE, 94 | IngressController.DEFAULT_BACKEND_SERVICE_FILE) 95 | self._ensure_deployment(IngressController.DEFAULT_BACKEND_NAME, 96 | IngressController.DEFAULT_NAMESPACE, 97 | IngressController.DEFAULT_BACKEND_DEPLOYMENT_FILE) 98 | 99 | def _ensure_nginx_ingress_lb(self): 100 | """ 101 | Ensures NGINX ingress loadbalancer deployment and 102 | service are deployed 103 | """ 104 | self._ensure_service(IngressController.NGINX_INGRESS_LB_NAME, 105 | IngressController.DEFAULT_NAMESPACE, 106 | IngressController.NGINX_INGRESS_SERVICE_FILE) 107 | self._ensure_deployment(IngressController.NGINX_INGRESS_LB_NAME, 108 | IngressController.DEFAULT_NAMESPACE, 109 | IngressController.NGINX_INGRESS_DEPLOYMENT_FILE) 110 | 111 | def _ensure_service(self, name, namespace, json_file): 112 | """ 113 | Ensures service exists and if not it deploys it 114 | """ 115 | if not self.kubernetes.service_exists(name, namespace): 116 | logging.info('Deploying "%s" service', name) 117 | service_json = self._load_json_from_file(json_file) 118 | self.kubernetes.create_service( 119 | json.dumps(service_json), namespace) 120 | 121 | def _ensure_deployment(self, name, namespace, json_file): 122 | """ 123 | Ensures deployment exists and if not it deploys it 124 | """ 125 | if not self.kubernetes.deployment_exists(name, namespace): 126 | logging.info('Creating deployment "%s"', name) 127 | service_json = self._load_json_from_file(json_file) 128 | self.kubernetes.create_deployment( 129 | json.dumps(service_json), 
namespace, wait_for_complete=True) 130 | 131 | def _load_json_from_file(self, file_path): 132 | """ 133 | Gets json contents from a file 134 | """ 135 | full_path = os.path.join(os.getcwd(), file_path) 136 | with open(full_path) as json_file: 137 | data = json.load(json_file) 138 | return data 139 | 140 | def _wait_time_exceeded(self, max_wait, timestamp): 141 | """ 142 | Checks if the wait time was exceeded. 143 | """ 144 | return time.time() - timestamp > max_wait 145 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/portparser.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | class PortParser(object): 4 | def __init__(self, service_info): 5 | self.service_info = service_info 6 | 7 | def parse_private_ports(self): 8 | """ 9 | Parses the 'expose' key in the docker-compose file and returns a 10 | list of tuples with port numbers. 11 | """ 12 | port_tuple_list = [] 13 | 14 | if 'expose' not in self.service_info: 15 | return port_tuple_list 16 | 17 | for port_entry in self.service_info['expose']: 18 | if self._is_number(port_entry): 19 | port_tuple_list.append((int(port_entry), int(port_entry))) 20 | else: 21 | raise ValueError( 22 | 'Port number "%s" is not a valid number', port_entry) 23 | return port_tuple_list 24 | 25 | def parse_internal_ports(self): 26 | """ 27 | Parses the 'ports' key in the docker-compose file and returns a list of 28 | tuples with port numbers. 
29 | """ 30 | port_tuple_list = [] 31 | 32 | if 'ports' not in self.service_info: 33 | return port_tuple_list 34 | 35 | for port_entry in self.service_info['ports']: 36 | if ':' in str(port_entry): 37 | split = port_entry.split(':') 38 | vip_port = split[0] 39 | container_port = split[1] 40 | if self._is_port_range(vip_port) and self._is_port_range(container_port): 41 | # "8080-8090:9080-9090" 42 | if self._are_port_ranges_same_length(vip_port, container_port): 43 | vip_start, vip_end = self._split_port_range(vip_port) 44 | container_start, container_end = self._split_port_range( 45 | container_port) 46 | # vp = vip_port, cp = container_port; we do +1 on the end range to 47 | # include the last port as well 48 | for vp, cp in zip(range(vip_start, vip_end + 1), range(container_start, container_end + 1)): 49 | port_tuple_list.append((int(vp), int(cp))) 50 | else: 51 | raise ValueError('Port ranges "{}" and "{}" are not equal in length', 52 | vip_port, container_port) 53 | else: 54 | # "8080:8080" 55 | if self._is_number(vip_port) and self._is_number(container_port): 56 | port_tuple_list.append( 57 | (int(vip_port), int(container_port))) 58 | else: 59 | # e.g. 
invalid entry: 8080-8082:9000 60 | raise ValueError( 61 | 'One of the ports is not a valid number or a valid range') 62 | else: 63 | if self._is_port_range(port_entry): 64 | # "3000-3005" 65 | range_start, range_end = self._split_port_range(port_entry) 66 | for i in range(range_start, range_end + 1): 67 | port_tuple_list.append((i, i)) 68 | else: 69 | # "3000" 70 | if self._is_number(port_entry): 71 | port_tuple_list.append( 72 | (int(port_entry), int(port_entry))) 73 | else: 74 | raise ValueError( 75 | 'One of the ports is not a valid number') 76 | return port_tuple_list 77 | 78 | 79 | def get_all_vhosts(self): 80 | """ 81 | Gets a dictionary with all vhosts and their ports 82 | """ 83 | vhost_label = 'com.microsoft.acs.kubernetes.vhost' 84 | vhosts_label = 'com.microsoft.acs.kubernetes.vhosts' 85 | all_vhosts = {} 86 | 87 | if 'labels' not in self.service_info: 88 | return {} 89 | 90 | for label in self.service_info['labels']: 91 | if label.lower() == vhosts_label: 92 | parsed = self._parse_vhost_json(self.service_info['labels'][label]) 93 | all_vhosts = self._merge_dicts(all_vhosts, parsed) 94 | elif label.lower() == vhost_label: 95 | vhost_item = self.service_info['labels'][label] 96 | vhost, port = self._parse_vhost_label(vhost_item) 97 | all_vhosts[vhost] = port 98 | else: 99 | if '=' in label: 100 | split = label.split('=') 101 | if split[0].lower() == vhost_label: 102 | # "vhost='www.contoto.com:80'" 103 | vhost, port = self._parse_vhost_label(split[1]) 104 | all_vhosts[vhost] = port 105 | elif split[0].lower() == vhosts_label: 106 | # "vhosts=['www.blah.com:80','api.blah.com:81']" 107 | parsed = self._parse_vhost_json(split[1].replace("'", '"')) 108 | all_vhosts = self._merge_dicts(all_vhosts, parsed) 109 | return all_vhosts 110 | 111 | def _parse_vhost_label(self, vhost_label): 112 | """ 113 | Parses the vhost label string (host:[port]) and 114 | returns a tuple (host, port) 115 | """ 116 | if not vhost_label: 117 | return None 118 | 119 | vhost = 
vhost_label 120 | vhost_port = 80 121 | if ':' in vhost_label: 122 | vhost_split = vhost_label.split(':') 123 | vhost = vhost_split[0] 124 | vhost_port = vhost_split[1] 125 | 126 | return vhost, int(vhost_port) 127 | 128 | def _parse_vhost_json(self, vhost_json): 129 | """ 130 | Parse the vhosts JSON value 131 | """ 132 | if not vhost_json: 133 | return None 134 | parsed = {} 135 | 136 | try: 137 | vhost_items = json.loads(vhost_json) 138 | except ValueError: 139 | return parsed 140 | 141 | for item in vhost_items: 142 | vhost, port = self._parse_vhost_label(item) 143 | parsed[vhost] = port 144 | return parsed 145 | 146 | def _is_number(self, input_str): 147 | """ 148 | Checks if the string is a number or not 149 | """ 150 | try: 151 | int(input_str) 152 | return True 153 | except ValueError: 154 | return False 155 | 156 | def _is_port_range(self, port_entry): 157 | """ 158 | Checks if the provided string is a port entry or not 159 | """ 160 | if not port_entry: 161 | return False 162 | 163 | if '-' in str(port_entry) and str(port_entry).count('-') == 1: 164 | split = port_entry.split('-') 165 | first_part = split[0] 166 | second_part = split[1] 167 | return self._is_number(first_part) and self._is_number(second_part) 168 | return False 169 | 170 | def _split_port_range(self, port_range): 171 | """ 172 | Splits a port range and returns a tuple with start and end port 173 | """ 174 | if not self._is_port_range(port_range): 175 | raise ValueError( 176 | 'Provided value "%s" is not a port range', port_range) 177 | split = port_range.split('-') 178 | return (int(split[0]), int(split[1])) 179 | 180 | def _are_port_ranges_same_length(self, first_range, second_range): 181 | """ 182 | Checks if two port ranges are the same length 183 | """ 184 | 185 | if not self._is_port_range(first_range) or not self._is_port_range(second_range): 186 | raise ValueError( 187 | 'At least one of the provided values is not a port range') 188 | 189 | first_split_start, first_split_end = 
self._split_port_range( 190 | first_range) 191 | second_split_start, second_split_end = self._split_port_range( 192 | second_range) 193 | 194 | return len(range(first_split_start, first_split_end)) == len(range(second_split_start, second_split_end)) 195 | 196 | def _merge_dicts(self, dict_a, dict_b): 197 | """ 198 | Merges two dictionaries 199 | """ 200 | result = dict_a.copy() 201 | result.update(dict_b) 202 | return result -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/registryinfo.py: -------------------------------------------------------------------------------- 1 | import base64 2 | import json 3 | 4 | 5 | class RegistryInfo(object): 6 | """ 7 | Holds info about the Docker registry 8 | """ 9 | 10 | def __init__(self, host, username, password): 11 | self.host = host 12 | self.username = username 13 | self.password = password 14 | 15 | def get_secret_name(self): 16 | """ 17 | Gets the value used for the secret name 18 | """ 19 | return self.host 20 | 21 | def create_secret_json(self): 22 | """ 23 | Creates the JSON with Kubernetes secret object 24 | """ 25 | return json.dumps({ 26 | "apiVersion": "v1", 27 | "kind": "Secret", 28 | "metadata": { 29 | "name": self.host 30 | }, 31 | "data": { 32 | ".dockerconfigjson": self._get_encoded_config() 33 | }, 34 | "type": "kubernetes.io/dockerconfigjson" 35 | }) 36 | 37 | def _get_encoded_config(self): 38 | """ 39 | Gets the config.json contents as an base64 encoded string 40 | """ 41 | config = { 42 | "auths": { 43 | self.host: { 44 | "auth": base64.b64encode(self.username + ':' + self.password) 45 | } 46 | } 47 | } 48 | return base64.b64encode(json.dumps(config)) 49 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/acs-kubernetes/requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml==3.12 2 | requests==2.12.3 3 | sshtunnel==0.1.1 4 | 
sseclient==0.0.14 5 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerComposeConnection.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as tl from "vsts-task-lib/task"; 7 | import * as tr from "vsts-task-lib/toolrunner"; 8 | import * as yaml from "js-yaml"; 9 | import DockerConnection from "./dockerConnection"; 10 | 11 | export default class DockerComposeConnection extends DockerConnection { 12 | private dockerComposePath: string; 13 | private dockerComposeFile: string; 14 | private dockerComposeVersion: string; 15 | private additionalDockerComposeFiles: string[]; 16 | private requireAdditionalDockerComposeFiles: boolean; 17 | private projectName: string; 18 | private finalComposeFile: string; 19 | 20 | constructor() { 21 | super(); 22 | this.dockerComposePath = tl.which("docker-compose", true); 23 | this.dockerComposeFile = tl.globFirst(tl.getInput("dockerComposeFile", true)); 24 | if (!this.dockerComposeFile) { 25 | throw new Error("No Docker Compose file matching " + tl.getInput("dockerComposeFile") + " was found."); 26 | } 27 | this.dockerComposeVersion = "2"; 28 | this.additionalDockerComposeFiles = tl.getDelimitedInput("additionalDockerComposeFiles", "\n"); 29 | this.requireAdditionalDockerComposeFiles = tl.getBoolInput("requireAdditionalDockerComposeFiles"); 30 | this.projectName = tl.getInput("projectName"); 31 | } 32 | 33 | public open(hostEndpoint?: string, registryEndpoint?: string): any { 34 | super.open(hostEndpoint, registryEndpoint); 35 | 36 | if (this.hostUrl) { 37 | process.env["DOCKER_HOST"] = this.hostUrl; 38 | process.env["DOCKER_TLS_VERIFY"] = 1; 39 | process.env["DOCKER_CERT_PATH"] = this.certsDir; 40 | } 41 | 42 | tl.getDelimitedInput("dockerComposeFileArgs", "\n").forEach(envVar => { 43 | var tokens = 
envVar.split("="); 44 | if (tokens.length < 2) { 45 | throw new Error("Environment variable '" + envVar + "' is invalid."); 46 | } 47 | process.env[tokens[0].trim()] = tokens.slice(1).join("=").trim(); 48 | }); 49 | 50 | return this.getImages(true).then(images => { 51 | var qualifyImageNames = tl.getBoolInput("qualifyImageNames"); 52 | if (!qualifyImageNames) { 53 | return; 54 | } 55 | var agentDirectory = tl.getVariable("Agent.HomeDirectory"); 56 | this.finalComposeFile = path.join(agentDirectory, ".docker-compose." + Date.now() + ".yml"); 57 | var services = {}; 58 | if (qualifyImageNames) { 59 | for (var serviceName in images) { 60 | images[serviceName] = this.qualifyImageName(images[serviceName]); 61 | } 62 | } 63 | for (var serviceName in images) { 64 | services[serviceName] = { 65 | image: images[serviceName] 66 | }; 67 | } 68 | fs.writeFileSync(this.finalComposeFile, yaml.safeDump({ 69 | version: this.dockerComposeVersion, 70 | services: services 71 | }, { lineWidth: -1 } as any)); 72 | }); 73 | } 74 | 75 | public createComposeCommand(): tr.ToolRunner { 76 | var command = tl.tool(this.dockerComposePath); 77 | 78 | command.arg(["-f", this.dockerComposeFile]); 79 | 80 | var basePath = path.dirname(this.dockerComposeFile); 81 | this.additionalDockerComposeFiles.forEach(file => { 82 | // If the path is relative, resolve it 83 | if (!path.isAbsolute(file)) { 84 | file = path.join(basePath, file); 85 | } 86 | if (this.requireAdditionalDockerComposeFiles || tl.exist(file)) { 87 | command.arg(["-f", file]); 88 | } 89 | }); 90 | if (this.finalComposeFile) { 91 | command.arg(["-f", this.finalComposeFile]); 92 | } 93 | 94 | if (this.projectName) { 95 | command.arg(["-p", this.projectName]); 96 | } 97 | 98 | return command; 99 | } 100 | 101 | public getCombinedConfig(imageDigestComposeFile?: string): any { 102 | var command = this.createComposeCommand(); 103 | if (imageDigestComposeFile) { 104 | command.arg(["-f", imageDigestComposeFile]); 105 | } 106 | 
command.arg("config"); 107 | var result = ""; 108 | command.on("stdout", data => { 109 | result += data; 110 | }); 111 | command.on("errline", line => { 112 | tl.error(line); 113 | }); 114 | return command.exec({ silent: true } as any).then(() => result); 115 | } 116 | 117 | public getImages(builtOnly?: boolean): any { 118 | return this.getCombinedConfig().then(input => { 119 | var doc = yaml.safeLoad(input); 120 | if (doc.version) { 121 | this.dockerComposeVersion = doc.version; 122 | } 123 | var projectName = this.projectName; 124 | if (!projectName) { 125 | projectName = path.basename(path.dirname(this.dockerComposeFile)); 126 | } 127 | var images: any = {}; 128 | for (var serviceName in doc.services || {}) { 129 | var service = doc.services[serviceName]; 130 | var image = service.image; 131 | if (!image) { 132 | image = projectName.toLowerCase().replace(/[^0-9a-z]/g, "") + "_" + serviceName; 133 | } 134 | if (!builtOnly || service.build) { 135 | images[serviceName] = image; 136 | } 137 | } 138 | return images; 139 | }); 140 | } 141 | 142 | public getVersion(): string { 143 | return this.dockerComposeVersion; 144 | } 145 | 146 | public close(): void { 147 | if (this.finalComposeFile && tl.exist(this.finalComposeFile)) { 148 | del.sync(this.finalComposeFile, { force: true }); 149 | } 150 | super.close(); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerConnection.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as url from "url"; 7 | import * as tl from "vsts-task-lib/task"; 8 | import * as tr from "vsts-task-lib/toolrunner"; 9 | import * as imageUtils from "./dockerImageUtils"; 10 | 11 | export default class DockerConnection { 12 | private dockerPath: string; 13 | protected hostUrl: string; 14 | protected certsDir: 
string; 15 | private caPath: string; 16 | private certPath: string; 17 | private keyPath: string; 18 | private registryAuth: { [key: string]: string }; 19 | private registryHost: string; 20 | 21 | constructor() { 22 | this.dockerPath = tl.which("docker", true); 23 | } 24 | 25 | public createCommand(): tr.ToolRunner { 26 | var command = tl.tool(this.dockerPath); 27 | if (this.hostUrl) { 28 | command.arg(["-H", this.hostUrl]); 29 | command.arg("--tls"); 30 | command.arg("--tlscacert='" + this.caPath + "'"); 31 | command.arg("--tlscert='" + this.certPath + "'"); 32 | command.arg("--tlskey='" + this.keyPath + "'"); 33 | } 34 | return command; 35 | } 36 | 37 | public execCommand(command: tr.ToolRunner, options?: tr.IExecOptions) { 38 | var errlines = []; 39 | command.on("errline", line => { 40 | errlines.push(line); 41 | }); 42 | return command.exec(options).fail(error => { 43 | errlines.forEach(line => tl.error(line)); 44 | throw error; 45 | }); 46 | } 47 | 48 | public open(hostEndpoint?: string, registryEndpoint?: string): void { 49 | if (hostEndpoint) { 50 | this.hostUrl = tl.getEndpointUrl(hostEndpoint, false); 51 | if (this.hostUrl.charAt(this.hostUrl.length - 1) == "/") { 52 | this.hostUrl = this.hostUrl.substring(0, this.hostUrl.length - 1); 53 | } 54 | 55 | this.certsDir = path.join("", ".dockercerts"); 56 | if (!fs.existsSync(this.certsDir)) { 57 | fs.mkdirSync(this.certsDir); 58 | } 59 | 60 | var authDetails = tl.getEndpointAuthorization(hostEndpoint, false).parameters; 61 | 62 | this.caPath = path.join(this.certsDir, "ca.pem"); 63 | fs.writeFileSync(this.caPath, authDetails["cacert"]); 64 | 65 | this.certPath = path.join(this.certsDir, "cert.pem"); 66 | fs.writeFileSync(this.certPath, authDetails["cert"]); 67 | 68 | this.keyPath = path.join(this.certsDir, "key.pem"); 69 | fs.writeFileSync(this.keyPath, authDetails["key"]); 70 | } 71 | 72 | if (registryEndpoint) { 73 | var command = this.createCommand(); 74 | this.registryAuth = 
tl.getEndpointAuthorization(registryEndpoint, true).parameters; 75 | if (this.registryAuth) { 76 | command.arg("login"); 77 | command.arg(["-u", this.registryAuth["username"]]); 78 | command.arg(["-p", this.registryAuth["password"]]); 79 | command.arg(this.registryAuth["registry"]); 80 | command.execSync(); 81 | this.registryHost = this.registryAuth["registry"]; 82 | } 83 | } 84 | } 85 | 86 | public qualifyImageName(imageName: string) { 87 | if (!imageUtils.hasRegistryComponent(imageName) && this.registryAuth) { 88 | var regUrl = url.parse(this.registryAuth["registry"]), 89 | hostname = !regUrl.slashes ? regUrl.href : regUrl.host; 90 | if (hostname.toLowerCase() !== "index.docker.io") { 91 | imageName = hostname + "/" + imageName; 92 | } 93 | } 94 | return imageName; 95 | } 96 | 97 | public close(): void { 98 | if (this.registryHost) { 99 | var command = this.createCommand(); 100 | command.arg("logout"); 101 | command.arg(this.registryHost); 102 | command.execSync(); 103 | } 104 | if (this.certsDir && fs.existsSync(this.certsDir)) { 105 | del.sync(this.certsDir); 106 | } 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerDeploy.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as tl from "vsts-task-lib/task"; 4 | 5 | // Change to any specified working directory 6 | tl.cd(tl.getInput("cwd")); 7 | 8 | // Run the deployment based on target type 9 | var targetType = tl.getInput("targetType", true); 10 | /* tslint:disable:no-var-requires */ 11 | require({ 12 | "ACS DCOS": "./dockerDeployAcsDcos" 13 | }[targetType]).run() 14 | /* tslint:enable:no-var-requires */ 15 | .fail(function failure(err) { 16 | tl.setResult(tl.TaskResult.Failed, err.message); 17 | }) 18 | .then(function success() { 19 | tl.setResult(tl.TaskResult.Succeeded, ""); 20 | }) 21 | .done(); 22 | 
-------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerDeployAcsDcos.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as tl from "vsts-task-lib/task"; 7 | import DockerComposeConnection from "./dockerComposeConnection"; 8 | 9 | var srcPath = path.join(path.dirname(module.filename), "acs-dcos"); 10 | var imageName = "vsts-task-dd7c9344117944a9891b177fbb98b9a7-acs-dcos"; 11 | 12 | function normalizeAppId(id: string) { 13 | // Marathon allows lowercase letters, digits, hyphens, "." and ".." 14 | // We don't handle the complexity of normalizing to the exact regex 15 | return id.toLowerCase().replace(/[^/0-9a-z-\.]/g, ""); 16 | } 17 | 18 | export function run(): any { 19 | var connection = new DockerComposeConnection(), 20 | composeFile: string; 21 | return connection.open(tl.getInput("dockerHostEndpoint"), tl.getInput("dockerRegistryEndpoint")) 22 | .then(() => connection.getCombinedConfig()) 23 | .then(config => { 24 | var registryEndpoint = tl.getInput("dockerRegistryEndpoint"), 25 | registryHost: string, 26 | registryUsername: string, 27 | registryPassword: string; 28 | if (registryEndpoint) { 29 | var registryAuth = tl.getEndpointAuthorization(registryEndpoint, true).parameters; 30 | registryHost = registryAuth["registry"]; 31 | registryUsername = registryAuth["username"]; 32 | registryPassword = registryAuth["password"]; 33 | } 34 | 35 | var endpointType = tl.getInput("acsDcosEndpointType", true), 36 | masterUrl = tl.getInput("acsDcosMasterUrl", endpointType === "Direct"), 37 | sshEndpoint = tl.getInput("acsDcosSshEndpoint", endpointType === "SSH"), 38 | sshHost: string, 39 | sshPort: string, 40 | sshUsername: string, 41 | sshPrivateKey: string, 42 | sshPassword: string; 43 | if (endpointType === "Direct") { 44 | sshEndpoint = null; 45 | } else 
{ 46 | masterUrl = null; 47 | sshHost = tl.getEndpointDataParameter(sshEndpoint, "host", false); 48 | sshPort = tl.getEndpointDataParameter(sshEndpoint, "port", true) || "22"; 49 | sshUsername = tl.getEndpointAuthorizationParameter(sshEndpoint, "username", false); 50 | sshPrivateKey = tl.getEndpointDataParameter(sshEndpoint, "privateKey", true); 51 | sshPassword = tl.getEndpointAuthorizationParameter(sshEndpoint, "password", !!sshPrivateKey); 52 | } 53 | 54 | var appGroupName = normalizeAppId(tl.getInput("acsDcosAppGroupName", true)), 55 | appGroupQualifier = normalizeAppId(tl.getInput("acsDcosAppGroupQualifier", true)), 56 | appGroupVersion = normalizeAppId(tl.getInput("acsDcosAppGroupVersion", true)); 57 | 58 | var minHealthCapacity = parseInt(tl.getInput("acsDcosMinimumHealthCapacity", true)); 59 | if (isNaN(minHealthCapacity)) { 60 | throw new Error("Minimum Health Capacity is not a number."); 61 | } 62 | 63 | var verbose = tl.getVariable("System.Debug"); 64 | 65 | composeFile = path.join(srcPath, ".docker-compose." + Date.now() + ".yml"); 66 | fs.writeFileSync(composeFile, config); 67 | 68 | return connection.execCommand(connection.createCommand() 69 | .arg("build") 70 | .arg(["-f", path.join(srcPath, "Dockerfile.task")]) 71 | .arg(["-t", imageName]) 72 | .arg(srcPath)) 73 | .then(() => connection.createCommand() 74 | .arg("run") 75 | .arg("--rm") 76 | .arg(imageName) 77 | .arg("createmarathon.py") 78 | .arg(["--compose-file", path.basename(composeFile)]) 79 | .arg(masterUrl ? ["--dcos-master-url", masterUrl] : [ 80 | "--acs-host", sshHost, 81 | "--acs-port", sshPort, 82 | "--acs-username", sshUsername, 83 | "--acs-private-key", sshPrivateKey, 84 | "--acs-password", sshPassword 85 | ]) 86 | .arg(registryHost ? 
[ 87 | "--registry-host", registryHost, 88 | "--registry-username", registryUsername, 89 | "--registry-password", registryPassword 90 | ] : []) 91 | .arg(["--group-name", appGroupName]) 92 | .arg(["--group-qualifier", appGroupQualifier]) 93 | .arg(["--group-version", appGroupVersion]) 94 | .arg(["--minimum-health-capacity", minHealthCapacity.toString()]) 95 | .arg(verbose ? ["--verbose"] : []) 96 | .exec()); 97 | }) 98 | .fin(function cleanup() { 99 | if (composeFile && tl.exist(composeFile)) { 100 | del.sync(composeFile, { force: true }); 101 | } 102 | connection.close(); 103 | }); 104 | } 105 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerDeployAcsKube.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | import * as del from "del"; 4 | import * as fs from "fs"; 5 | import * as path from "path"; 6 | import * as tl from "vsts-task-lib/task"; 7 | import DockerComposeConnection from "./dockerComposeConnection"; 8 | 9 | var srcPath = path.join(path.dirname(module.filename), "acs-kubernetes"); 10 | var imageName = "vsts-task-dd7c9344117944a9891b177fbb98b9a7-acs-kubernetes"; 11 | 12 | function normalizeAppId(id: string) { 13 | return id.toLowerCase().replace(/(([^A-Za-z0-9][^-A-Za-z0-9_.]*)?[^A-Za-z0-9])?/g, ""); 14 | } 15 | 16 | export function run(): any { 17 | var connection = new DockerComposeConnection(), 18 | composeFile: string; 19 | return connection.open(tl.getInput("dockerHostEndpoint"), tl.getInput("dockerRegistryEndpoint")) 20 | .then(() => connection.getCombinedConfig()) 21 | .then(config => { 22 | var registryEndpoint = tl.getInput("dockerRegistryEndpoint"), 23 | registryHost: string, 24 | registryUsername: string, 25 | registryPassword: string; 26 | if (registryEndpoint) { 27 | var registryAuth = tl.getEndpointAuthorization(registryEndpoint, true).parameters; 28 | registryHost = registryAuth["registry"]; 29 | registryUsername = 
registryAuth["username"]; 30 | registryPassword = registryAuth["password"]; 31 | } 32 | 33 | var endpointType = tl.getInput("kubernetesEndpointType", true), 34 | apiEndpointUrl = tl.getInput("kubernetesApiEndpointURL", endpointType === "Direct"), 35 | sshEndpoint = tl.getInput("kubernetesSshEndpoint", endpointType === "SSH"), 36 | sshHost: string, 37 | sshPort: string, 38 | sshUsername: string, 39 | sshPrivateKey: string, 40 | sshPassword: string; 41 | 42 | if (endpointType === "Direct") { 43 | sshEndpoint = null; 44 | } else { 45 | apiEndpointUrl = null; 46 | sshHost = tl.getEndpointDataParameter(sshEndpoint, "host", false); 47 | sshPort = tl.getEndpointDataParameter(sshEndpoint, "port", true) || "22"; 48 | sshUsername = tl.getEndpointAuthorizationParameter(sshEndpoint, "username", false); 49 | sshPrivateKey = tl.getEndpointDataParameter(sshEndpoint, "privateKey", true); 50 | sshPassword = tl.getEndpointAuthorizationParameter(sshEndpoint, "password", !!sshPrivateKey); 51 | } 52 | 53 | var appGroupName = normalizeAppId(tl.getInput("acsDcosAppGroupName", true)), 54 | appGroupQualifier = normalizeAppId(tl.getInput("acsDcosAppGroupQualifier", true)), 55 | appGroupVersion = normalizeAppId(tl.getInput("acsDcosAppGroupVersion", true)); 56 | 57 | var deployIngressController = tl.getInput("kubernetesDeployIngressController", true); 58 | var verbose = tl.getVariable("System.Debug"); 59 | 60 | composeFile = path.join(srcPath, ".docker-compose." + Date.now() + ".yml"); 61 | fs.writeFileSync(composeFile, config); 62 | 63 | return connection.execCommand(connection.createCommand() 64 | .arg("build") 65 | .arg(["-f", path.join(srcPath, "Dockerfile.task")]) 66 | .arg(["-t", imageName]) 67 | .arg(srcPath)) 68 | .then(() => connection.createCommand() 69 | .arg("run") 70 | .arg("--rm") 71 | .arg(imageName) 72 | .arg("deploy.py") 73 | .arg(["--compose-file", path.basename(composeFile)]) 74 | .arg(apiEndpointUrl ? 
["--api-endpoint-url", apiEndpointUrl] : [ 75 | "--acs-host", sshHost, 76 | "--acs-port", sshPort, 77 | "--acs-username", sshUsername, 78 | "--acs-private-key", sshPrivateKey, 79 | "--acs-password", sshPassword 80 | ]) 81 | .arg(registryHost ? [ 82 | "--registry-host", registryHost, 83 | "--registry-username", registryUsername, 84 | "--registry-password", registryPassword 85 | ] : []) 86 | .arg(["--group-name", appGroupName]) 87 | .arg(["--group-qualifier", appGroupQualifier]) 88 | .arg(["--group-version", appGroupVersion]) 89 | .arg(["--orchestrator", "Kubernetes"]) 90 | .arg(deployIngressController ? ["--deploy-ingress-controller"] : []) 91 | .arg(verbose ? ["--verbose"] : []) 92 | .exec()); 93 | }) 94 | .fin(function cleanup() { 95 | if (composeFile && tl.exist(composeFile)) { 96 | del.sync(composeFile, { force: true }); 97 | } 98 | connection.close(); 99 | }); 100 | } 101 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/dockerImageUtils.ts: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | export function hasRegistryComponent(imageName: string): boolean { 4 | var periodIndex = imageName.indexOf("."), 5 | colonIndex = imageName.indexOf(":"), 6 | slashIndex = imageName.indexOf("/"); 7 | return ((periodIndex > 0 && periodIndex < slashIndex) || 8 | (colonIndex > 0 && colonIndex < slashIndex)); 9 | } 10 | 11 | export function imageNameWithoutTag(imageName: string): string { 12 | var endIndex = 0; 13 | if (hasRegistryComponent(imageName)) { 14 | // Contains a registry component that may include ":", so omit 15 | // this part of the name from the main delimiter determination 16 | endIndex = imageName.indexOf("/"); 17 | } 18 | endIndex = imageName.indexOf(":", endIndex); 19 | return endIndex < 0 ? 
imageName : imageName.substr(0, endIndex); 20 | } 21 | -------------------------------------------------------------------------------- /src/tasks/dockerDeploy/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/microsoft/vsts-docker/7b94ccdb581f66968b28efb4b9e4b59dea3e13f8/src/tasks/dockerDeploy/icon.png -------------------------------------------------------------------------------- /tests/tasks/docker/dockerTests.ts: -------------------------------------------------------------------------------- 1 | /// 2 | 3 | import * as dockerRun from "../../../src/tasks/docker/dockerRun"; 4 | 5 | import chai = require("chai"); 6 | import sinon = require("sinon"); 7 | import sinonChai = require("sinon-chai"); 8 | import tl = require("vsts-task-lib/task"); 9 | 10 | chai.should(); 11 | chai.use(sinonChai); 12 | 13 | describe("dockerRun.dockerRun", (): void => { 14 | var sandbox; 15 | var getInputStub; 16 | 17 | beforeEach((): void => { 18 | sandbox = sinon.sandbox.create(); 19 | getInputStub = sandbox.stub(tl, "getInput"); 20 | }); 21 | 22 | afterEach((): void => { 23 | sandbox.restore(); 24 | }); 25 | 26 | it("should pass", (): void => { 27 | }); 28 | }); -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES5", 4 | "module": "commonjs" 5 | } 6 | } -------------------------------------------------------------------------------- /tsd.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "v4", 3 | "repo": "borisyankov/DefinitelyTyped", 4 | "ref": "master", 5 | "path": "typings", 6 | "bundle": "typings/tsd.d.ts", 7 | "installed": { 8 | "mocha/mocha.d.ts": { 9 | "commit": "e69fe60f2d6377ea4fae539493997b098f52cad1" 10 | }, 11 | "node/node.d.ts": { 12 | "commit": 
"81ebfb3d08cd608a7c801668a6d5f7b2ea9ecc38" 13 | }, 14 | "shelljs/shelljs.d.ts": { 15 | "commit": "3030a4be536b6530c06b80081f1333dc0de4d703" 16 | }, 17 | "sinon/sinon.d.ts": { 18 | "commit": "95b7178e0ef33b4b88327676418b4cc3ad61df0f" 19 | }, 20 | "assertion-error/assertion-error.d.ts": { 21 | "commit": "95b7178e0ef33b4b88327676418b4cc3ad61df0f" 22 | }, 23 | "chai/chai.d.ts": { 24 | "commit": "95b7178e0ef33b4b88327676418b4cc3ad61df0f" 25 | }, 26 | "sinon-chai/sinon-chai.d.ts": { 27 | "commit": "95b7178e0ef33b4b88327676418b4cc3ad61df0f" 28 | }, 29 | "del/del.d.ts": { 30 | "commit": "81ebfb3d08cd608a7c801668a6d5f7b2ea9ecc38" 31 | }, 32 | "glob/glob.d.ts": { 33 | "commit": "81ebfb3d08cd608a7c801668a6d5f7b2ea9ecc38" 34 | }, 35 | "minimatch/minimatch.d.ts": { 36 | "commit": "81ebfb3d08cd608a7c801668a6d5f7b2ea9ecc38" 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /tslint.json: -------------------------------------------------------------------------------- 1 | { 2 | "rules": { 3 | "class-name": true, 4 | "comment-format": [true, 5 | "check-space" 6 | ], 7 | "indent": [true, 8 | "spaces" 9 | ], 10 | "one-line": [true, 11 | "check-open-brace", 12 | "check-whitespace" 13 | ], 14 | "no-var-requires": true, 15 | "no-var-keyword": false, 16 | "quotemark": [true, 17 | "double" 18 | ], 19 | "semicolon": true, 20 | "whitespace": [true, 21 | "check-branch", 22 | "check-operator", 23 | "check-separator", 24 | "check-type", 25 | "check-module" 26 | ], 27 | "typedef-whitespace": [true, { 28 | "call-signature": "nospace", 29 | "index-signature": "nospace", 30 | "parameter": "nospace", 31 | "property-declaration": "nospace", 32 | "variable-declaration": "nospace" 33 | }], 34 | "no-internal-module": true, 35 | "no-trailing-whitespace": true, 36 | "no-inferrable-types": true 37 | } 38 | } -------------------------------------------------------------------------------- /typings/assertion-error/assertion-error.d.ts: 
-------------------------------------------------------------------------------- 1 | // Type definitions for assertion-error 1.0.0 2 | // Project: https://github.com/chaijs/assertion-error 3 | // Definitions by: Bart van der Schoor 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | declare module 'assertion-error' { 7 | class AssertionError implements Error { 8 | constructor(message: string, props?: any, ssf?: Function); 9 | name: string; 10 | message: string; 11 | showDiff: boolean; 12 | stack: string; 13 | } 14 | export = AssertionError; 15 | } 16 | -------------------------------------------------------------------------------- /typings/del/del.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for del v2.2.0 2 | // Project: https://github.com/sindresorhus/del 3 | // Definitions by: Asana , Aya Morisawa 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | /// 7 | 8 | declare module "del" { 9 | import glob = require("glob"); 10 | 11 | function Del(pattern: string): Promise; 12 | function Del(pattern: string, options: Del.Options): Promise; 13 | 14 | function Del(patterns: string[]): Promise; 15 | function Del(patterns: string[], options: Del.Options): Promise; 16 | 17 | module Del { 18 | function sync(pattern: string, options?: Options): string[]; 19 | function sync(patterns: string[], options?: Options): string[]; 20 | 21 | interface Options extends glob.IOptions { 22 | force?: boolean; 23 | dryRun?: boolean; 24 | } 25 | } 26 | 27 | export = Del; 28 | } 29 | -------------------------------------------------------------------------------- /typings/glob/glob.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for Glob 5.0.10 2 | // Project: https://github.com/isaacs/node-glob 3 | // Definitions by: vvakame 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | /// 7 | /// 8 | 9 | 
declare module "glob" { 10 | 11 | import events = require("events"); 12 | import fs = require('fs'); 13 | import minimatch = require("minimatch"); 14 | 15 | function G(pattern: string, cb: (err: Error, matches: string[]) => void): void; 16 | function G(pattern: string, options: G.IOptions, cb: (err: Error, matches: string[]) => void): void; 17 | 18 | module G { 19 | function sync(pattern: string, options?: IOptions): string[]; 20 | 21 | function hasMagic(pattern: string, options?: IOptions): boolean; 22 | 23 | var Glob: IGlobStatic; 24 | var GlobSync: IGlobSyncStatic; 25 | 26 | interface IOptions extends minimatch.IOptions { 27 | cwd?: string; 28 | root?: string; 29 | dot?: boolean; 30 | nomount?: boolean; 31 | mark?: boolean; 32 | nosort?: boolean; 33 | stat?: boolean; 34 | silent?: boolean; 35 | strict?: boolean; 36 | cache?: { [path: string]: any /* boolean | string | string[] */ }; 37 | statCache?: { [path: string]: fs.Stats }; 38 | symlinks?: any; 39 | sync?: boolean; 40 | nounique?: boolean; 41 | nonull?: boolean; 42 | debug?: boolean; 43 | nobrace?: boolean; 44 | noglobstar?: boolean; 45 | noext?: boolean; 46 | nocase?: boolean; 47 | matchBase?: any; 48 | nodir?: boolean; 49 | ignore?: any; /* string | string[] */ 50 | follow?: boolean; 51 | realpath?: boolean; 52 | nonegate?: boolean; 53 | nocomment?: boolean; 54 | 55 | /** Deprecated. 
*/ 56 | globDebug?: boolean; 57 | } 58 | 59 | interface IGlobStatic extends events.EventEmitter { 60 | new (pattern: string, cb?: (err: Error, matches: string[]) => void): IGlob; 61 | new (pattern: string, options: IOptions, cb?: (err: Error, matches: string[]) => void): IGlob; 62 | prototype: IGlob; 63 | } 64 | 65 | interface IGlobSyncStatic { 66 | new (pattern: string, options?: IOptions): IGlobBase 67 | prototype: IGlobBase; 68 | } 69 | 70 | interface IGlobBase { 71 | minimatch: minimatch.IMinimatch; 72 | options: IOptions; 73 | aborted: boolean; 74 | cache: { [path: string]: any /* boolean | string | string[] */ }; 75 | statCache: { [path: string]: fs.Stats }; 76 | symlinks: { [path: string]: boolean }; 77 | realpathCache: { [path: string]: string }; 78 | found: string[]; 79 | } 80 | 81 | interface IGlob extends IGlobBase, events.EventEmitter { 82 | pause(): void; 83 | resume(): void; 84 | abort(): void; 85 | 86 | /** Deprecated. */ 87 | EOF: any; 88 | /** Deprecated. */ 89 | paused: boolean; 90 | /** Deprecated. */ 91 | maxDepth: number; 92 | /** Deprecated. */ 93 | maxLength: number; 94 | /** Deprecated. */ 95 | changedCwd: boolean; 96 | /** Deprecated. */ 97 | cwd: string; 98 | /** Deprecated. */ 99 | root: string; 100 | /** Deprecated. */ 101 | error: any; 102 | /** Deprecated. */ 103 | matches: string[]; 104 | /** Deprecated. */ 105 | log(...args: any[]): void; 106 | /** Deprecated. 
*/ 107 | emitMatch(m: any): void; 108 | } 109 | } 110 | 111 | export = G; 112 | } 113 | -------------------------------------------------------------------------------- /typings/js-yaml/js-yaml.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for js-yaml 3.5.2 2 | // Project: https://github.com/nodeca/js-yaml 3 | // Definitions by: Bart van der Schoor , Sebastian Clausen 4 | // Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped 5 | 6 | declare namespace jsyaml { 7 | export function safeLoad(str: string, opts?: LoadOptions): any; 8 | export function load(str: string, opts?: LoadOptions): any; 9 | 10 | export class Type implements TypeConstructorOptions { 11 | constructor(tag: string, opts?: TypeConstructorOptions); 12 | tag: string; 13 | } 14 | export class Schema { 15 | constructor(definition: SchemaDefinition); 16 | public static create(args: any[]): Schema; 17 | } 18 | 19 | export function safeLoadAll(str: string, iterator: (doc: any) => void, opts?: LoadOptions): any; 20 | export function loadAll(str: string, iterator: (doc: any) => void, opts?: LoadOptions): any; 21 | 22 | export function safeDump(obj: any, opts?: DumpOptions): string; 23 | export function dump(obj: any, opts?: DumpOptions): string; 24 | 25 | export interface LoadOptions { 26 | // string to be used as a file path in error/warning messages. 27 | filename?: string; 28 | // makes the loader to throw errors instead of warnings. 29 | strict?: boolean; 30 | // specifies a schema to use. 31 | schema?: any; 32 | } 33 | 34 | export interface DumpOptions { 35 | // indentation width to use (in spaces). 36 | indent?: number; 37 | // do not throw on invalid types (like function in the safe schema) and skip pairs and single values with such types. 38 | skipInvalid?: boolean; 39 | // specifies level of nesting, when to switch from block to flow style for collections. 
-1 means block style everwhere 40 | flowLevel?: number; 41 | // Each tag may have own set of styles. - "tag" => "style" map. 42 | styles?: Object; 43 | // specifies a schema to use. 44 | schema?: any; 45 | } 46 | 47 | export interface TypeConstructorOptions { 48 | kind?: string; 49 | resolve?: Function; 50 | construct?: Function; 51 | instanceOf?: Object; 52 | predicate?: string; 53 | represent?: Function; 54 | defaultStyle?: string; 55 | styleAliases?: Object; 56 | } 57 | 58 | export interface SchemaDefinition { 59 | implicit?: any[]; 60 | explicit?: any[]; 61 | include?: any[]; 62 | } 63 | 64 | // only strings, arrays and plain objects: http://www.yaml.org/spec/1.2/spec.html#id2802346 65 | export var FAILSAFE_SCHEMA: any; 66 | // only strings, arrays and plain objects: http://www.yaml.org/spec/1.2/spec.html#id2802346 67 | export var JSON_SCHEMA: any; 68 | // same as JSON_SCHEMA: http://www.yaml.org/spec/1.2/spec.html#id2804923 69 | export var CORE_SCHEMA: any; 70 | // all supported YAML types, without unsafe ones (!!js/undefined, !!js/regexp and !!js/function): http://yaml.org/type/ 71 | export var DEFAULT_SAFE_SCHEMA: any; 72 | // all supported YAML types. 
73 | export var DEFAULT_FULL_SCHEMA: any; 74 | export var MINIMAL_SCHEMA: any; 75 | export var SAFE_SCHEMA: any; 76 | 77 | export class YAMLException extends Error { 78 | constructor(reason?: any, mark?: any); 79 | toString(compact?: boolean): string; 80 | } 81 | } 82 | 83 | declare module 'js-yaml' { 84 | export = jsyaml; 85 | } 86 | -------------------------------------------------------------------------------- /typings/minimatch/minimatch.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for Minimatch 2.0.8 2 | // Project: https://github.com/isaacs/minimatch 3 | // Definitions by: vvakame 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | declare module "minimatch" { 7 | 8 | function M(target: string, pattern: string, options?: M.IOptions): boolean; 9 | 10 | module M { 11 | function match(list: string[], pattern: string, options?: IOptions): string[]; 12 | function filter(pattern: string, options?: IOptions): (element: string, indexed: number, array: string[]) => boolean; 13 | function makeRe(pattern: string, options?: IOptions): RegExp; 14 | 15 | var Minimatch: IMinimatchStatic; 16 | 17 | interface IOptions { 18 | debug?: boolean; 19 | nobrace?: boolean; 20 | noglobstar?: boolean; 21 | dot?: boolean; 22 | noext?: boolean; 23 | nocase?: boolean; 24 | nonull?: boolean; 25 | matchBase?: boolean; 26 | nocomment?: boolean; 27 | nonegate?: boolean; 28 | flipNegate?: boolean; 29 | } 30 | 31 | interface IMinimatchStatic { 32 | new (pattern: string, options?: IOptions): IMinimatch; 33 | prototype: IMinimatch; 34 | } 35 | 36 | interface IMinimatch { 37 | pattern: string; 38 | options: IOptions; 39 | /** 2-dimensional array of regexp or string expressions. 
*/ 40 | set: any[][]; // (RegExp | string)[][] 41 | regexp: RegExp; 42 | negate: boolean; 43 | comment: boolean; 44 | empty: boolean; 45 | 46 | makeRe(): RegExp; // regexp or boolean 47 | match(fname: string): boolean; 48 | matchOne(files: string[], pattern: string[], partial: boolean): boolean; 49 | 50 | /** Deprecated. For internal use. */ 51 | debug(): void; 52 | /** Deprecated. For internal use. */ 53 | make(): void; 54 | /** Deprecated. For internal use. */ 55 | parseNegate(): void; 56 | /** Deprecated. For internal use. */ 57 | braceExpand(pattern: string, options: IOptions): void; 58 | /** Deprecated. For internal use. */ 59 | parse(pattern: string, isSub?: boolean): void; 60 | } 61 | } 62 | 63 | export = M; 64 | } 65 | -------------------------------------------------------------------------------- /typings/mocha/mocha.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for mocha 2.2.5 2 | // Project: http://mochajs.org/ 3 | // Definitions by: Kazi Manzur Rashid , otiai10 , jt000 , Vadim Macagon 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | interface MochaSetupOptions { 7 | //milliseconds to wait before considering a test slow 8 | slow?: number; 9 | 10 | // timeout in milliseconds 11 | timeout?: number; 12 | 13 | // ui name "bdd", "tdd", "exports" etc 14 | ui?: string; 15 | 16 | //array of accepted globals 17 | globals?: any[]; 18 | 19 | // reporter instance (function or string), defaults to `mocha.reporters.Spec` 20 | reporter?: any; 21 | 22 | // bail on the first test failure 23 | bail?: boolean; 24 | 25 | // ignore global leaks 26 | ignoreLeaks?: boolean; 27 | 28 | // grep string or regexp to filter tests with 29 | grep?: any; 30 | } 31 | 32 | interface MochaDone { 33 | (error?: Error): void; 34 | } 35 | 36 | declare var mocha: Mocha; 37 | declare var describe: Mocha.IContextDefinition; 38 | declare var xdescribe: Mocha.IContextDefinition; 39 | // alias for `describe` 
40 | declare var context: Mocha.IContextDefinition; 41 | // alias for `describe` 42 | declare var suite: Mocha.IContextDefinition; 43 | declare var it: Mocha.ITestDefinition; 44 | declare var xit: Mocha.ITestDefinition; 45 | // alias for `it` 46 | declare var test: Mocha.ITestDefinition; 47 | 48 | declare function before(action: () => void): void; 49 | 50 | declare function before(action: (done: MochaDone) => void): void; 51 | 52 | declare function before(description: string, action: () => void): void; 53 | 54 | declare function before(description: string, action: (done: MochaDone) => void): void; 55 | 56 | declare function setup(action: () => void): void; 57 | 58 | declare function setup(action: (done: MochaDone) => void): void; 59 | 60 | declare function after(action: () => void): void; 61 | 62 | declare function after(action: (done: MochaDone) => void): void; 63 | 64 | declare function after(description: string, action: () => void): void; 65 | 66 | declare function after(description: string, action: (done: MochaDone) => void): void; 67 | 68 | declare function teardown(action: () => void): void; 69 | 70 | declare function teardown(action: (done: MochaDone) => void): void; 71 | 72 | declare function beforeEach(action: () => void): void; 73 | 74 | declare function beforeEach(action: (done: MochaDone) => void): void; 75 | 76 | declare function beforeEach(description: string, action: () => void): void; 77 | 78 | declare function beforeEach(description: string, action: (done: MochaDone) => void): void; 79 | 80 | declare function suiteSetup(action: () => void): void; 81 | 82 | declare function suiteSetup(action: (done: MochaDone) => void): void; 83 | 84 | declare function afterEach(action: () => void): void; 85 | 86 | declare function afterEach(action: (done: MochaDone) => void): void; 87 | 88 | declare function afterEach(description: string, action: () => void): void; 89 | 90 | declare function afterEach(description: string, action: (done: MochaDone) => void): void; 
91 | 92 | declare function suiteTeardown(action: () => void): void; 93 | 94 | declare function suiteTeardown(action: (done: MochaDone) => void): void; 95 | 96 | declare class Mocha { 97 | constructor(options?: { 98 | grep?: RegExp; 99 | ui?: string; 100 | reporter?: string; 101 | timeout?: number; 102 | bail?: boolean; 103 | }); 104 | 105 | /** Setup mocha with the given options. */ 106 | setup(options: MochaSetupOptions): Mocha; 107 | bail(value?: boolean): Mocha; 108 | addFile(file: string): Mocha; 109 | /** Sets reporter by name, defaults to "spec". */ 110 | reporter(name: string): Mocha; 111 | /** Sets reporter constructor, defaults to mocha.reporters.Spec. */ 112 | reporter(reporter: (runner: Mocha.IRunner, options: any) => any): Mocha; 113 | ui(value: string): Mocha; 114 | grep(value: string): Mocha; 115 | grep(value: RegExp): Mocha; 116 | invert(): Mocha; 117 | ignoreLeaks(value: boolean): Mocha; 118 | checkLeaks(): Mocha; 119 | /** 120 | * Function to allow assertion libraries to throw errors directly into mocha. 121 | * This is useful when running tests in a browser because window.onerror will 122 | * only receive the 'message' attribute of the Error. 123 | */ 124 | throwError(error: Error): void; 125 | /** Enables growl support. */ 126 | growl(): Mocha; 127 | globals(value: string): Mocha; 128 | globals(values: string[]): Mocha; 129 | useColors(value: boolean): Mocha; 130 | useInlineDiffs(value: boolean): Mocha; 131 | timeout(value: number): Mocha; 132 | slow(value: number): Mocha; 133 | enableTimeouts(value: boolean): Mocha; 134 | asyncOnly(value: boolean): Mocha; 135 | noHighlighting(value: boolean): Mocha; 136 | /** Runs tests and invokes `onComplete()` when finished. */ 137 | run(onComplete?: (failures: number) => void): Mocha.IRunner; 138 | } 139 | 140 | // merge the Mocha class declaration with a module 141 | declare module Mocha { 142 | /** Partial interface for Mocha's `Runnable` class. 
*/ 143 | interface IRunnable { 144 | title: string; 145 | fn: Function; 146 | async: boolean; 147 | sync: boolean; 148 | timedOut: boolean; 149 | } 150 | 151 | /** Partial interface for Mocha's `Suite` class. */ 152 | interface ISuite { 153 | parent: ISuite; 154 | title: string; 155 | 156 | fullTitle(): string; 157 | } 158 | 159 | /** Partial interface for Mocha's `Test` class. */ 160 | interface ITest extends IRunnable { 161 | parent: ISuite; 162 | pending: boolean; 163 | 164 | fullTitle(): string; 165 | } 166 | 167 | /** Partial interface for Mocha's `Runner` class. */ 168 | interface IRunner {} 169 | 170 | interface IContextDefinition { 171 | (description: string, spec: () => void): ISuite; 172 | only(description: string, spec: () => void): ISuite; 173 | skip(description: string, spec: () => void): void; 174 | timeout(ms: number): void; 175 | } 176 | 177 | interface ITestDefinition { 178 | (expectation: string, assertion?: () => void): ITest; 179 | (expectation: string, assertion?: (done: MochaDone) => void): ITest; 180 | only(expectation: string, assertion?: () => void): ITest; 181 | only(expectation: string, assertion?: (done: MochaDone) => void): ITest; 182 | skip(expectation: string, assertion?: () => void): void; 183 | skip(expectation: string, assertion?: (done: MochaDone) => void): void; 184 | timeout(ms: number): void; 185 | } 186 | 187 | export module reporters { 188 | export class Base { 189 | stats: { 190 | suites: number; 191 | tests: number; 192 | passes: number; 193 | pending: number; 194 | failures: number; 195 | }; 196 | 197 | constructor(runner: IRunner); 198 | } 199 | 200 | export class Doc extends Base {} 201 | export class Dot extends Base {} 202 | export class HTML extends Base {} 203 | export class HTMLCov extends Base {} 204 | export class JSON extends Base {} 205 | export class JSONCov extends Base {} 206 | export class JSONStream extends Base {} 207 | export class Landing extends Base {} 208 | export class List extends Base {} 209 | 
export class Markdown extends Base {} 210 | export class Min extends Base {} 211 | export class Nyan extends Base {} 212 | export class Progress extends Base { 213 | /** 214 | * @param options.open String used to indicate the start of the progress bar. 215 | * @param options.complete String used to indicate a complete test on the progress bar. 216 | * @param options.incomplete String used to indicate an incomplete test on the progress bar. 217 | * @param options.close String used to indicate the end of the progress bar. 218 | */ 219 | constructor(runner: IRunner, options?: { 220 | open?: string; 221 | complete?: string; 222 | incomplete?: string; 223 | close?: string; 224 | }); 225 | } 226 | export class Spec extends Base {} 227 | export class TAP extends Base {} 228 | export class XUnit extends Base { 229 | constructor(runner: IRunner, options?: any); 230 | } 231 | } 232 | } 233 | 234 | declare module "mocha" { 235 | export = Mocha; 236 | } 237 | -------------------------------------------------------------------------------- /typings/sinon-chai/sinon-chai.d.ts: -------------------------------------------------------------------------------- 1 | // Type definitions for sinon-chai 2.7.0 2 | // Project: https://github.com/domenic/sinon-chai 3 | // Definitions by: Kazi Manzur Rashid , Jed Mao 4 | // Definitions: https://github.com/borisyankov/DefinitelyTyped 5 | 6 | /// 7 | /// 8 | 9 | declare module Chai { 10 | 11 | interface LanguageChains { 12 | always: Assertion; 13 | } 14 | 15 | interface Assertion { 16 | /** 17 | * true if the spy was called at least once. 18 | */ 19 | called: Assertion; 20 | /** 21 | * @param count The number of recorded calls. 22 | */ 23 | callCount(count: number): Assertion; 24 | /** 25 | * true if the spy was called exactly once. 26 | */ 27 | calledOnce: Assertion; 28 | /** 29 | * true if the spy was called exactly twice. 30 | */ 31 | calledTwice: Assertion; 32 | /** 33 | * true if the spy was called exactly thrice. 
34 | */ 35 | calledThrice: Assertion; 36 | /** 37 | * Returns true if the spy was called before anotherSpy. 38 | */ 39 | calledBefore(anotherSpy: Sinon.SinonSpy): Assertion; 40 | /** 41 | * Returns true if the spy was called after anotherSpy. 42 | */ 43 | calledAfter(anotherSpy: Sinon.SinonSpy): Assertion; 44 | /** 45 | * Returns true if spy/stub was called with the new operator. Beware that 46 | * this is inferred based on the value of the this object and the spy 47 | * function's prototype, so it may give false positives if you actively 48 | * return the right kind of object. 49 | */ 50 | calledWithNew: Assertion; 51 | /** 52 | * Returns true if context was this for this call. 53 | */ 54 | calledOn(context: any): Assertion; 55 | /** 56 | * Returns true if call received provided arguments (and possibly others). 57 | */ 58 | calledWith(...args: any[]): Assertion; 59 | /** 60 | * Returns true if call received provided arguments and no others. 61 | */ 62 | calledWithExactly(...args: any[]): Assertion; 63 | /** 64 | * Returns true if call received matching arguments (and possibly others). 65 | * This behaves the same as spyCall.calledWith(sinon.match(arg1), sinon.match(arg2), ...). 66 | */ 67 | calledWithMatch(...args: any[]): Assertion; 68 | /** 69 | * Returns true if spy returned the provided value at least once. Uses 70 | * deep comparison for objects and arrays. Use spy.returned(sinon.match.same(obj)) 71 | * for strict comparison (see matchers). 72 | */ 73 | returned(obj: any): Assertion; 74 | /** 75 | * Returns true if spy threw the provided exception object at least once. 
76 | */ 77 | thrown(obj?: Error|typeof Error|string): Assertion; 78 | } 79 | } 80 | 81 | declare module "sinon-chai" { 82 | function sinonChai(chai: any, utils: any): void; 83 | namespace sinonChai { } 84 | export = sinonChai; 85 | } 86 | -------------------------------------------------------------------------------- /typings/tsd.d.ts: -------------------------------------------------------------------------------- 1 | /// 2 | /// 3 | /// 4 | /// 5 | /// 6 | /// 7 | /// 8 | /// 9 | /// 10 | /// 11 | /// 12 | --------------------------------------------------------------------------------