├── functions ├── host.json ├── CreatePool │ ├── sample.dat │ ├── function.json │ └── index.js ├── GetPoolInfo │ ├── sample.dat │ ├── function.json │ └── index.js ├── CreateJob │ ├── sample.dat │ ├── function.json │ └── index.js ├── SetAutoScale │ ├── sample.dat │ ├── function.json │ └── index.js ├── EvaluateAutoScale │ ├── sample.dat │ ├── function.json │ ├── autoScaleEvaluator.js │ └── index.js ├── AddTasks │ ├── sample.dat │ ├── function.json │ └── index.js ├── example.settings.json ├── ListJobs │ ├── function.json │ └── index.js └── helpers │ └── helpers.js ├── docker ├── processing │ ├── requirements.txt │ └── processing.py ├── shipyard │ ├── runshipyardcmd.sh │ └── configs │ │ ├── jobs.json │ │ ├── config.json │ │ ├── credentials.json │ │ └── pool.json ├── Dockerfile └── docker_install_start_task.sh ├── .gitattributes ├── .deployment ├── Assets └── storage-keys-howto.png ├── tests ├── run-integration-test.sh ├── test-runner.js ├── autoscaleEvaluator.test.js └── evaluateautoscale.test.js ├── package.json ├── LICENSE ├── .gitignore ├── README.md └── deploy.cmd /functions/host.json: -------------------------------------------------------------------------------- 1 | { } -------------------------------------------------------------------------------- /docker/processing/requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.17.3 -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | docker/docker_install_start_task.sh eol=lf -------------------------------------------------------------------------------- /functions/CreatePool/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "poolid": "001" 3 | } -------------------------------------------------------------------------------- /functions/GetPoolInfo/sample.dat: 
-------------------------------------------------------------------------------- 1 | { 2 | "poolid": "001" 3 | } -------------------------------------------------------------------------------- /.deployment: -------------------------------------------------------------------------------- 1 | [config] 2 | project = functions 3 | command = deploy.cmd -------------------------------------------------------------------------------- /functions/CreateJob/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "poolid": "001", 3 | "jobid": "000" 4 | } -------------------------------------------------------------------------------- /functions/SetAutoScale/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "poolid": "shipyard", 3 | "maxNodes": 4 4 | } -------------------------------------------------------------------------------- /functions/EvaluateAutoScale/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "poolid": "shipyard", 3 | "maxNodes": 4 4 | } -------------------------------------------------------------------------------- /Assets/storage-keys-howto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tikyau/azure-batch-functions/master/Assets/storage-keys-howto.png -------------------------------------------------------------------------------- /tests/run-integration-test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## run function command from the root of the app. 
4 | func run ../functions/$1 -f ../functions/$1/sample.dat -------------------------------------------------------------------------------- /docker/shipyard/runshipyardcmd.sh: -------------------------------------------------------------------------------- 1 | docker run --entrypoint=/bin/sh --rm -it -v %CD%:/configs -e SHIPYARD_CONFIGDIR=/configs alfpark/batch-shipyard:cli-latest 2 | -------------------------------------------------------------------------------- /functions/AddTasks/sample.dat: -------------------------------------------------------------------------------- 1 | { 2 | "jobid": "000", 3 | "forcePull": true, 4 | "imageName": "jsturtevant/pyprocessor", 5 | "makeTasksRandom": true 6 | } -------------------------------------------------------------------------------- /docker/shipyard/configs/jobs.json: -------------------------------------------------------------------------------- 1 | { 2 | "job_specifications": [ 3 | { 4 | "id": "shipjob", 5 | "tasks": [ ] 6 | } 7 | ] 8 | } -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:2.7.13-alpine 2 | 3 | WORKDIR /usr/src/app 4 | COPY ./processing/ . 
5 | 6 | RUN pip install -r requirements.txt 7 | 8 | ENTRYPOINT ["python","processing.py"] 9 | -------------------------------------------------------------------------------- /docker/shipyard/configs/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "batch_shipyard": { 3 | "storage_account_settings": "mystorageaccount" 4 | }, 5 | "global_resources": { 6 | "docker_images": [ 7 | "jsturtevant/pyprocessor" 8 | ] 9 | } 10 | } -------------------------------------------------------------------------------- /functions/example.settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "IsEncrypted": false, 3 | "Values": { 4 | "batchAccountKey":"", 5 | "batchAccountName":"yourbatchname", 6 | "batchAccountUrl":"https://yourbatchname.eastus.batch.azure.com", 7 | "storageConnectionString": "your-connectionstring" 8 | } 9 | } -------------------------------------------------------------------------------- /functions/AddTasks/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/CreatePool/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/GetPoolInfo/function.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/ListJobs/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/SetAutoScale/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/EvaluateAutoScale/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "disabled": false, 3 | "bindings": [ 4 | { 5 | "authLevel": "function", 6 | "type": "httpTrigger", 7 | "direction": "in", 8 | "name": "req" 9 | }, 10 | { 11 | "type": "http", 12 | "direction": "out", 13 | "name": "res" 14 | } 15 | ] 16 | } -------------------------------------------------------------------------------- /functions/CreateJob/function.json: -------------------------------------------------------------------------------- 1 | { 2 | "bindings": [ 3 | { 4 | "authLevel": "function", 5 | "type": "httpTrigger", 6 | "direction": "in", 7 | 
"name": "req", 8 | "methods": [ 9 | "post" 10 | ] 11 | }, 12 | { 13 | "type": "http", 14 | "direction": "out", 15 | "name": "res" 16 | } 17 | ], 18 | "disabled": false 19 | } -------------------------------------------------------------------------------- /docker/shipyard/configs/credentials.json: -------------------------------------------------------------------------------- 1 | { 2 | "credentials": { 3 | "batch": { 4 | "account_key": "", 5 | "account_service_url": "https://youraccount.eastus.batch.azure.com" 6 | }, 7 | "storage": { 8 | "mystorageaccount": { 9 | "account": "", 10 | "account_key": "", 11 | "endpoint": "core.windows.net" 12 | } 13 | } 14 | } 15 | } -------------------------------------------------------------------------------- /docker/docker_install_start_task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | sudo apt-get -y install \ 7 | apt-transport-https \ 8 | ca-certificates \ 9 | curl \ 10 | software-properties-common 11 | 12 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 13 | 14 | sudo apt-key fingerprint 0EBFCD88 15 | 16 | sudo add-apt-repository \ 17 | "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ 18 | $(lsb_release -cs) \ 19 | stable" 20 | 21 | sudo apt-get update 22 | sudo apt-get -y install docker-ce 23 | -------------------------------------------------------------------------------- /docker/shipyard/configs/pool.json: -------------------------------------------------------------------------------- 1 | { 2 | "pool_specification": { 3 | "id": "shipyard", 4 | "vm_configuration": { 5 | "platform_image": { 6 | "publisher": "Canonical", 7 | "offer": "UbuntuServer", 8 | "sku": "16.04-LTS" 9 | } 10 | }, 11 | "vm_size": "STANDARD_A1", 12 | "vm_count": { 13 | "dedicated": 0, 14 | "low_priority": 2 15 | }, 16 | "reboot_on_start_task_failed": false, 17 | "block_until_all_global_resources_loaded": 
true 18 | } 19 | } -------------------------------------------------------------------------------- /docker/processing/processing.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import requests 3 | 4 | parser = argparse.ArgumentParser(description='Process some integers.') 5 | parser.add_argument('track', metavar='N', help='pass any value') 6 | args = parser.parse_args() 7 | track = args.track 8 | 9 | print('processing request for track: ' + track) 10 | r = requests.get('http://freemusicarchive.org/services/track/single/{0}.json'.format(track)) 11 | track_info = r.json() 12 | 13 | track_file_url = track_info['track_file_url'] 14 | track_handle = track_info['track_handle'] 15 | 16 | print('downloading track file url: ' + track_file_url) 17 | print('Saving as file: ' + track_handle) 18 | 19 | r = requests.get(track_file_url) 20 | with open(track_handle, 'wb') as f: 21 | f.write(r.content) 22 | 23 | print('file download complete') -------------------------------------------------------------------------------- /functions/GetPoolInfo/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js'); 3 | 4 | module.exports = function (context, req) { 5 | context.log('processing...'); 6 | 7 | var batch_client = helpers.batchClientFactory(); 8 | 9 | var poolid = "pool" + req.body.poolid; 10 | 11 | context.log(`getting info for ${poolid}`); 12 | batch_client.pool.get(poolid).then((poolinfo) => { 13 | context.log(`pool state: ${poolinfo.state}`); 14 | if(poolinfo.state == "active") 15 | { 16 | console.log("Pool is active"); 17 | } 18 | 19 | context.done(); 20 | }).catch(err => { 21 | context.log('An error occurred.'); 22 | context.log(err); 23 | context.done(); 24 | }); 25 | 26 | }; -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "batch-functions-example", 3 | "version": "1.0.0", 4 | "description": "Nodejs project working with docker and Azure Batch", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "node_modules/.bin/tape tests/**/*.test.js" 8 | }, 9 | "keywords": [], 10 | "author": "James Sturtevant", 11 | "license": "MIT", 12 | "dependencies": { 13 | "azure-batch": "^2.0.0-preview", 14 | "azure-storage": "^2.1.0", 15 | "validator": "^7.1.0" 16 | }, 17 | "repository": "https://github.com/jsturtevant/azure-batch-functions", 18 | "README": "https://github.com/jsturtevant/azure-batch-functions/blob/master/README.md", 19 | "devDependencies": { 20 | "azure-functions-node-harness": "github:jsturtevant/azure-functions-node-harness#dev", 21 | "tape": "^4.6.3", 22 | "tape-catch": "^1.0.6", 23 | "testdouble": "^3.2.1" 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /tests/test-runner.js: -------------------------------------------------------------------------------- 1 | var test = require('tape'); 2 | var path = require('path'); 3 | var fs = require('fs'); 4 | 5 | test.createStream().pipe(process.stdout); 6 | 7 | var testDir = process.argv[2]; 8 | 9 | // https://stackoverflow.com/a/21459809/697126 10 | var getAllFilesFromFolder = function(dir) { 11 | var results = []; 12 | 13 | fs.readdirSync(dir).forEach(function(file) { 14 | 15 | file = dir+'/'+file; 16 | var stat = fs.statSync(file); 17 | 18 | if (stat && stat.isDirectory()) { 19 | results = results.concat(getAllFilesFromFolder(file)) 20 | } else results.push(file); 21 | 22 | }); 23 | 24 | return results; 25 | 26 | }; 27 | 28 | var testFiles = getAllFilesFromFolder(testDir).filter(x => {return x.endsWith('.test.js')}); 29 | 30 | testFiles.forEach(file => { 31 | console.log('running test file: ' + file); 32 | var pathToModule = path.resolve(file); 33 | require(pathToModule); 34 | }); 35 | 36 | 
-------------------------------------------------------------------------------- /functions/ListJobs/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js') 3 | 4 | module.exports = function (context, req) { 5 | context.log('processing...'); 6 | 7 | var batch_client = helpers.batchClientFactory(); 8 | 9 | let options = {} 10 | options.jobListOptions = { maxResults : 10 }; 11 | 12 | batch_client.job.list(options).then((result) => { 13 | context.log(result); 14 | 15 | loop(result.odatanextLink, batch_client.job.listNext).then(() => { 16 | context.log('complete'); 17 | context.done(); 18 | }); 19 | 20 | }).catch((err) => { 21 | context.log('An error occurred.'); 22 | context.log(err); 23 | context.done(); 24 | }); 25 | }; 26 | 27 | function loop(nextLink, callNext) { 28 | if (nextLink !== null && nextLink !== undefined) { 29 | return callNext(nextLink).then((res) => { 30 | context.log(res); 31 | return loop(res.odatanextLink, callNext); 32 | }); 33 | } 34 | 35 | return Promise.resolve(); 36 | }; -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 James Sturtevant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | bin 3 | obj 4 | csx 5 | .vs 6 | edge 7 | Publish 8 | .vscode 9 | 10 | *.user 11 | *.suo 12 | *.cscfg 13 | *.Cache 14 | project.lock.json 15 | 16 | /packages 17 | /TestResults 18 | 19 | /tools/NuGet.exe 20 | /App_Data 21 | /secrets 22 | /data 23 | .secrets 24 | appsettings.json 25 | local.settings.json 26 | 27 | # Logs 28 | logs 29 | *.log 30 | npm-debug.log* 31 | yarn-debug.log* 32 | yarn-error.log* 33 | 34 | # Runtime data 35 | pids 36 | *.pid 37 | *.seed 38 | *.pid.lock 39 | 40 | # Directory for instrumented libs generated by jscoverage/JSCover 41 | lib-cov 42 | 43 | # Coverage directory used by tools like istanbul 44 | coverage 45 | 46 | # nyc test coverage 47 | .nyc_output 48 | 49 | # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files) 50 | .grunt 51 | 52 | # Bower dependency directory (https://bower.io/) 53 | bower_components 54 | 55 | # node-waf configuration 56 | .lock-wscript 57 | 58 | # Compiled binary addons (http://nodejs.org/api/addons.html) 59 | build/Release 60 | 61 | # Dependency directories 62 | node_modules/ 63 | jspm_packages/ 64 | 65 | # Typescript v1 declaration files 66 | typings/ 67 | 68 | # Optional npm cache directory 69 | .npm 70 | 71 | # Optional eslint cache 72 | .eslintcache 73 | 74 | # Optional REPL history 75 | .node_repl_history 76 
| 77 | # Output of 'npm pack' 78 | *.tgz 79 | 80 | # Yarn Integrity file 81 | .yarn-integrity 82 | 83 | # dotenv environment variables file 84 | .env 85 | 86 | -------------------------------------------------------------------------------- /functions/CreateJob/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js'); 3 | 4 | module.exports = function (context, req) { 5 | context.log('processing...'); 6 | 7 | var batch_client = helpers.batchClientFactory(); 8 | 9 | var poolid = "pool" + req.body.poolid; 10 | var jobid = "job" + req.body.jobid; 11 | 12 | batch_client.pool.exists(poolid).then(exists => { 13 | if (!exists){ 14 | context.res = { 15 | status: 404, 16 | body: "poolid does not exist" 17 | }; 18 | 19 | context.done(); 20 | } 21 | 22 | var pool_config = {poolId:poolid}; 23 | 24 | //pre-load image for fast task runs times 25 | var job_prep_task_config = { 26 | id: "installprereq", 27 | commandLine: "docker pull jsturtevant/pyprocessor", 28 | userIdentity: { 29 | autoUser: { 30 | elevationLevel: 'admin' 31 | } 32 | }, 33 | waitForSuccess: true 34 | }; 35 | 36 | var job_config = { 37 | id:jobid, 38 | displayName:"process audio files", 39 | jobPreparationTask:job_prep_task_config, 40 | poolInfo:pool_config 41 | } 42 | 43 | var job = batch_client.job.add(job_config).then(_ => { 44 | context.log('Added Job'); 45 | context.done(); 46 | }).catch((err) => { 47 | context.log('An error occurred.'); 48 | context.log(err); 49 | context.done(); 50 | }); 51 | }); 52 | }; -------------------------------------------------------------------------------- /functions/AddTasks/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js'); 3 | var crypto = require('crypto'); 4 | 5 | module.exports = function (context, req) { 6 | 
context.log('processing...'); 7 | 8 | var batch_client = helpers.batchClientFactory(); 9 | 10 | // Create a unique Azure Batch pool ID 11 | var jobid = "job" + req.body.jobid; 12 | var forcePull = req.body.forcePull; 13 | var imageName = req.body.imageName; 14 | var makeTasksRandom = req.body.makeTasksRandom; 15 | 16 | // track id for http://freemusicarchive.org/music/Monplaisir/Free_To_Use/Monplaisir_-_Free_To_Use_-_13_Free_To_Use_13 17 | // http://freemusicarchive.org/services/track/single/152622.json 18 | var tasksToAdd = ["152622"] 19 | 20 | var tasks = []; 21 | tasksToAdd.forEach(function(val,index){ 22 | var taskName = `track_${val}`; 23 | if (makeTasksRandom){ 24 | // https://stackoverflow.com/a/14869745 25 | var id = crypto.randomBytes(10).toString('hex'); 26 | taskName = taskName.concat("_",id); 27 | } 28 | 29 | var commands = [`docker run ${imageName} ${val}`]; 30 | if (forcePull){ 31 | // don't know which node this will be run on so force on all tasks. 32 | commands.unshift(`docker pull ${imageName}`) 33 | } 34 | 35 | var taskConfig = { 36 | id: taskName, 37 | displayName: taskName, 38 | commandLine: helpers.wrapInShell(commands), 39 | userIdentity: { 40 | autoUser: { 41 | elevationLevel: 'admin' 42 | } 43 | }, 44 | }; 45 | 46 | context.log(`adding task ${taskName} to list`); 47 | tasks.push(taskConfig); 48 | }); 49 | 50 | batch_client.task.addCollection(jobid, tasks).then((tc) => { 51 | context.log(`added collection of tasks`); 52 | context.log(tc); 53 | //todo handle each task completion independently. 
54 | 55 | context.done(); 56 | }).catch(err => { 57 | context.log(`An error occurred processing...`); 58 | context.log(err); 59 | context.done(); 60 | }); 61 | }; 62 | 63 | -------------------------------------------------------------------------------- /functions/CreatePool/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js'); 3 | 4 | module.exports = function (context, req) { 5 | context.log('processing...'); 6 | 7 | var batch_client = helpers.batchClientFactory(); 8 | 9 | // Create a unique Azure Batch pool ID 10 | var poolid = "pool" + req.body.poolid; 11 | 12 | context.log(`Creating new pool ${poolid}...`); 13 | batch_client.account.listNodeAgentSkus().then((agentNodes) => { 14 | context.log(agentNodes); 15 | 16 | var agentNode = agentNodes.filter(x => x.id === 'batch.node.ubuntu 16.04')[0]; 17 | var verifiedImage = agentNode.verifiedImageReferences[0]; 18 | 19 | var vmconfig = {imageReference:verifiedImage, 20 | nodeAgentSKUId:"batch.node.ubuntu 16.04"}; 21 | var vmSize = "STANDARD_A1"; 22 | var numVMs = 2; 23 | 24 | var sastoken = helpers.generateSasToken("azurebatch", "docker_install_start_task.sh") 25 | 26 | var poolConfig = { 27 | id: poolid, 28 | displayName: poolid, 29 | vmSize: vmSize, 30 | virtualMachineConfiguration: vmconfig, 31 | targetDedicatedNodes: numVMs, 32 | targetLowPriorityNodes: numVMs, 33 | startTask: { 34 | commandLine: "./docker_install_start_task.sh > startup.log", 35 | resourceFiles: [{ 36 | blobSource: sastoken.uri, 37 | filePath: 'docker_install_start_task.sh' 38 | }], 39 | userIdentity: { 40 | autoUser: { 41 | elevationLevel: 'admin' 42 | } 43 | }, 44 | waitForSuccess: true 45 | }, 46 | enableAutoScale: false 47 | }; 48 | 49 | batch_client.pool.exists(poolid).then(exists => { 50 | if (exists){ 51 | context.log("already exists"); 52 | context.done(); 53 | } 54 | 55 | 
batch_client.pool.add(poolConfig).then(() =>{ 56 | context.log('pool added.') 57 | context.done(); 58 | }); 59 | }); 60 | }).catch((err) => { 61 | context.log('An error occurred.'); 62 | context.log(err); 63 | context.done(); 64 | }); 65 | }; -------------------------------------------------------------------------------- /functions/EvaluateAutoScale/autoScaleEvaluator.js: -------------------------------------------------------------------------------- 1 | const moment = require('moment'); 2 | 3 | module.exports = class AutoScaleEvaluator { 4 | constructor(pool){ 5 | this.pool = pool; 6 | } 7 | 8 | evaluateAutoScale(poolId, maxNodes){ 9 | return this.pool.get(poolId).then((poolInfo) => { 10 | return isAutoScaleEnabled(poolInfo); 11 | }).then(poolInfo => { 12 | return ensureAutoScaleSet(batch_client, poolInfo); 13 | }).then(_ => { 14 | return executeEvaluateAutoScale(poolId, maxNodes); 15 | }); 16 | } 17 | 18 | isAutoScaleEnabled(poolInfo){ 19 | if(poolInfo.state != "active") 20 | { 21 | return Promise.reject({code: "notActive"}); 22 | } 23 | 24 | return Promise.resolve(poolInfo); 25 | } 26 | 27 | ensureAutoScaleSet(poolInfo){ 28 | if (poolInfo.enableAutoScale == true) 29 | { 30 | return Promise.resolve(); 31 | } 32 | 33 | var autoScaleProperties ={ 34 | autoScaleFormula: `$TargetLowPriorityNodes = ${poolInfo.currentLowPriorityNodes};`, 35 | autoScaleEvaluationInterval: moment.duration(5, 'minutes') 36 | }; 37 | 38 | //enable if first and set to current. 39 | return this.pool.enableAutoScale(poolInfo.poolId, autoScaleProperties) 40 | } 41 | 42 | executeEvaluateAutoScale(poolId, maxNodes){ 43 | var myFormula = `maxNodes = ${maxNodes}; 44 | 45 | // Get pending tasks for the past 15 minutes. 46 | // If we have fewer than 70 percent data points, we use the last sample point, 47 | // otherwise we use the maximum of last sample point and the history average. 48 | $samples = $ActiveTasks.GetSamplePercent(TimeInterval_Minute * 15); 49 | $tasks = $samples < 70 ? 
max(0,$ActiveTasks.GetSample(1)) : max( $ActiveTasks.GetSample(1), avg($ActiveTasks.GetSample(TimeInterval_Minute * 15))); 50 | 51 | // If number of pending tasks is not 0, set targetVM to pending tasks, otherwise 52 | // half of current LowPriority. 53 | // The pool size is capped at maxNodes (4), if target VM value is more than that, set it 54 | // to maxNodes. This value should be adjusted according to your use case. 55 | $targetVMs = $tasks > 0? $tasks:max(0, $TargetLowPriorityNodes/2); 56 | 57 | $TargetLowPriorityNodes = max(0, min($targetVMs, maxNodes));`; 58 | 59 | return this.pool.evaluateAutoScale(poolId, myFormula); 60 | } 61 | 62 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # azure-batch-functions 2 | Nodejs project working with Docker and [Azure Batch](https://azure.microsoft.com/en-us/services/batch/). 3 | 4 | ## Developing 5 | 6 | ### Azure Batch 7 | Get the [Azure Cli 2.0](https://docs.microsoft.com/en-us/cli/azure/overview) or use ```docker run -it azuresdk/azure-cli-python```. 8 | 9 | Set your subscription: 10 | 11 | ``` 12 | az login #follow the instructions 13 | az account list 14 | az account set --subscription . 15 | ``` 16 | 17 | Create azure batch: 18 | 19 | ``` 20 | az group create --name azurebatchfunctionsrg --location eastus 21 | az provider register -n Microsoft.Batch 22 | az provider show -n Microsoft.Batch -o table #run a few times and wait until it says registered 23 | az batch account create -l eastus -g azurebatchfunctionsrg -n myazurebatch 24 | ``` 25 | 26 | ### Azure functions Component 27 | To learn about [testing and debugging Azure functions locally read the docs](https://docs.microsoft.com/en-us/azure/azure-functions/functions-run-local). To get started with this project: 28 | 29 | 1. Install azure functions runtime ```npm i -g azure-functions-core-tools``` 30 | 2. 
Clone the repo and cd into the folder 31 | 3. Run ```npm install``` 32 | 4. Run ```npm test``` to run unit tests 33 | 5. Rename ```example.settings.json``` to ```local.settings.json``` 34 | 6. Upload [docker_install_start_task.sh](docker/docker_install_start_task.sh) to a storage account. 35 | 7. Update settings in ```local.settings.json```: 36 | 37 | - get your batch keys by running ```az batch account keys list -g azurebatchfunctionsrg -n myazurebatch``` 38 | - get the storage account key 39 | 40 | ![get your storage keys in azure portal](/Assets/storage-keys-howto.png) 41 | 42 | ### Test function 43 | The following command will start the function host process in a separate terminal and call the function ```GetPoolInfo``` passing the json object as body to request. For any of the other functions replace the name (some don't need the sample data as well). 44 | 45 | ``` 46 | func run GetPoolInfo -f GetPoolInfo/sample.dat 47 | ``` 48 | 49 | #### Add a new function 50 | 51 | ``` 52 | func new --language JavaScript --template HttpTrigger --name NameOfFunction 53 | ``` 54 | 55 | ## Other info 56 | Find the docs for the node.js library at http://azure.github.io/azure-sdk-for-node/azure-batch/latest/ 57 | 58 | Azure Batch has a useful (if early release) cross-platform Batch view at https://github.com/Azure/BatchLabs. As the project is very young there are a few missing features and you have to build the project yourself. 
59 | -------------------------------------------------------------------------------- /functions/EvaluateAutoScale/index.js: -------------------------------------------------------------------------------- 1 | const batch = require('azure-batch'), 2 | helpers = require('../helpers/helpers'), 3 | validator = require('validator'), 4 | os = require("os"), 5 | AutoScaleEvaluator = require('./autoScaleEvaluator'); 6 | 7 | module.exports = function (context, req) { 8 | context.log('processing...'); 9 | 10 | const batch_client = helpers.batchClientFactory(); 11 | 12 | if (!req.body.poolid || validator.isEmpty(req.body.poolid)) { 13 | context.log("Invalid response"); 14 | context.res = { status: 400, body: 'must pass poolid' }; 15 | context.done(); 16 | return; 17 | } 18 | 19 | if (!req.body.maxNodes || validator.isEmpty(req.body.maxNodes.toString()) || !validator.isNumeric(req.body.maxNodes.toString())) { 20 | context.log("Invalid response"); 21 | context.res = { status: 400, body: 'must pass maxNodes' }; 22 | context.done(); 23 | return; 24 | } 25 | 26 | const poolId = req.body.poolid; 27 | const maxNodes = req.body.maxNodes; 28 | 29 | let evaluator = new AutoScaleEvaluator(batch_client.pool); 30 | 31 | evaluator.evaluateAutoScale(poolId, maxNodes).then(evalResult => { 32 | context.log("Auto Scale Results:"); 33 | context.log(evalResult.results.replace("\\$/gi", os.EOL + "\t$")); 34 | 35 | const extractedResults = extractResults(evalResult.results); 36 | 37 | context.res = { 38 | status: 200, 39 | body: { 40 | TargetDedicatedNodes: extractedResults.dedicatedNodes, 41 | TargetLowPriorityNodes: extractedResults.lowPriorityNodes, 42 | Raw: evalResult.results 43 | } 44 | }; 45 | 46 | context.done(); 47 | }).catch(err => { 48 | context.log('An error occurred.'); 49 | printErrors(context, err); 50 | context.res = { status: 500 }; 51 | context.done(); 52 | }); 53 | }; 54 | 55 | function extractResults(results){ 56 | const dedicatedNodes = 
results.match("\\$TargetDedicatedNodes=([0-9]{0,});"); 57 | const lowPriorityNodes = results.match("\\$TargetLowPriorityNodes=([0-9]{0,});"); 58 | 59 | return { 60 | dedicatedNodes: (dedicatedNodes && dedicatedNodes.length > 1) ? parseInt(dedicatedNodes[1]) : 0, 61 | lowPriorityNodes: (lowPriorityNodes && lowPriorityNodes.length) > 1 ? parseInt(lowPriorityNodes[1]) :0 62 | } 63 | } 64 | 65 | 66 | function printErrors(context, err) { 67 | if (err.body) { 68 | context.log(err.body.code); 69 | context.log(err.body.message); 70 | context.log(err.body.values); 71 | } else if (err.code && err.code === "notActive") { 72 | context.log("pool is not active"); 73 | } else { 74 | context.log(err); 75 | } 76 | } -------------------------------------------------------------------------------- /functions/helpers/helpers.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var azure = require('azure-storage'); 3 | 4 | module.exports = { 5 | batchClientFactory: function () { 6 | var accountName = process.env.batchAccountName; 7 | var accountKey = process.env.batchAccountKey; 8 | var accountUrl = process.env.batchAccountUrl; 9 | 10 | var credentials = new batch.SharedKeyCredentials(accountName, accountKey); 11 | return new batch.ServiceClient(credentials, accountUrl); 12 | }, 13 | 14 | /** 15 | * Wait for all promises. https://stackoverflow.com/a/31424853 16 | * 17 | * Promise.all(promises.map(helpers.reflect)).then(function(results){ 18 | * var success = results.filter(x => x.status === "resolved"); 19 | * }); 20 | * 21 | * @param {Promise} promise 22 | * @return {Array} 23 | */ 24 | reflect: function (promise) { 25 | return promise.then(function (v) { return { v: v, status: "resolved" } }, 26 | function (e) { return { e: e, status: "rejected" } }); 27 | }, 28 | 29 | /** 30 | * An HTTP trigger Azure Function that returns a SAS token for Azure Storage for the specified container. 
     * You can also optionally specify a particular blob name and access permissions.
     *
     * Modified from:
     * The MIT License (MIT) Copyright (c) 2015 Microsoft Corporation
     * To learn more, see https://github.com/Azure-Samples/functions-dotnet-sas-token/blob/master/README.md
     *
     * @param {string} container - name of the blob container to grant access to
     * @param {string} blobName - optional blob to scope the token to
     * @param {string} permissions - azure.BlobUtilities.SharedAccessPermissions value; defaults to READ
     * @returns {{token: string, uri: string}} the SAS token and a full URL carrying it
     */
    generateSasToken: function generateSasToken(container, blobName, permissions) {
        var connString = process.env.storageConnectionString;
        var blobService = azure.createBlobService(connString);

        // Create a SAS token that is valid for 12 hours (see setHours below).
        // Set start time to five minutes ago to avoid clock skew.
        var startDate = new Date();
        startDate.setMinutes(startDate.getMinutes() - 5);
        var expiryDate = new Date(startDate);
        expiryDate.setHours(startDate.getHours() + 12);

        permissions = permissions || azure.BlobUtilities.SharedAccessPermissions.READ;

        var sharedAccessPolicy = {
            AccessPolicy: {
                Permissions: permissions,
                Start: startDate,
                Expiry: expiryDate
            }
        };

        var sasToken = blobService.generateSharedAccessSignature(container, blobName, sharedAccessPolicy);

        return {
            token: sasToken,
            // `true` -> primary endpoint host in the returned URL
            uri: blobService.getUrl(container, blobName, sasToken, true)
        };
    },

    /**
     * Wrap commands in shell.
68 | * 69 | * https://github.com/Azure/azure-batch-samples/tree/master/Python/Batch#azure-batch-on-linux-best-practices 70 | */ 71 | wrapInShell: function(commands){ 72 | return `/bin/bash -c \'set -e; set -o pipefail; ${commands.join(';')}; wait\'`; 73 | } 74 | } -------------------------------------------------------------------------------- /functions/SetAutoScale/index.js: -------------------------------------------------------------------------------- 1 | var batch = require('azure-batch'); 2 | var helpers = require('../helpers/helpers.js'); 3 | var os = require("os"); 4 | var moment = require('moment'); 5 | 6 | module.exports = function (context, req) { 7 | context.log('processing...'); 8 | 9 | var batch_client = helpers.batchClientFactory(); 10 | 11 | var poolid = req.body.poolid; 12 | var maxNodes = req.body.maxNodes; 13 | 14 | batch_client.pool.get(poolid).then((poolinfo) => { 15 | context.log(`pool state: ${poolinfo.state}`); 16 | 17 | if(poolinfo.state != "active") 18 | { 19 | console.log("Pool is not active"); 20 | context.done(); 21 | } 22 | 23 | if (poolinfo.enableAutoScale == false) 24 | { 25 | context.log('Auto Scale is not enabled. You can enable it via Evaluate endpoint (which forces you to test results).'); 26 | context.done(); 27 | } 28 | 29 | if (poolinfo.enableAutoScale == true) 30 | { 31 | context.log('Auto Scale is enabled.'); 32 | 33 | var autoScaleProperties ={ 34 | autoScaleFormula: autoScaleFormula(maxNodes), 35 | autoScaleEvaluationInterval: moment.duration(5, 'minutes') 36 | }; 37 | 38 | //enable if first and set to current. 
39 | batch_client.pool.enableAutoScale(poolid, autoScaleProperties).then((result) =>{ 40 | context.log('Auto Scale set.'); 41 | context.log(result); 42 | context.done(); 43 | }).catch(err => { 44 | context.log('An error occurred.'); 45 | 46 | if (err.body){ 47 | context.log(err.body.code); 48 | context.log(err.body.message); 49 | 50 | context.log(err.body.values); 51 | }else{ 52 | context.log(err); 53 | } 54 | 55 | context.done(); 56 | }); 57 | } 58 | 59 | }).catch(err => { 60 | context.log('An error occurred.'); 61 | context.log(err); 62 | context.done(); 63 | }); 64 | 65 | }; 66 | 67 | function autoScaleFormula(maxNodes){ 68 | return `maxNodes = ${maxNodes}; 69 | 70 | // Get pending tasks for the past 15 minutes. 71 | // If we have fewer than 70 percent data points, we use the last sample point, 72 | // otherwise we use the maximum of last sample point and the history average. 73 | $samples = $ActiveTasks.GetSamplePercent(TimeInterval_Minute * 15); 74 | $tasks = $samples < 70 ? max(0,$ActiveTasks.GetSample(1)) : max( $ActiveTasks.GetSample(1), avg($ActiveTasks.GetSample(TimeInterval_Minute * 15))); 75 | 76 | // If number of pending tasks is not 0, set targetVM to pending tasks, otherwise 77 | // half of current LowPriority. 78 | // The pool size is capped at maxNodes (4), if target VM value is more than that, set it 79 | // to maxNodes. This value should be adjusted according to your use case. 80 | $targetVMs = $tasks > 0? 
$tasks:max(0, $TargetLowPriorityNodes/2);

$TargetLowPriorityNodes = max(0, min($targetVMs, maxNodes));`;
}
-------------------------------------------------------------------------------- /tests/autoscaleEvaluator.test.js: --------------------------------------------------------------------------------
// Unit tests for AutoScaleEvaluator. The Batch pool API is replaced with a
// testdouble object so no real service calls are made.
const test = require('tape-catch'),
    td = require('testdouble'),
    moment = require('moment');

const AutoScaleEvaluator = require('../functions/EvaluateAutoScale/autoScaleEvaluator');

test('Auto Scale evaluator', function (group) {
    // Fake of the two pool operations the evaluator uses; enableAutoScale
    // resolves with an empty object for any arguments.
    var poolFake = td.object(['enableAutoScale', 'evaluateAutoScale']);
    td.when(poolFake.enableAutoScale(td.matchers.anything(), td.matchers.anything())).thenResolve({});

    var evaluator = new AutoScaleEvaluator(poolFake);

    group.test('if pool is not active reject', function (t) {
        t.plan(1);

        evaluator.isAutoScaleEnabled({
            state: "notactive"
        }).then(_ => {
            t.fail("it was not rejected");
        }).catch(err => {
            t.pass("should reject");
        });
    });

    group.test('if pool info is null reject', function (t) {
        t.plan(1);

        evaluator.isAutoScaleEnabled({
            state: null
        }).then(_ => {
            t.fail("it was not rejected");
        }).catch(err => {
            t.pass("should reject");
        });
    });

    group.test('if pool info is empty reject', function (t) {
        t.plan(1);

        evaluator.isAutoScaleEnabled({
            state: null
        }).then(_ => {
            t.fail("it was not rejected");
        }).catch(err => {
            t.pass("should reject");
        });
    });

    group.test('if pool info is "active" resolve with pool info', function (t) {
        t.plan(1);

        evaluator.isAutoScaleEnabled({
            state: "active"
        }).then(poolInfo => {
            // Resolves with the same pool info object it was given.
            t.same(poolInfo, {
                state: "active"
            })
        }).catch(err => {
            t.fail("should not reject");
        });
    });

    group.test('if auto scale is already enabled all ready true then resolve', function (t) {
        t.plan(1);

        evaluator.ensureAutoScaleSet({
            enableAutoScale: true
        }).then(_ => {
            t.pass("should resolve")
        }).catch(err => {
            t.fail("should not reject");
        });
    });

    group.test('set auto scale should return promise', function (t) {
        t.plan(1);

        evaluator.ensureAutoScaleSet({
            poolId: "testpool",
            currentLowPriorityNodes: 2
        }).then(_ => {
            // Verify the fake pool was asked to enable auto scale for the pool.
            td.verify(poolFake.enableAutoScale("testpool", td.matchers.anything()));
            t.pass("should resolve");
        }).catch(err => {
            // Dump the double's call history to help diagnose the failure.
            console.log(td.explain(poolFake.enableAutoScale));
            t.fail("should not reject");
        });
    });

    group.test('executeEvaluateAutoScale auto scale should call evaluate with correct max nodes', function (t) {
        t.plan(1);

        evaluator.executeEvaluateAutoScale("testpool", 5);

        // The generated formula must embed the requested maxNodes cap.
        td.verify(poolFake.evaluateAutoScale("testpool", td.matchers.contains('maxNodes = 5;')));
        t.pass();
    });

    group.end();
});
-------------------------------------------------------------------------------- /deploy.cmd: --------------------------------------------------------------------------------
@if "%SCM_TRACE_LEVEL%" NEQ "4" @echo off

:: ----------------------
:: KUDU Deployment Script
:: Version: 1.0.15
:: ----------------------

:: Prerequisites
:: -------------

:: Verify node.js installed
where node 2>nul >nul
IF %ERRORLEVEL% NEQ 0 (
  echo Missing node.js executable, please install node.js, if already installed make sure it can be reached from current environment.
  goto error
)

:: Setup
:: -----

setlocal enabledelayedexpansion

SET ARTIFACTS=%~dp0%..\artifacts

IF NOT DEFINED DEPLOYMENT_SOURCE (
  SET DEPLOYMENT_SOURCE=%~dp0%.
)

IF NOT DEFINED DEPLOYMENT_TEMP (
  SET DEPLOYMENT_TEMP=%~dp0%..\temp
)

IF NOT DEFINED DEPLOYMENT_TARGET (
  SET DEPLOYMENT_TARGET=%ARTIFACTS%\wwwroot
)

IF NOT DEFINED NEXT_MANIFEST_PATH (
  SET NEXT_MANIFEST_PATH=%ARTIFACTS%\manifest

  IF NOT DEFINED PREVIOUS_MANIFEST_PATH (
    SET PREVIOUS_MANIFEST_PATH=%ARTIFACTS%\manifest
  )
)

IF NOT DEFINED KUDU_SYNC_CMD (
  :: Install kudu sync
  echo Installing Kudu Sync
  call npm install kudusync -g --silent
  IF !ERRORLEVEL! NEQ 0 goto error

  :: Locally just running "kuduSync" would also work
  SET KUDU_SYNC_CMD=%appdata%\npm\kuduSync.cmd
)

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
:: Deployment
:: ----------

echo Handling function App deployment.

call :DeployWithoutFuncPack

goto end

:: Copies source to temp storage, runs the unit tests there, kudu-syncs the
:: functions folder to the target, then restores production npm packages.
:DeployWithoutFuncPack
setlocal

echo Not using funcpack because SCM_USE_FUNCPACK is not set to 1

:: 1. Copy to local storage
echo Copying all files to local storage
xcopy "%DEPLOYMENT_SOURCE%" "%DEPLOYMENT_TEMP%" /seyiq
IF !ERRORLEVEL! NEQ 0 goto error

:: 2. Restore npm for test
call :RestoreNpmPackages "%DEPLOYMENT_TEMP%" "test"

echo running unit tests
pushd "%DEPLOYMENT_TEMP%"
call npm test
IF !ERRORLEVEL! NEQ 0 goto error
popd

IF /I "%IN_PLACE_DEPLOYMENT%" NEQ "1" (
  call :ExecuteCmd "%KUDU_SYNC_CMD%" -v 50 -f "%DEPLOYMENT_SOURCE%\functions" -t "%DEPLOYMENT_TARGET%" -n "%NEXT_MANIFEST_PATH%" -p "%PREVIOUS_MANIFEST_PATH%" -i ".git;.hg;.deployment;deploy.cmd"
  IF !ERRORLEVEL! NEQ 0 goto error
)

echo Copying package.json
xcopy "%DEPLOYMENT_SOURCE%\package.json" "%DEPLOYMENT_TARGET%" /yiq
IF !ERRORLEVEL! NEQ 0 goto error

:: 2. Restore npm
call :RestoreNpmPackages "%DEPLOYMENT_TARGET%" "prod"

exit /b %ERRORLEVEL%

:: Restores npm packages in %1 and in each of its immediate subdirectories
:: that contain a package.json. %2 selects "test" (full install) or production.
:RestoreNpmPackages
setlocal

echo Restoring npm packages in %1
echo restore is %2

IF EXIST "%1\package.json" (
  pushd "%1"

  if %2 == "test" (
    echo calling install
    call npm install
  ) else (
    echo calling install production
    call npm install --production
  )

  IF !ERRORLEVEL! NEQ 0 goto error
  popd
)

FOR /F "tokens=*" %%i IN ('DIR /B %1 /A:D') DO (
  IF EXIST "%1\%%i\package.json" (
    pushd "%1\%%i"
    if %2 == "test" (
      call npm install
    ) else (
      call npm install --production
    )
    IF !ERRORLEVEL! NEQ 0 goto error
    popd
  )
)

exit /b %ERRORLEVEL%

::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
goto end

:: Execute command routine that will echo out when error
:ExecuteCmd
setlocal
set _CMD_=%*
call %_CMD_%
if "%ERRORLEVEL%" NEQ "0" echo Failed exitCode=%ERRORLEVEL%, command=%_CMD_%
exit /b %ERRORLEVEL%

:error
endlocal
echo An error has occurred during web site deployment.
call :exitSetErrorLevel
:: Calling the malformed label below aborts the whole script with the error
:: level set above; stderr is suppressed to hide the resulting syntax error.
call :exitFromFunction 2>nul

:exitSetErrorLevel
exit /b 1

:exitFromFunction
()

:end
endlocal
echo Finished successfully.
-------------------------------------------------------------------------------- /tests/evaluateautoscale.test.js: --------------------------------------------------------------------------------
// Integration-style tests for the EvaluateAutoScale function, executed through
// the azure-functions-node-harness. Project modules are intercepted with
// td.replace BEFORE the harness loads the function, so the handler receives
// the doubles instead of the real helpers/evaluator.
const test = require('tape-catch'),
    td = require('testdouble'),
    funcHarness = require('azure-functions-node-harness');

// overrides
const helpers = td.replace('../functions/helpers/helpers');
const AutoScaleEvaluator = td.replace('../functions/EvaluateAutoScale/autoScaleEvaluator');

test('Evaluate AutoScale Tests', function (group) {
    const funcToTest = funcHarness('EvaluateAutoScale', { dirname: 'functions' });

    // The handler only needs a pool object to hand to the evaluator double.
    td.when(helpers.batchClientFactory()).thenReturn({pool: {}});


    group.test('if poolid is empty then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "",
                "maxNodes": 4
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if poolid is null then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": null,
                "maxNodes": 4
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if poolid is missing then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "maxNodes": 4
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if maxnodes is empty then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": ""
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if maxnodes is null then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": null
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if maxnodes is missing then return status 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool"
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if maxnodes not a number return 400', function (t) {
        t.plan(1);

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": "some"
            }
        }).then(context => {
            t.equal(context.res.status, 400);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if valid parameters should return 200', function (t) {
        t.plan(1);

        // Stub the evaluator the handler constructs internally.
        td.when(AutoScaleEvaluator.prototype.evaluateAutoScale("testpool", "1")).thenResolve({results:""})

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": "1"
            }
        }).then(context => {
            t.equal(context.res.status, 200);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if valid max node is number should return 200', function (t) {
        t.plan(1);

        // maxNodes passed as an actual number rather than a numeric string.
        td.when(AutoScaleEvaluator.prototype.evaluateAutoScale("testpool", 1)).thenResolve({results:""})

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": 1
            }
        }).then(context => {
            t.equal(context.res.status, 200);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('on success should parse results to return object', function (t) {
        t.plan(4);

        td.when(AutoScaleEvaluator.prototype.evaluateAutoScale("testpool", "1"))
            .thenResolve({results:"$TargetDedicatedNodes=10;$TargetLowPriorityNodes=5;$NodeDeallocationOption=requeue;$workHours=0"})

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": "1"
            }
        }).then(context => {
            // Node targets are parsed out of the raw formula result; the raw
            // string is also echoed back unchanged.
            t.equal(context.res.status, 200);
            t.equal(context.res.body.TargetDedicatedNodes, 10);
            t.equal(context.res.body.TargetLowPriorityNodes, 5);
            t.equal(context.res.body.Raw, "$TargetDedicatedNodes=10;$TargetLowPriorityNodes=5;$NodeDeallocationOption=requeue;$workHours=0")
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.test('if valid parameters but evaluator throws exception then 500', function (t) {
        t.plan(1);

        td.when(AutoScaleEvaluator.prototype.evaluateAutoScale("testpool", "1")).thenReject({err:"something blew up"})

        funcToTest.invokeHttpTrigger({
            reqBody: {
                "poolid": "testpool",
                "maxNodes": "1"
            }
        }).then(context => {
            t.equal(context.res.status, 500);
        }).catch(err => {
            t.fail(`something went wrong: ${err}`);
        });
    });

    group.end();
});