├── .gitignore ├── .gitmodules ├── .npmignore ├── LICENSE ├── Makefile ├── Makefile.deps ├── Makefile.targ ├── README.md ├── bin ├── workflow-api └── workflow-runner ├── docs ├── index.md ├── media │ └── img │ │ ├── favicon.ico │ │ └── logo.png ├── motivation.md └── workflowapi.md ├── example.js ├── lib ├── api.js ├── child.js ├── errors.js ├── index.js ├── job-runner.js ├── make-emitter.js ├── runner.js ├── task-runner.js ├── workflow-factory.js └── workflow-in-memory-backend.js ├── package.json ├── test ├── api.test.js ├── child.test.js ├── config.json.sample ├── config.nofork.sample ├── errors.test.js ├── helper.js ├── in-memory-backend.test.js ├── job-runner.test.js ├── runner.test.js └── task-runner.test.js └── tools ├── bashstyle ├── jsl.node.conf ├── jsl.web.conf ├── npmfreeze.js └── service_bundle.dtd.1 /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | var/* 3 | *.tar.gz 4 | *.tar.bz2 5 | *.log 6 | coverage 7 | docs/*.json 8 | docs/*.html 9 | build 10 | examples/*.log 11 | examples/*.json 12 | workflow-indentifier 13 | *.vim 14 | *.javascript 15 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "deps/restdown"] 2 | path = deps/restdown 3 | url = https://github.com/trentm/restdown.git 4 | [submodule "deps/javascriptlint"] 5 | path = deps/javascriptlint 6 | url = https://github.com/davepacheco/javascriptlint.git 7 | [submodule "deps/jsstyle"] 8 | path = deps/jsstyle 9 | url = https://github.com/joyent/jsstyle.git 10 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | *.vim 2 | .git* 3 | .travis.yml 4 | build 5 | deps 6 | docs 7 | example.js 8 | examples 9 | node_modules 10 | tools 11 | Makefile* 12 | *.tar.gz 13 | *.log 14 | logs 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2017 Joyent, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2012, Joyent, Inc. All rights reserved. 3 | # Copyright (c) 2016, Joyent, Inc. 4 | # 5 | 6 | # 7 | # Tools 8 | # 9 | NPM := $(shell which npm) 10 | TAP := ./node_modules/.bin/tap 11 | RESTDOWN_FLAGS := -b deps/restdown/brand/spartan 12 | 13 | # 14 | # Files 15 | # 16 | DOC_FILES = index.md motivation.md workflowapi.md 17 | JS_FILES := $(shell ls *.js) $(shell find lib test -name '*.js') 18 | JSL_CONF_NODE = tools/jsl.node.conf 19 | JSL_FILES_NODE = $(JS_FILES) 20 | JSSTYLE_FILES = $(JS_FILES) 21 | JSSTYLE_FLAGS = -o indent=4,doxygen,unparenthesized-return=0 22 | # SMF_MANIFESTS = smf/manifests/bapi.xml 23 | 24 | # 25 | # Repo-specific targets 26 | # 27 | .PHONY: all 28 | all: test check 29 | 30 | .PHONY: setup 31 | setup: $(NPM) 32 | $(NPM) install 33 | 34 | .PHONY: test 35 | test: setup nofork $(TAP) 36 | TAP_TIMEOUT=80 $(TAP) test/*.test.js 37 | 38 | .PHONY: nofork 39 | nofork: setup $(TAP) 40 | TAP_TIMEOUT=80 TEST_CONFIG_FILE=config.nofork.sample $(TAP) test/runner.test.js 41 | 42 | .PHONY: coverage 43 | coverage: $(TAP) 44 | TAP_COV=1 $(TAP) test/*.test.js 45 | 46 | include ./Makefile.deps 47 | include ./Makefile.targ 48 | -------------------------------------------------------------------------------- /Makefile.deps: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2012, Joyent, Inc. All rights reserved. 3 | # 4 | # Makefile.deps: Makefile for including common tools as dependencies 5 | # 6 | # This file is separate from Makefile.targ so that teams can choose 7 | # independently whether to use the common targets in Makefile.targ and the 8 | # common tools here. 9 | # 10 | 11 | # 12 | # javascriptlint 13 | # 14 | JSL_SCRIPT = deps/javascriptlint/build/install/jsl 15 | JSL = python $(JSL_SCRIPT) 16 | 17 | $(JSL_SCRIPT): | deps/javascriptlint/.git 18 | cd deps/javascriptlint && make install 19 | 20 | deps/javascriptlint/.git: 21 | git submodule update --init deps/javascriptlint 22 | 23 | # 24 | # jsstyle 25 | # 26 | JSSTYLE_SCRIPT = deps/jsstyle/jsstyle 27 | JSSTYLE = $(JSSTYLE_SCRIPT) 28 | 29 | deps/jsstyle/jsstyle: 30 | git submodule update --init deps/jsstyle 31 | 32 | # 33 | # restdown 34 | # 35 | RESTDOWN = python deps/restdown/bin/restdown 36 | 37 | $(RESTDOWN): | deps/restdown/.git 38 | 39 | deps/restdown/.git: 40 | git submodule update --init deps/restdown 41 | 42 | # 43 | # The restdown submodule should be updated before we build "docs". 44 | # 45 | docs: $(RESTDOWN) 46 | 47 | # 48 | # JSL and JSSTYLE must be around before we build "check". 49 | # 50 | check: $(JSL_SCRIPT) $(JSSTYLE) 51 | -------------------------------------------------------------------------------- /Makefile.targ: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2012, Joyent, Inc. All rights reserved. 3 | # 4 | # Makefile.targ: common targets. 5 | # 6 | # NOTE: This makefile comes from the "eng" repo. It's designed to be dropped 7 | # into other repos as-is without requiring any modifications. If you find 8 | # yourself changing this file, you should instead update the original copy in 9 | # eng.git and then update your repo to use the new version. 10 | # 11 | # This Makefile defines several useful targets and rules. You can use it by 12 | # including it from a Makefile that specifies some of the variables below. 13 | # 14 | # Targets defined in this Makefile: 15 | # 16 | # check Checks JavaScript files for lint and style 17 | # Checks bash scripts for syntax 18 | # Checks SMF manifests for validity against the SMF DTD 19 | # 20 | # clean Removes built files 21 | # 22 | # docs Builds restdown documentation in docs/ 23 | # 24 | # prepush Depends on "check" and "test" 25 | # 26 | # test Does nothing (you should override this) 27 | # 28 | # xref Generates cscope (source cross-reference index) 29 | # 30 | # For details on what these targets are supposed to do, see the Joyent 31 | # Engineering Guide. 32 | # 33 | # To make use of these targets, you'll need to set some of these variables. Any 34 | # variables left unset will simply not be used. 35 | # 36 | # BASH_FILES Bash scripts to check for syntax 37 | # (paths relative to top-level Makefile) 38 | # 39 | # CLEAN_FILES Files to remove as part of the "clean" target. Note 40 | # that files generated by targets in this Makefile are 41 | # automatically included in CLEAN_FILES. These include 42 | # restdown-generated HTML and JSON files. 43 | # 44 | # DOC_FILES Restdown (documentation source) files. These are 45 | # assumed to be contained in "docs/", and must NOT 46 | # contain the "docs/" prefix. 47 | # 48 | # JSL_CONF_NODE Specify JavaScriptLint configuration files 49 | # JSL_CONF_WEB (paths relative to top-level Makefile) 50 | # 51 | # Node.js and Web configuration files are separate 52 | # because you'll usually want different global variable 53 | # configurations. If no file is specified, none is given 54 | # to jsl, which causes it to use a default configuration, 55 | # which probably isn't what you want. 56 | # 57 | # JSL_FILES_NODE JavaScript files to check with Node config file. 58 | # JSL_FILES_WEB JavaScript files to check with Web config file. 59 | # 60 | # SMF_MANIFESTS XML files to check for validity using the SMF DTD. 61 | # 62 | # You can also override these variables: 63 | # 64 | # BASH Path to bash (default: bash) 65 | # 66 | # JSL Path to JavaScriptLint (default: "jsl") 67 | # 68 | # JSL_FLAGS_NODE Additional flags to pass through to JSL 69 | # JSL_FLAGS_WEB 70 | # JSL_FLAGS 71 | # 72 | # JSSTYLE Path to jsstyle (default: jsstyle) 73 | # 74 | # JSSTYLE_FLAGS Additional flags to pass through to jsstyle 75 | # 76 | 77 | # 78 | # Defaults for the various tools we use. 79 | # 80 | BASH ?= bash 81 | BASHSTYLE ?= tools/bashstyle 82 | CP ?= cp 83 | CSCOPE ?= cscope 84 | JSL ?= jsl 85 | JSSTYLE ?= jsstyle 86 | MKDIR ?= mkdir -p 87 | MV ?= mv 88 | RESTDOWN ?= restdown 89 | RESTDOWN_FLAGS ?= 90 | RMTREE ?= rm -rf 91 | XMLLINT ?= xmllint --noout 92 | JSL_FLAGS ?= --nologo --nosummary 93 | 94 | # 95 | # Defaults for other fixed values. 96 | # 97 | SMF_DTD ?= tools/service_bundle.dtd.1 98 | 99 | BUILD = build 100 | CLEAN_FILES += $(BUILD) 101 | DOC_BUILD = $(BUILD)/docs/public 102 | 103 | # 104 | # Configure JSL_FLAGS_{NODE,WEB} based on JSL_CONF_{NODE,WEB}. 105 | # 106 | ifneq ($(origin JSL_CONF_NODE), undefined) 107 | JSL_FLAGS_NODE += --conf=$(JSL_CONF_NODE) 108 | endif 109 | 110 | ifneq ($(origin JSL_CONF_WEB), undefined) 111 | JSL_FLAGS_WEB += --conf=$(JSL_CONF_WEB) 112 | endif 113 | 114 | # 115 | # Targets. For descriptions on what these are supposed to do, see the 116 | # Joyent Engineering Guide. 117 | # 118 | # These recipes make heavy use of dynamically-created phony targets. The parent 119 | # Makefile defines a list of input files like BASH_FILES. We then say that each 120 | # of these files depends on a fake target called filename.bashchk, and then we 121 | # define a pattern rule for those targets that runs bash in check-syntax-only 122 | # mode. This mechanism has the nice properties that if you specify zero files, 123 | # the rule becomes a noop (unlike a single rule to check all bash files, which 124 | # would invoke bash with zero files), and you can check individual files from 125 | # the command line with "make filename.bashchk". 126 | # 127 | .PHONY: check-bash 128 | check-bash: $(BASH_FILES:%=%.bashchk) $(BASH_FILES:%=%.bashstyle) 129 | 130 | %.bashchk: % 131 | $(BASH) -n $^ 132 | 133 | %.bashstyle: % 134 | $(BASHSTYLE) $^ 135 | 136 | .PHONY: check-jsl check-jsl-node check-jsl-web 137 | check-jsl: check-jsl-node check-jsl-web 138 | 139 | check-jsl-node: $(JSL_FILES_NODE:%=%.jslnodechk) 140 | 141 | check-jsl-web: $(JSL_FILES_WEB:%=%.jslwebchk) 142 | 143 | %.jslnodechk: % $(JSL_SCRIPT) 144 | $(JSL) $(JSL_FLAGS) $(JSL_FLAGS_NODE) $< 145 | 146 | %.jslwebchk: % $(JSL_SCRIPT) 147 | $(JSL) $(JSL_FLAGS) $(JSL_FLAGS_WEB) $< 148 | 149 | .PHONY: check-jsstyle 150 | check-jsstyle: $(JSSTYLE_FILES:%=%.jsstylechk) 151 | 152 | %.jsstylechk: % $(JSSTYLE_SCRIPT) 153 | $(JSSTYLE) $(JSSTYLE_FLAGS) $< 154 | 155 | .PHONY: check-manifests 156 | check-manifests: $(SMF_MANIFESTS:%=%.smfchk) 157 | 158 | %.smfchk: % 159 | $(XMLLINT) --path $(dir $(SMF_DTD)) --dtdvalid $(SMF_DTD) $^ 160 | 161 | .PHONY: check 162 | check: check-jsl check-jsstyle check-bash check-manifests 163 | @echo check ok 164 | 165 | .PHONY: clean 166 | clean: 167 | -$(RMTREE) $(CLEAN_FILES) 168 | 169 | CSCOPE_FILES = cscope.in.out cscope.out cscope.po.out 170 | CLEAN_FILES += $(CSCOPE_FILES) 171 | 172 | .PHONY: xref 173 | xref: cscope.files 174 | $(CSCOPE) -bqR 175 | 176 | .PHONY: cscope.files 177 | cscope.files: 178 | find . -name '*.c' -o -name '*.h' -o -name '*.cc' -o -name '*.js' \ 179 | -o -name '*.s' -o -name '*.cpp' > $@ 180 | 181 | # 182 | # The "docs" target is complicated because we do several things here: 183 | # 184 | # (1) Use restdown to build HTML and JSON files from each of DOC_FILES. 185 | # 186 | # (2) Copy these files into $(DOC_BUILD) (build/docs/public), which 187 | # functions as a complete copy of the documentation that could be 188 | # mirrored or served over HTTP. 189 | # 190 | # (3) Then copy any directories and media from docs/media into 191 | # $(DOC_BUILD)/media. This allows projects to include their own media, 192 | # including files that will override same-named files provided by 193 | # restdown. 194 | # 195 | # Step (3) is the surprisingly complex part: in order to do this, we need to 196 | # identify the subdirectories in docs/media, recreate them in 197 | # $(DOC_BUILD)/media, then do the same with the files. 198 | # 199 | DOC_MEDIA_DIRS := $(shell find docs/media -type d | grep -v "^docs/media$$") 200 | DOC_MEDIA_DIRS := $(DOC_MEDIA_DIRS:docs/media/%=%) 201 | DOC_MEDIA_DIRS_BUILD := $(DOC_MEDIA_DIRS:%=$(DOC_BUILD)/media/%) 202 | 203 | DOC_MEDIA_FILES := $(shell find docs/media -type f) 204 | DOC_MEDIA_FILES := $(DOC_MEDIA_FILES:docs/media/%=%) 205 | DOC_MEDIA_FILES_BUILD := $(DOC_MEDIA_FILES:%=$(DOC_BUILD)/media/%) 206 | 207 | # 208 | # Like the other targets, "docs" just depends on the final files we want to 209 | # create in $(DOC_BUILD), leveraging other targets and recipes to define how 210 | # to get there. 211 | # 212 | .PHONY: docs 213 | docs: \ 214 | $(DOC_FILES:%.restdown=$(DOC_BUILD)/%.html) \ 215 | $(DOC_FILES:%.restdown=$(DOC_BUILD)/%.json) \ 216 | $(DOC_MEDIA_FILES_BUILD) 217 | 218 | # 219 | # We keep the intermediate files so that the next build can see whether the 220 | # files in DOC_BUILD are up to date. 221 | # 222 | .PRECIOUS: \ 223 | $(DOC_FILES:%.restdown=docs/%.html) \ 224 | $(DOC_FILES:%.restdown=docs/%json) 225 | 226 | # 227 | # We do clean those intermediate files, as well as all of DOC_BUILD. 228 | # 229 | CLEAN_FILES += \ 230 | $(DOC_BUILD) \ 231 | $(DOC_FILES:%.restdown=docs/%.html) \ 232 | $(DOC_FILES:%.restdown=docs/%.json) 233 | 234 | # 235 | # Before installing the files, we must make sure the directories exist. The | 236 | # syntax tells make that the dependency need only exist, not be up to date. 237 | # Otherwise, it might try to rebuild spuriously because the directory itself 238 | # appears out of date. 239 | # 240 | $(DOC_MEDIA_FILES_BUILD): | $(DOC_MEDIA_DIRS_BUILD) 241 | 242 | $(DOC_BUILD)/%: docs/% | $(DOC_BUILD) 243 | $(CP) $< $@ 244 | 245 | docs/%.json docs/%.html: docs/%.restdown | $(DOC_BUILD) 246 | $(RESTDOWN) $(RESTDOWN_FLAGS) -m $(DOC_BUILD) $< 247 | 248 | $(DOC_BUILD): 249 | $(MKDIR) $@ 250 | 251 | info: 252 | echo "DOC_MEDIA_FILES is '$(DOC_MEDIA_FILES)'" 253 | echo "DOC_MEDIA_FILES_BUILD is '$(DOC_MEDIA_FILES_BUILD)'" 254 | 255 | $(DOC_MEDIA_DIRS_BUILD): 256 | $(MKDIR) $@ 257 | 258 | # 259 | # The default "test" target does nothing. This should usually be overridden by 260 | # the parent Makefile. It's included here so we can define "prepush" without 261 | # requiring the repo to define "test". 262 | # 263 | .PHONY: test 264 | test: 265 | 266 | .PHONY: prepush 267 | prepush: check test 268 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tasks Workflows Orchestration API and Runners. 2 | 3 | This repository is part of the Joyent Triton project. See the [contribution 4 | guidelines](https://github.com/joyent/triton/blob/master/CONTRIBUTING.md) 5 | and general documentation at the main 6 | [Triton project](https://github.com/joyent/triton) page. 7 | 8 | 9 | - Repository: 10 | - Browsing: 11 | - Who: [Pedro Palazón Candel](https://github.com/kusor), [Trent Mick](https://github.com/trentm), [Mark Cavage](https://github.com/mcavage), [Josh Wilsdon](https://github.com/joshwilsdon), [Rob Gulewich](https://github.com/rgulewich), [Andrés Rodríguez](https://github.com/cachafla), [Fred Kuo](https://github.com/fkuo). 12 | - Docs: 13 | - Tickets/bugs: 14 | 15 | 16 | # Installation 17 | 18 | npm install wf 19 | 20 | # Overview 21 | 22 | If you are building a completely new system composed of many discrete API 23 | applications, each of them with a clearly defined area of responsibility, or 24 | if you are trying to assemble a collaboration channel between a heterogeneous 25 | set of unrelated API applications, you need a means to orchestrate interactions 26 | between these applications. 27 | 28 | A workflow is effectively an orchestration. It gives you a way to decompose a 29 | complex series of operations down to a sequence of discrete tasks within a state 30 | machine. 31 | 32 | The sequence of tasks is more complex than just a series. Tasks can fail, and 33 | so you need to deal with timeouts, retries, "stuck" flows, and so forth. 34 | 35 | One way to define a workflow and its tasks is using an arbitrarily-complex 36 | language. Or you can keep it simple by making some assumptions: 37 | 38 | * Code is the definition language. 39 | * Tasks are independent. Can be used into different workflows. 40 | * The only way to communicate between tasks is the workflow. Tasks can add, 41 | remove or modify properties of the workflow. 42 | * If a task requires a specific property of the workflow to be present, the 43 | task can fail, or re-schedule itself within the workflow, or ... 44 | 45 | ## wf 46 | 47 | This package provides a way to define re-usable `workflows` using JSON and run 48 | concrete `jobs` with specific `targets` and `parameters` based on such 49 | `workflows`. 50 | 51 | ### Workflow Runners 52 | 53 | In order to execute `jobs`, at least one `WorkflowRunner` must be up and ready 54 | to take jobs. An arbitrary number of `runners` can be used on any set of hosts; 55 | their configuration must match. 56 | 57 | An example `WorkflowRunner` is provided with the package and can be started 58 | with: 59 | 60 | ./bin/workflow-runner path/to/config.json 61 | 62 | (The `test` directory contains the file `config.json.sample` which can be 63 | used as reference). 64 | 65 | You can create `workflows` and `jobs` either by using the provided REST API(s), 66 | or by embedding this module's API into your own system(s). The former will be 67 | easier to get up and running, but you should use the latter when: 68 | 69 | - You want to use the Workflow API in a (node.js) application that is not the 70 | bundled [REST API](http://joyent.github.io/node-workflow/workflowapi.html). 71 | - You want to use a different backend storage system, or otherwise change the 72 | assumptions of the bundled REST API. 73 | 74 | The package also provides a binary file to run the `WorkflowAPI` using the 75 | same configuration file we pass to our `WorkflowRunner`: 76 | 77 | ./bin/workflow-api path/to/config.json 78 | 79 | See demo section below for more details about both approaches. 80 | 81 | # Development 82 | 83 | ## Clone the repo and build the deps: 84 | 85 | git clone git://github.com/joyent/node-workflow.git 86 | cd node-workflow 87 | make all 88 | 89 | Note `make all` will setup all the required dependencies, node modules and run 90 | `make check` and `make test`. In order to just setup node modules, `make setup` 91 | is enough. 92 | 93 | To run the Workflow API server: 94 | 95 | ./bin/workflow-api path/to/config.json 96 | 97 | To run a Job Runner: 98 | 99 | ./bin/workflow-runner path/to/config.json 100 | 101 | Note that it's fine to run more than one Runner, either on the same or different 102 | machines, so long as they have access to the same storage backend. 103 | 104 | # Testing 105 | 106 | make test 107 | 108 | # Pre-commit git hook 109 | 110 | In order to run `make prepush` before every commit, add the following to a file 111 | called `.git/hooks/pre-commit` and `chmod +x` it: 112 | 113 | #!/bin/sh 114 | # Run make prepush before allow commit 115 | 116 | set -e 117 | 118 | make prepush 119 | 120 | exit 0 121 | 122 | If you've made a change that does not affect source code files, but (for 123 | example) only docs, you can skip this hook by passing the option `--no-verify` 124 | to the `git commit` command. 125 | 126 | # Demo 127 | 128 | The [workflow-example](https://github.com/kusor/node-workflow-example) 129 | repository contains everything needed to illustrate: 130 | 131 | - An example config file `config.json.sample` which should be renamed to 132 | `config.json`, and modified to properly match your local environment. 133 | 134 | Remember that, in order to process any `job` the `workflow-runner` needs to be 135 | initialized pointing to the aforementioned configuration file: 136 | 137 | ./node_modules/.bin/workflow-runner config.json 138 | 139 | Also, in order to be able to run the API-based example mentioned below, the 140 | `workflow-api` HTTP server needs to be up and running too: 141 | 142 | ./node_modules/.bin/workflow-api config.json 143 | 144 | Contents for the other files within the [workflow-example](https://github.com/kusor/node-workflow-example) 145 | repository are: 146 | 147 | - An example of how to use node-workflow as a node module in order to create 148 | workflows, queue jobs and wait for the results. See `module.js`. 149 | - An example of how to achieve the same goal using the Workflow API instead of 150 | the node module. See `api.js`. 151 | - Both examples share the same workflow definition, contained in the file 152 | `shared-workflow.js`. The beginning of the aforementioned files 153 | can be useful to understand the differences when trying to create a workflow 154 | using these different approaches. 155 | - Finally, this directory also contains the file `node.js`, which does 156 | exactly the same thing as the workflow/job does -- create and star a gist 157 | using your github's username and password -- but straight from node.js. This 158 | file is useful in order to understand the differences between writing code 159 | to be executed by node.js directly, and using it to create workflows and the 160 | associated tasks. Remember, code within tasks runs sandboxed using 161 | [Node's VM API](http://nodejs.org/docs/latest/api/vm.html) and that tasks 162 | are totally independent. 163 | 164 | See also `example.js` for more options when defining workflows and the different 165 | possibilities for task fallbacks, retries, timeouts, ... 166 | 167 | # Repository 168 | 169 | deps/ Git submodules and/or commited 3rd-party deps. 170 | See "node_modules/" for node.js deps. 171 | docs/ Project docs (restdown) 172 | lib/ Source files. 173 | node_modules/ Node.js deps, either populated at build time or commited. 174 | pkg/ Package lifecycle scripts 175 | test/ Test suite (using node-tap) 176 | tools/ Miscellaneous dev/upgrade/deployment tools and data. 177 | Makefile 178 | package.json npm module info 179 | README.md 180 | 181 | # TODO 182 | 183 | See https://github.com/joyent/node-workflow/issues. 184 | 185 | # LICENSE 186 | 187 | The MIT License (MIT) Copyright (c) 2018 Joyent, Inc. 188 | 189 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 190 | 191 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 192 | 193 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 194 | -------------------------------------------------------------------------------- /bin/workflow-api: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright (c) 2016, Joyent, Inc. 3 | 4 | // vim: set filetype=javascript : 5 | 6 | var path = require('path'), 7 | fs = require('fs'), 8 | util = require('util'), 9 | Logger = require('bunyan'), 10 | API = require('../lib/').API, 11 | levels = [Logger.TRACE, Logger.DEBUG, Logger.INFO, 12 | Logger.WARN, Logger.ERROR, Logger.FATAL]; 13 | 14 | if (process.argv.length < 3) { 15 | console.error('Usage: [node] ' + process.argv[1] + ' path/to/config.json'); 16 | process.exit(1); 17 | } else { 18 | var config_file = path.resolve(process.argv[2]); 19 | fs.readFile(config_file, 'utf8', function (err, data) { 20 | if (err) { 21 | console.error('Error reading config file:'); 22 | console.dir(err); 23 | process.exit(1); 24 | } else { 25 | try { 26 | var config = JSON.parse(data); 27 | } catch (e) { 28 | console.error('Error parsing config file JSON:'); 29 | console.dir(e); 30 | process.exit(1); 31 | } 32 | 33 | var api = new API(config); 34 | api.init(function () { 35 | api.log.info('API server up and running!'); 36 | }); 37 | 38 | // Increase/decrease loggers levels using SIGUSR2/SIGUSR1: 39 | var sigyan = require('sigyan'); 40 | sigyan.add([api.log, api.server.log, api.backend.log]); 41 | } 42 | }); 43 | } 44 | -------------------------------------------------------------------------------- /bin/workflow-runner: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright (c) 2016, Joyent, Inc. 3 | 4 | // vim: set filetype=javascript : 5 | 6 | var path = require('path'), 7 | fs = require('fs'), 8 | util = require('util'), 9 | Logger = require('bunyan'), 10 | WorkflowRunner = require('../lib/').Runner, 11 | levels = [Logger.TRACE, Logger.DEBUG, Logger.INFO, 12 | Logger.WARN, Logger.ERROR, Logger.FATAL]; 13 | 14 | if (process.argv.length < 3) { 15 | console.error('Usage: [node] ' + process.argv[1] + ' path/to/config.json'); 16 | process.exit(1); 17 | } else { 18 | var config_file = path.resolve(process.argv[2]); 19 | var config; 20 | fs.readFile(config_file, 'utf8', function (err, data) { 21 | if (err) { 22 | console.error('Error reading config file:'); 23 | console.dir(err); 24 | process.exit(1); 25 | } else { 26 | try { 27 | config = JSON.parse(data); 28 | } catch (e) { 29 | console.error('Error parsing config file JSON:'); 30 | console.dir(e); 31 | process.exit(1); 32 | } 33 | var runner = WorkflowRunner(config); 34 | runner.init(function (err) { 35 | if (err) { 36 | console.error('Error initializing runner:'); 37 | console.dir(err); 38 | process.exit(1); 39 | } 40 | runner.run(); 41 | runner.log.info('Workflow Runner up!'); 42 | }); 43 | 44 | // Increase/decrease loggers levels using SIGUSR2/SIGUSR1: 45 | var sigyan = require('sigyan'); 46 | sigyan.add([runner.log, runner.backend.log]); 47 | 48 | process.on('SIGINT', function () { 49 | console.log('Got SIGINT. Waiting for child processes to finish'); 50 | runner.quit(function () { 51 | console.log('All child processes finished. Exiting now.'); 52 | process.exit(0); 53 | }); 54 | }); 55 | 56 | process.on('SIGTERM', function () { 57 | console.log('Got SIGTERM. Finishing child processes'); 58 | runner.kill(function () { 59 | console.log('All child processes finished. Exiting now.'); 60 | process.exit(0); 61 | }); 62 | }); 63 | 64 | } 65 | }); 66 | } 67 | -------------------------------------------------------------------------------- /docs/media/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TritonDataCenter/node-workflow/f9b723ff9009aeb9e2cd037e1ab97413c14e9aea/docs/media/img/favicon.ico -------------------------------------------------------------------------------- /docs/media/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/TritonDataCenter/node-workflow/f9b723ff9009aeb9e2cd037e1ab97413c14e9aea/docs/media/img/logo.png -------------------------------------------------------------------------------- /docs/motivation.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Motivation 3 | logo-color: #aa0000 4 | --- 5 | # Overview 6 | 7 | A workflow is effectively an orchestration. It gives you a way to decompose a 8 | complex series of operations down to a sequence of discrete tasks within a state 9 | machine. 10 | 11 | The sequence of tasks is more complex than just a series. Tasks can fail, and 12 | so you need to deal with timeouts, retries, "stuck" flows, and so forth. 13 | 14 | One way to define a workflow and its tasks is using an arbitrarily-complex 15 | language. Or you can keep it simple by making some assumptions: 16 | 17 | * Code is the definition language. 18 | * Tasks are independent. Can be used into different workflows. 19 | * The only way to communicate between tasks is the workflow. Tasks can add, 20 | remove or modify properties of the workflow. 21 | * If a task requires a specific property of the workflow to be present, the 22 | task can fail, or re-schedule itself within the workflow, or ... 23 | 24 | # System design 25 | 26 | The system must be designed with failures in mind. Tasks can fail and, as a 27 | consequence, workflows may fail. You may want to recover from a task failure, 28 | or from a whole workflow failure. 29 | 30 | ## Terminology 31 | 32 | * _Task_: A single discrete operation, such as Send Email. 33 | * _Workflow_: An abstract definition of a sequence of Tasks, including 34 | transition conditions and branches. 35 | * _Job_: The execution of a workflow. It is an instance of a Workflow, 36 | containing all the required information to execute itself. 37 | 38 | ## System components 39 | 40 | - A workflow and task **factory** for creating tasks, workflows and queueing 41 | jobs. Uses node.js. 42 | - Alongside the **factory**, the **Workflow API** allows the creation of tasks, 43 | workflows and jobs through a REST API, with JSON as the payload. 44 | - A **Status API**, used to check the status of a given job, get information 45 | about failures, and so forth. 46 | - **Job runners**. These are what actually execute workflows. You can have as 47 | many runners as you want, and they can live anywhere on the network. Once a 48 | runner atomically takes a job, that job is flagged with the runner's unique 49 | identifier to prevent any other runner from executing it. One runner can be 50 | composed of multiple associated processes for executing jobs. 51 | 52 | The factory talks to a __persistence layer__, and saves workflows and tasks. 53 | Once a Job is created, it's saved with all the information required for its 54 | execution, including the associated tasks' code at the moment of job creation. 55 | This will __avoid any potential problems resulting from the modification of 56 | task code once a Job has already been queued__. 57 | 58 | A runner itself may unintentionally go nuts or crash while a job is being 59 | executed, leaving the job with an inappropriated _"running"_ status. When a 60 | runner (re)starts, there shouldn't be any job flagged with that runner's 61 | unique identifier, nor should it have a running status. If that happens, the 62 | first thing a runner must do upon restart is pick any job with such an invalid 63 | state and execute the fall-back recovery branch for that job. 64 | 65 | ## Recursion and Branches 66 | 67 | Imagine you have a task, and you specify that same task as a fall-back in case 68 | that task fails, This isn't especially useful, since the fall-back task will 69 | probably also fail and -- as a consequence -- the workflow will also fail ... 70 | 71 | ... wait!, or will it call the workflow's failure branch, which may also contain 72 | _the same task!_ Obviously, this will also fail. 73 | 74 | So, first rule: when a task is specified as part of a workflow, that same task 75 | can neither be specified as that same task's `onerror` fall-back, nor be in the 76 | workflow's `onerror` branch. 77 | 78 | Now, consider the following scenario: a job is created as the result of some 79 | action, e.g. the GET request to a given REST URI. Then -- as part 80 | of the workflow's tasks -- another GET request is made to the same REST URI. 81 | This will obviously result in an infinite number of jobs being created to do 82 | exactly the same thing. 83 | 84 | A job's target property may help us on avoid this infinite loop. While a 85 | Workflow is something abstract, a Job can operate on a concrete target. For 86 | example, you can use a REST URI as the target of the job, or an LDAP DN, or 87 | whatever you need to make sure that the same job will not be queued twice. 88 | 89 | When jobs are created and queued, they check if another job with the same target 90 | (and the same parameters) exists. If so, the job creation will fail. 91 | 92 | Obviously, there are some cases where you may want the same job to be queued 93 | for the same target; for example, POST to a given URI to create a new collection 94 | element. For that reason, a job's `parameters` are also checked with the job's 95 | `target` when creating a new job. 96 | 97 | If a job has failed for a given target and parameters, you may want to 98 | create a new job after some time. This is possible since the uniqueness checks 99 | are only made against previous jobs which are "running" or "queued", not versus 100 | "finished" jobs (regardless of their result). 101 | 102 | Finally, a note about multiple branches: 103 | 104 | In theory, one may want to specify an arbitrary number of branches to be 105 | executed, depending on workflow's tasks results. That would bring us into a 106 | complex scenario where we make decisions like: _should we allow the same task to 107 | be specified as part of several different branches?_. 108 | 109 | So far I don't have a clear answer for that. In theory, only the fallbacks 110 | should be different than their respective tasks, and the workflow's `onerror` 111 | chain shouldn't contain any of the tasks specified as part of any of the other 112 | branches. 113 | 114 | Can we imagine a concrete example where multiple branches are required? If so, 115 | we can take care of things like avoiding recursion and infinite loops, but I 116 | think we'd rather worry about that once we find an example. 117 | 118 | # Implementation details 119 | 120 | 1. We need a way to define tasks. 121 | 2. We need a way to define a workflow. 122 | 3. We need a way to add tasks to a workflow, and sometimes specify the exact 123 | position of a task in a workflow's chain. 124 | 4. We need a way to remove tasks from a workflow. Some tasks may be able to flag 125 | themselves as non-removable. 126 | 5. A task can fail. 127 | 6. We need to know how many times to retry a task in case of failure. 128 | 7. We need to know what to do when a task fails. Fail the workflow? Try a 129 | different branch? Or...? 130 | 8. A task can be stuck. 131 | 9. We need to know when to timeout a task. 132 | 10. We need to know what to do when a task times out. 133 | 11. We need to be able to instantiate a workflow in order to create a Job. 134 | 12. A Job may have parameters. 135 | 13. A Job may have a target. 136 | 14. We need to run a Job. 137 | 15. A job may have an acceptable period of time to be completed. 138 | 16. At job completion, we may want to specify another job to be completed. 139 | 17. We want to know the results of each task execution. 140 | 18. We may want to know a workflow's status at any moment. 141 | 142 | ## Task properties 143 | 144 | - Name. 145 | - Code to be executed. 146 | - Timeout. 147 | - Number of retries. 148 | - A fall-back task to be executed if the task fails. 149 | 150 | 151 | { 152 | name: 'A Task', 153 | timeout: 30, 154 | retry: 2, 155 | body: function(job, cb) { 156 | if (!job.foo) { 157 | job.foo = true; 158 | return cb('Foo was not defined'); 159 | } 160 | return cb(null); 161 | } 162 | } 163 | 164 | 165 | Note that a task's timeout shouldn't be bigger than the workflow timeout, but 166 | it really doesn't matter. If a task's execution exceeds the workflow timeout, it 167 | will be failed with a 'workflow timeout' error. 168 | 169 | ## Workflow properties 170 | 171 | - Name. 172 | - A 'chain' of tasks to be executed. 173 | - A global timeout. 174 | - An alternate error branch. 175 | - Optionally, another workflow to execute right after the current one completes 176 | 177 | 178 | factory.workflow({ 179 | name: 'Sample Workflow', 180 | chain: [aTask, anotherTask, aTaskWhichWillFail], 181 | timeout: 300, 182 | onError: [aFallbackTask] 183 | }, function(err, workflow) { 184 | if (err) { 185 | throw(err); 186 | } 187 | return workflow; 188 | }); 189 | 190 | 191 | ## Job properties 192 | 193 | Same as a workflow, plus: 194 | 195 | - Results for each one of the tasks. 196 | - The job target, when given. 197 | - The job parameters, when necessary. 198 | - The job status (something like "queued", "running", "finished" may work). 199 | Note that a job is running while a task is being executed. It's possible to 200 | change job status to "queued" once a task has been completed, and leave the 201 | job there to be picked by a runner at some later moment. 202 | - When to run the job. Maybe we want to delay execution in time?. 203 | - Any additional properties a task may want to save with the job, to be used by 204 | a subsequent task. 205 | 206 | 207 | 208 | factory.job({ 209 | workflow: aWorkflow, 210 | exec_after: '2012-01-03T12:54:05.788Z' 211 | }, function(err, job) { 212 | if (err) { 213 | callback(err); 214 | } 215 | aJob = job; 216 | callback(null, job); 217 | }); 218 | 219 | 220 | Some questions about other jobs options: 221 | 222 | - Do we want to be able to "abort" a job? 223 | - Even if it's running? 224 | - If so, what to do when it's running and we abort? Call the `onerror` 225 | branch? (note that I'm beginning to think that `onerror` should be called 226 | `revert` sometimes, or maybe it's just an extra branch...). 227 | 228 | See `example.js` for a detailed description on how to create Tasks, Workflows 229 | and Jobs using NodeJS through the **factory** component. 230 | 231 | ## Workflow Runners 232 | 233 | The system design requires that we can have workflow runners everywhere. As 234 | many as needed, and all of them reporting health periodically. Also, it would 235 | be desirable that runners implement a `ping` method to provide immediate 236 | information about their status. 237 | 238 | All runners will periodically query the backend for information about other 239 | runners. If they detect one of those other runners has been inactive for a 240 | configurable period of time, they will check for stale jobs associated with 241 | that inactive runner. They will fail those jobs or run the associated `onerror` 242 | branch. 243 | 244 | The first thing a runner does when it boots is to register itself with the 245 | backend (which is the same as reporting its health). At a configurable interval 246 | a runner will try to pick queued jobs and execute them. Runners will report 247 | activity at this same interval. 248 | 249 | Every runner must have a unique identifier, which can either be passed in at the 250 | runner's initialization, or be auto-generated the first time the runner is 251 | created and saved for future runs. 252 | 253 | Runners will spawn child processes, one process per job. The maximum number of 254 | child processes is also configurable. 255 | 256 | ### How runners pick and execute jobs 257 | 258 | A runner will query the backend for queued jobs (exactly the same number of 259 | jobs as available child processes to spawn). Once the runner gets a set of 260 | these queued jobs, it will try to obtain an exclusive lock on each job before 261 | processing it. When a job is locked by a runner, it will not be found by other 262 | runners searching for queued jobs. 263 | 264 | Once the runner has an exclusive lock over the job, it'll change job status 265 | from _queued_ to _running_, and begin executing the associated tasks. 266 | 267 | In order to execute the job, the runner will spawn a child process, and pass 268 | it all the information about the job; child processes don't have access to 269 | the backend, just to the job, which must be a JSON object. 270 | 271 | Note that everything must be executed within the acceptable amount of time 272 | provided for the job. If this time expires, the job execution will fail and 273 | the `onerror` branch will be executed when given. 274 | 275 | ### Task execution: 276 | 277 | A runner will then try to execute the `job` chain of tasks, in order. For every 278 | task, the runner will try to execute the task `body` using the node.js VM API, 279 | effectively as a separate process. Every task will get as arguments the job and 280 | a callback. A task should call the callback once it's completed. 281 | 282 | 283 | // A task body: 284 | function(job, cb) { 285 | // Task stuff here: 286 | cb(null); 287 | } 288 | 289 | 290 | If a task succeeds, it will call the callback without `error`: 291 | 292 | cb(null); 293 | 294 | Otherwise, the task should call the callback with an error message: 295 | 296 | cb('whatever the error reason'); 297 | 298 | These error messages will be available for the task's `onerror` function, in 299 | order to allow a task's fallback to decide if it can recover the task from a 300 | failure. 301 | 302 | It's also possible to set a specific timeout for every task execution. 303 | 304 | If a task fails, or if the task timeout is reached, the runner will check if 305 | we've exceeded the number of retries for the task. If that's not the case, 306 | it'll try to execute the task again. 307 | 308 | Once the max number of retries for a task has been reached, the runner will 309 | check if the task has an `onerror` fallback. If that's the case, it'll call it 310 | with the error which caused the failure, as follows: 311 | 312 | task.onerror(error, job, cb); 313 | 314 | The same logic as for task bodies can be applied to `onerror` fallbacks. 315 | 316 | Note that __tasks run sandboxed__. Only the node modules we specify to the 317 | runner at initialization time, alongside with `setTimeout`, `clearTimeout`, 318 | `setInterval` and `clearInterval` global functions, will be available for 319 | task `body` and `onerror` functions (this will be configurable). 320 | 321 | All the task results will be saved in order on the job's property 322 | `chain_results`. For every task, the results will be something like: 323 | 324 | { 325 | error: '', 326 | results: 'OK' 327 | } 328 | 329 | or, for a task which failed: 330 | 331 | { 332 | error: 'Whatever the error reason', 333 | results: '' 334 | } 335 | 336 | If the task fails because its `onerror` fallback failed, or because the task 337 | doesn't have such a fallback, the job's `onerror` chain will be invoked if 338 | present. 339 | 340 | The logic to execute the job's `onerror` chain is exactly the same as we've 341 | described here for the main `chain` of tasks. 342 | 343 | Once the job is finished, either successfully or right after a failure, or even 344 | in the case a task tells the runner to _re-queue_ the job, the child process 345 | running the job will communicate to runner the results. The runner will save 346 | back those results to the backend, and either finish the job, or re-queue it 347 | for another runner. 348 | 349 | Fork me on GitHub 350 | -------------------------------------------------------------------------------- /docs/workflowapi.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Workflow REST API 3 | logo-color: #aa0000 4 | --- 5 | # Workflow REST API 6 | 7 | This document describes the HTTP REST API for workflows and tasks, and for the 8 | creation of Jobs and tracking their execution progress. 9 | 10 | __This API speaks only JSON__. For every request. For all the HTTP methods. 11 | This means that any `POST` or `PUT` request, the `Content-Type` __must be 12 | JSON__. 13 | 14 | (See [node-workflow](index.html) docs for information on the whole module, 15 | not just the REST API). 16 | 17 | # End-Points 18 | 19 | The API is composed by the following end-points: 20 | 21 | - `/workflows` 22 | - `/jobs` 23 | - `/jobs/:uuid/info` 24 | - `/stats` (since version `0.10.0`) 25 | 26 | `/workflows` accept any of the HTTP verbs for the usual CRUD, but `/jobs` will 27 | not accept `PUT` since the only way to modify a Job once it has been created is 28 | through the job's execution. 29 | 30 | For the same reason, `/jobs/:uuid/info` will not accept neither `POST` (since the 31 | staus information for a Job is created when the Job itself is created), nor 32 | `DELETE` (given the status information for a job will be removed only if the job 33 | is removed). 34 | 35 | ## GET /workflows 36 | 37 | Retrieve a list of all the existing workflows. 38 | 39 | ### HTTP Parameters. 40 | 41 | None. 42 | 43 | ### Status Codes 44 | 45 | - `200 OK`: A list of existing workflows is returned. If there are no workflows, 46 | an empty array is returned. 47 | 48 | ### Response Body 49 | 50 | An array of workflow objects (see `POST /workflows`). 51 | 52 | ## POST /workflows 53 | 54 | Create a new workflow. 55 | 56 | ### HTTP Parameters 57 | 58 | - `name`: Required. The workflow name. 59 | - `chain[]`: Optional. The tasks to add to the workflow. Multiple values 60 | allowed. 61 | - `onerror[]`: Optional. The tasks to add to the workflow fallback. Multiple 62 | values allowed. 63 | - `timeout`: Optional. Timeout in seconds for workflow execution. 64 | 65 | ### Every `task` may be composed of: 66 | 67 | - `name`: Optional. The task name. 68 | - `body`: Required. A string enclosing a JavaScript function definition. 69 | The function __must__ take the parameters `job` and `cb`, where `cb` is a 70 | callback to be called by the function when its execution is finished. If the 71 | task succeeded, invoke the cb without arguments. If the task failed, invoke 72 | the cb with an error mssage. 73 | - `fallback`: Optional. A string enclosing a JavaScript function definition. 74 | The function __must__ take the parameters `err`, `job` and `cb`, where `cb` 75 | is a callback to be called by the function when its execution fails; 76 | `err` is the error message returned by task `body`. 77 | - `retry`: Optional. Number of times to retry the task's body before either 78 | failing the task, or calling the `fallback` function (when given). 79 | - `timeout`: Optional. Timeout in seconds for task execution. 80 | 81 | ### Status Codes 82 | 83 | - `409 Conflict`: One of the required parameters is either missing or incorrect. 84 | Information about the missing/incorrect parameter will be included in the 85 | response body. 86 | - `201 Created`: Successful creation of the workflow. The workflow's JSON 87 | representation will be included in the response body, together with a 88 | `Location` header for the new resource. A workflow's generated `uuid` will be 89 | part of this `Location`, and a member of the returned workflow JSON object. 90 | 91 | ### Response Body: 92 | 93 | { 94 | uuid: UUID, 95 | name: 'The workflow name', 96 | chain: [:task, :task, ...], 97 | onerror: [:task, :task, ...], 98 | timeout: 3600 99 | } 100 | 101 | #### Sample task in the response: 102 | 103 | { 104 | uuid: UUID, 105 | name: 'The task name', 106 | body: "function(job, cb) { 107 | if (job.foo) { 108 | return cb(null); 109 | } else { 110 | return cb('Uh, oh!, no foo.'); 111 | } 112 | }", 113 | fallback: "function(err, job, cb) { 114 | if (err === 'Uh, oh!, no foo.') { 115 | job.foo = 'bar'; 116 | return cb(null); 117 | } else { 118 | return cb('Arise chicken, arise!'); 119 | } 120 | }", 121 | timeout: 360 122 | } 123 | 124 | ## GET /workflows/:wf_uuid 125 | 126 | ### HTTP Parameters: 127 | 128 | - `wf_uuid`: The workflow UUID. 129 | 130 | ### Status Codes: 131 | 132 | - `404 Not Found`: There's no worlflow with the provided `wf_uuid`. 133 | - `200 OK`: The workflow with the provided `wf_uuid` has been found and is 134 | returned as response body. 135 | 136 | Note this API will not keep track of _destroyed_ workflows. When a request for 137 | destroyed workflows is made, the HTTP Status code will be `404 Not Found` 138 | instead of `410 Gone`. 139 | 140 | ### Response Body 141 | 142 | Same as for `POST /workflows` + `wf_uuid`. 143 | 144 | ## PUT /workflows/:wf_uuid 145 | 146 | ### HTTP Parameters 147 | 148 | Same as for `POST /workflows`. 149 | 150 | ### Status Codes 151 | 152 | Same as for `POST /workflows` with the addition of: 153 | 154 | - `404 Not Found`, when the provided `wf_uuid` cannot be found on the backend. 155 | 156 | ### Response Body 157 | 158 | Same as for `POST /workflows`. 159 | 160 | ## DELETE /workflows/:wf_uuid 161 | 162 | ### HTTP Parameters: 163 | 164 | - `wf_uuid`: The workflow UUID. 165 | 166 | ### Status Codes 167 | 168 | - `204 OK`: Workflow successfully destroyed. 169 | 170 | ## GET /jobs 171 | 172 | Retrieve a list of jobs. Without an `execution` HTTP parameter, all the existing 173 | jobs will be retrieved. If `execution` is given, only the jobs with the given 174 | execution status are retrieved. 175 | 176 | ### HTTP Parameters. 177 | 178 | - `execution`: Optional. One of `succeeded`, `failed`, `running`, 'canceled' 179 | or `queued`. 180 | 181 | ### Status Codes 182 | 183 | - `200 OK`: A list of existing jobs is returned, even when it's empty. 184 | 185 | ## POST /jobs 186 | 187 | ### HTTP Parameters. 188 | 189 | - `workflow`: Required. UUID of the workflow from which the new job will be 190 | created. 191 | - `exec_after`: Optional. ISO 8601 Date. Delay job execution until the provided 192 | time. 193 | - `target`: The job's target, intended to restrict the creation of another job 194 | with this same target and parameters until this job completes. 195 | - Any extra `k/v` pairs of parameters desired, which will be passed to the job 196 | object as an object like `{k1: v1, k2: v2, ...}`. 197 | 198 | ### Status Codes 199 | 200 | - `409 Conflict`: One of the required parameters is either missing or incorrect. 201 | Information about the missing/incorrect parameter will be included in the 202 | response body. 203 | - `201 Created`: Successful creation of the job. The job's JSON representation 204 | will be included in the the response body together with a `Location` header 205 | for the new resource. The job's generated `uuid` will be part of this 206 | `Location`, and a member of the returned job JSON object. 207 | 208 | ### Response Body 209 | 210 | { 211 | uuid: UUID, 212 | workflow_uuid: wf_uuid, 213 | name: 'The workflow name', 214 | chain: ['task object', 'task object', ...], 215 | onerror: ['task object', 'task object', ...], 216 | timeout: 3600, 217 | exec_after: new Date().toISOString(), 218 | target: '/some/uri', 219 | params: { 220 | k1: v1, 221 | k2: v2 222 | }, 223 | chain_results: [{result: 'OK', error: ''}, ...], 224 | onerror_results: [{result: 'OK', error: ''}, ...], 225 | execution: 'queued' ('running'|'failure'|'success') 226 | } 227 | 228 | 229 | ## GET /jobs/:job_uuid 230 | 231 | ### HTTP Parameters. 232 | 233 | - `job_uuid`: The job's UUID. 234 | 235 | ### Status Codes 236 | 237 | - `404 Not Found`: There's no job with the provided `job_uuid`. 238 | - `200 OK`: The task with the provided `job_uuid` has been found and is 239 | returned as response body. 240 | 241 | Note this API will not keep track of _destroyed_ workflows. When a request for 242 | destroyed workflows is made, the HTTP Status code will be `404 Not Found` 243 | instead of `410 Gone`. 244 | 245 | ### Response Body 246 | 247 | Same as for `POST /jobs`. 248 | 249 | ## PUT /jobs/:job_uuid 250 | 251 | __TBD__. Response with status code `405 Method Not Allowed`. 252 | 253 | ## DELETE /jobs/:job_uuid 254 | 255 | __TBD__. Response with status code `405 Method Not Allowed`. 256 | 257 | ## POST /jobs/:job_uuid/cancel 258 | 259 | Cancel a job's execution. Only unfinished jobs can be canceled. 260 | 261 | ### HTTP Parameters. 262 | 263 | - `job_uuid`: The job's UUID. 264 | 265 | ### Status Codes 266 | 267 | - `404 Not Found`: There's no job with the provided `job_uuid`. 268 | - `409 Conflict`: The job is already finalized and cannot be canceled. 269 | - `200 OK`: Successfully canceled job. 270 | 271 | ### Response Body 272 | 273 | Same than for `POST /jobs`. 274 | 275 | ## POST /jobs/:job_uuid/resume 276 | 277 | Resumes a job's execution which was previously set to waiting by any of the Job 278 | tasks. The job should have a *waiting* status in order to resume it. 279 | 280 | Depending on the given parameters, the job will be either *re-queued* or flagged 281 | as *failed*. The `chain_results` entry for the task which set the job to 282 | `waiting` will be updated with the given `result` or `error` messages. 283 | 284 | On success, *resume* the job will set its status from *waiting* to *queued* 285 | again. The job will run once one of the runners has a free slot to run it. 286 | 287 | ### HTTP Parameters. 288 | 289 | - `job_uuid`: The job's UUID. 290 | - `result`: Optional. Result message from the remote task if it runs okay. 291 | Defaults to *"OK"*. 292 | - `error`: Optional. Error message for the remote task. Note that providing any 293 | value for this parameter means you want to fail the job. 294 | 295 | ### Status Codes 296 | 297 | - `404 Not Found`: There's no job with the provided `job_uuid`. 298 | - `409 Conflict`: The job is not waiting and cannot be resumed. 299 | - `200 OK`: Successfully resumed job. 300 | 301 | ### Response Body 302 | 303 | Same than for `POST /jobs`. 304 | 305 | ## GET /jobs/:job_uuid/info 306 | 307 | Detailed information about the given job. A task may cause a 3rd-party 308 | application to execute a process which may require some time to finish. While 309 | our task is running and waiting for the finalization of such a process, those 310 | 3rd-party applications can publish information about progress using 311 | `POST /jobs/:job_uuid/info`; this information can then being used by other 312 | applications interested in job results using `GET /jobs/:job_uuid/info`. 313 | 314 | This information will consist of an arbitrary-length array, where every `POST` 315 | request will result in a new member being appended. 316 | 317 | ### HTTP Parameters. 318 | 319 | - `job_uuid`: The job's UUID. 320 | 321 | ### Status Codes 322 | 323 | Same as for `GET /jobs/:job_uuid`. 324 | 325 | ### Response Body 326 | 327 | [ 328 | { '10%': 'Task completed step one' }, 329 | { '20%': 'Task completed step two' } 330 | ] 331 | 332 | ## POST /jobs/:job_uuid/info 333 | 334 | ### HTTP Parameters. 335 | 336 | - `message`: Required. Object containing a message regarding the progress of 337 | operations. 338 | 339 | { '10%': 'Task completed step one' } 340 | 341 | Note you can provide any key/value pair here. 342 | 343 | ### Status Codes 344 | 345 | Same as for `GET /jobs/:job_uuid`. 346 | 347 | ### Response Body 348 | 349 | None. 350 | 351 | ## PUT /jobs/:job_uuid/info 352 | 353 | Response with status code `405 Method Not Allowed`. 354 | 355 | ## DELETE /jobs/:job_uuid/info 356 | 357 | Response with status code `405 Method Not Allowed`. 358 | 359 | ## GET /stats 360 | 361 | Returns a list of statistics for the current, past hour, past day and all time 362 | system status, in terms of number of jobs on each one of the execution satatus 363 | 364 | ### HTTP Parameters. 365 | 366 | None. 367 | 368 | ### Status Codes 369 | 370 | - `200 OK`: A list of stats is returned, even when it's all zero counters. 371 | 372 | ### Response Body 373 | 374 | Something with the following members but, most likely, with different numbers: 375 | 376 | { 377 | all_time: { 378 | queued: 0, 379 | failed: 0, 380 | succeeded: 0, 381 | canceled: 0, 382 | running: 0, 383 | retried: 0, 384 | waiting: 0 385 | }, 386 | past_24h: { 387 | queued: 0, 388 | failed: 0, 389 | succeeded: 0, 390 | canceled: 0, 391 | running: 0, 392 | retried: 0, 393 | waiting: 0 394 | }, 395 | past_hour: { 396 | queued: 0, 397 | failed: 3, 398 | succeeded: 15, 399 | canceled: 3, 400 | running: 0, 401 | retried: 0, 402 | waiting: 0 403 | }, 404 | current: { 405 | queued: 2, 406 | failed: 0, 407 | succeeded: 0, 408 | canceled: 0, 409 | running: 1, 410 | retried: 1, 411 | waiting: 1 412 | }, 413 | } 414 | 415 | Please, note that `all_time` member does not contains results contained into 416 | `past_24h`, neither this one contains results already contained into 417 | `past_hour`, and this one ... you got it!. 418 | 419 | Fork me on GitHub 420 | -------------------------------------------------------------------------------- /example.js: -------------------------------------------------------------------------------- 1 | // Copyright 2013 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | // First part of the example: You create workflows, add tasks, queue jobs 4 | // anywhere in your code using NodeJS. 5 | 6 | var assert = require('assert'); 7 | var util = require('util'); 8 | var wf = require('./lib/index'); 9 | // With modules, it would be require('workflow'); 10 | var Factory = wf.Factory; 11 | var Backend = wf.Backend; 12 | 13 | var backend, factory; 14 | 15 | // Some globals: 16 | var aWorkflow, aJob; 17 | 18 | // We'll use 'vasync' module to simplify definitions a bit, and avoid nesting 19 | // stuff for clarity: 20 | var vasync = require('vasync'); 21 | 22 | // Our serie of functions to execute: 23 | var pipeline = [ 24 | 25 | function createWorkflow(_, callback) { 26 | // A workflow definition: 27 | factory.workflow({ 28 | name: 'Sample Workflow', 29 | chain: [ { 30 | // A task. It will fail on first retry, but succeed on 2nd one: 31 | name: 'A Task', 32 | timeout: 30, 33 | retry: 2, 34 | body: function (job, cb) { 35 | if (!job.foo) { 36 | job.foo = true; 37 | return cb('Foo was not defined'); 38 | } 39 | return cb(null); 40 | } 41 | }, 42 | { 43 | // This task will fail, but it will succeed when the task's 44 | // fallback function is called: 45 | name: 'Another task', 46 | body: function (job, cb) { 47 | return cb('Task body error'); 48 | }, 49 | // Note that the `fallback` function takes the error as its 50 | // first argument: 51 | fallback: function (err, job, cb) { 52 | job.the_err = err; 53 | return cb(null); 54 | } 55 | }, 56 | { 57 | // This task will fail and, given there isn't an fallback 58 | // callback, the workflow will call the `onerror` chain: 59 | name: 'A task which will fail', 60 | body: function (job, cb) { 61 | job.this_failed_because = 'We decided it.'; 62 | return cb('Task body error'); 63 | } 64 | }], 65 | timeout: 180, 66 | onError: [ { 67 | name: 'A fallback task', 68 | body: function (job, cb) { 69 | // Some task failed and, as a consequence, this task is 70 | // being executed 71 | if (job.error) { 72 | // Do something here ... 73 | cb(job.error); 74 | } 75 | cb(null); 76 | } 77 | }] 78 | }, function (err, workflow) { 79 | if (err) { 80 | callback(err); 81 | } 82 | aWorkflow = workflow; 83 | callback(null, workflow); 84 | }); 85 | }, 86 | function createJob(_, callback) { 87 | // A Job based on the workflow: 88 | factory.job({ 89 | workflow: aWorkflow.uuid, 90 | exec_after: '2012-01-03T12:54:05.788Z' 91 | }, function (err, job) { 92 | if (err) { 93 | callback(err); 94 | } 95 | aJob = job; 96 | callback(null, job); 97 | }); 98 | } 99 | 100 | ]; 101 | 102 | 103 | function main() { 104 | backend = Backend({}); 105 | 106 | backend.init(function () { 107 | factory = Factory(backend); 108 | assert.ok(factory); 109 | 110 | vasync.pipeline({ 111 | funcs: pipeline 112 | }, function (err, results) { 113 | if (err) { 114 | console.error(err); 115 | return; 116 | } 117 | // At this point, we should have a results array with all the 118 | // tasks, the workflow and the job, on the same order we defined 119 | // them but,given we've set the objects to globals, we couldn't 120 | // care less about this async's results array. 121 | // 122 | // Our tasks and workflow should have been created, and our job 123 | // should have been created and queued: 124 | assert.ok(aWorkflow); 125 | assert.ok(aWorkflow.uuid); 126 | assert.ok(aJob); 127 | // We need the UUID in order to be able to check Job Status 128 | assert.ok(aJob.uuid); 129 | console.log(util.inspect(results.operations, false, 8)); 130 | }); 131 | }); 132 | } 133 | 134 | main(); 135 | -------------------------------------------------------------------------------- /lib/child.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | // 4 | // Child process to be "forked" from task-runner.js. 5 | // 6 | var util = require('util'), 7 | WorkflowTaskRunner = require('./task-runner'), 8 | wf_task_runner; 9 | 10 | // Every possible situation finishes this way, notifying parent process about 11 | // execution results: 12 | function notifyParent(msg) { 13 | process.send(msg); 14 | } 15 | 16 | 17 | 18 | // This is the only way we have for communication from the parent process. 19 | // 20 | // Main case: 21 | // - We receive a message from parent including the 'task' to run and the 'job' 22 | // object itself. Optionally, this object may also contain a 'sandbox' object 23 | // and 'trace' enabled. 24 | // Side case: 25 | // - We receive a message to finish the task "as is" due to a "finish task now" 26 | // call. 27 | process.on('message', function (msg) { 28 | if (msg.job && msg.task) { 29 | try { 30 | wf_task_runner = WorkflowTaskRunner(msg); 31 | wf_task_runner.runTask(notifyParent); 32 | } catch (e) { 33 | notifyParent({ 34 | error: e.message 35 | }); 36 | } 37 | } else if (msg.cmd && msg.cmd === 'cancel') { 38 | // Cancel message received from job runner 39 | wf_task_runner.canceled = true; 40 | } else { 41 | // Finally, notify parent about unknown messages 42 | notifyParent({ 43 | error: 'unknown message' 44 | }); 45 | } 46 | }); 47 | -------------------------------------------------------------------------------- /lib/errors.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var restify = require('restify'), 5 | util = require('util'); 6 | 7 | var CODES = { 8 | BackendInternal: 500, 9 | BackendInvalidArgument: 409, 10 | BackendMissingParameter: 409, 11 | BackendPreconditionFailed: 412, 12 | BackendResourceNotFound: 404 13 | }; 14 | 15 | function BackendError(code, restCode, message) { 16 | restify.RestError.call( 17 | this, { 18 | restCode: restCode, 19 | message: message || 'BackendError' 20 | }); 21 | 22 | this.name = 'BackendError'; 23 | 24 | this.__defineGetter__('toRestError', function () { 25 | var errName = this.name.replace(/^Backend/, ''); 26 | 27 | if (typeof (restify[errName]) === 'function') { 28 | return new restify[errName](message); 29 | } else { 30 | return new restify.InternalError(message); 31 | } 32 | 33 | }); 34 | } 35 | 36 | util.inherits(BackendError, restify.RestError); 37 | 38 | 39 | module.exports = { 40 | BackendError: BackendError 41 | }; 42 | 43 | Object.keys(CODES).forEach(function (k) { 44 | var name = k + 'Error'; 45 | module.exports[name] = function () { 46 | var message = util.format.apply(util, arguments); 47 | BackendError.call(this, CODES[k], k, message); 48 | 49 | this.name = name; 50 | }; 51 | util.inherits(module.exports[name], BackendError); 52 | }); 53 | -------------------------------------------------------------------------------- /lib/index.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var DTRACE; 5 | 6 | // Shamelessly copied from https://github.com/mcavage/node-restify 7 | function createDTrace(name) { 8 | // see https://github.com/mcavage/node-restify/issues/80 and 9 | // https://github.com/mcavage/node-restify/issues/100 10 | if (!DTRACE) { 11 | try { 12 | var d = require('dtrace-provider'); 13 | DTRACE = d.createDTraceProvider(name); 14 | } catch (e) { 15 | DTRACE = { 16 | addProbe: function addProbe() { 17 | var p = { 18 | fire: function () {} 19 | }; 20 | return (p); 21 | }, 22 | enable: function enable() {}, 23 | fire: function fire() {}, 24 | removeProbe: function () {}, 25 | disable: function () {} 26 | 27 | }; 28 | } 29 | } 30 | return (DTRACE); 31 | } 32 | 33 | module.exports = { 34 | Factory: function (backend) { 35 | if (typeof (backend) !== 'object') { 36 | throw new Error('backend must be an object'); 37 | } 38 | 39 | var WorkflowFactory = require('./workflow-factory'); 40 | 41 | return WorkflowFactory(backend); 42 | }, 43 | Backend: function (config) { 44 | var Backend = require('./workflow-in-memory-backend'); 45 | return Backend(config); 46 | }, 47 | API: function (config) { 48 | if (typeof (config) !== 'object') { 49 | throw new Error('config must be an object'); 50 | } 51 | 52 | var API = require('./api'); 53 | return API(config); 54 | }, 55 | Runner: function (config) { 56 | if (typeof (config) !== 'object') { 57 | throw new Error('config must be an object'); 58 | } 59 | var WorkflowRunner = require('./runner'); 60 | config.dtrace = createDTrace('workflow'); 61 | return WorkflowRunner(config); 62 | }, 63 | CreateDTrace: createDTrace, 64 | makeEmitter: require('./make-emitter') 65 | }; 66 | 67 | var errors = require('./errors'); 68 | Object.keys(errors).forEach(function (k) { 69 | module.exports[k] = errors[k]; 70 | }); 71 | -------------------------------------------------------------------------------- /lib/job-runner.js: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var assert = require('assert-plus'); 5 | var util = require('util'); 6 | var fork = require('child_process').fork; 7 | var vasync = require('vasync'); 8 | var WorkflowTaskRunner = require('./task-runner'); 9 | var backoff = require('backoff'); 10 | var clone = require('clone'); 11 | 12 | // Run the given job. Optionally, can pass sandbox object for the 'task' VM 13 | // - opts (Object) with the following members: 14 | // 15 | // - runner (Object) insteance of the runner running this job. Required to 16 | // notify the runner about child processes spawned/finished. Required. 17 | // - backend (Object) instance of the backend used. Required. 18 | // - job (Object) the job to run. Required. 19 | // - log (Object) A bunyan logger. Required. 20 | // - sandbox (Object) VM's sandbox for task (see WorkflowTaskRunner). Optional. 21 | var WorkflowJobRunner = module.exports = function (opts) { 22 | if (typeof (opts) !== 'object') { 23 | throw new TypeError('opts (Object) required'); 24 | } 25 | 26 | if (typeof (opts.runner) !== 'object') { 27 | throw new TypeError('opts.runner (Object) required'); 28 | } 29 | 30 | if (typeof (opts.backend) !== 'object') { 31 | throw new TypeError('opts.backend (Object) required'); 32 | } 33 | 34 | if (typeof (opts.job) !== 'object') { 35 | throw new TypeError('opts.job (Object) required'); 36 | } 37 | 38 | if (opts.sandbox && typeof (opts.sandbox) !== 'object') { 39 | throw new TypeError('opts.sandbox must be an Object'); 40 | } 41 | 42 | if (typeof (opts.dtrace) !== 'object') { 43 | throw new TypeError('opts.dtrace (Object) required'); 44 | } 45 | 46 | assert.object(opts.log, 'opts.log'); 47 | assert.optionalObject(opts.trace, 'opts.trace'); 48 | 49 | if (typeof (opts.runner.do_fork) === 'undefined') { 50 | opts.runner.do_fork = true; 51 | } 52 | 53 | var runner = opts.runner; 54 | var job = opts.job; 55 | var trace = opts.trace; 56 | var log = opts.log; 57 | var backend = opts.backend; 58 | var sandbox = opts.sandbox || {}; 59 | var dtrace = opts.dtrace; 60 | var do_fork = opts.runner.do_fork; 61 | var timeout = null; 62 | 63 | // pointer to child process forked by runTask 64 | var child = null; 65 | // Properties of job object which a task should not be allowed to modify: 66 | // Properties of job object which a task should not be allowed to modify: 67 | var frozen_props = [ 68 | 'chain', 'chain_results', 'onerror', 'onerror_results', 69 | 'exec_after', 'timeout', 'elapsed', 'uuid', 'workflow_uuid', 70 | 'name', 'execution', 'num_attempts', 'max_attempts', 'initial_delay', 71 | 'max_delay', 'prev_attempt', 'oncancel', 'oncancel_results', 72 | 'workflow', 'created_at', 'started', 'log', 'name', 'runner_id', 73 | 'locks', 'target' 74 | ]; 75 | // Our job has been canceled while 76 | // running. If so, we set this to true: 77 | var canceled = false; 78 | var failed = false; 79 | var failedErr = null; 80 | // In case we aren't forking tasks: 81 | var taskRunner = null; 82 | 83 | if (!util.isDate(job.exec_after)) { 84 | job.exec_after = new Date(job.exec_after); 85 | } 86 | 87 | if (!job.chain) { 88 | job.chain = []; 89 | } 90 | 91 | if (!job.chain_results) { 92 | job.chain_results = []; 93 | } 94 | 95 | if (job.onerror && !job.onerror_results) { 96 | job.onerror_results = []; 97 | } 98 | 99 | if (job.oncancel && !job.oncancel_results) { 100 | job.oncancel_results = []; 101 | } 102 | 103 | if (job.timeout) { 104 | timeout = ((job.elapsed) ? 105 | (job.timeout - job.elapsed) : 106 | job.timeout) * 1000; 107 | } 108 | 109 | 110 | function _updateJobProperty(uuid, prop, val, meta, callback) { 111 | if (typeof (meta) === 'function') { 112 | callback = meta; 113 | meta = {}; 114 | } 115 | 116 | var lastError = null; 117 | 118 | var attempt = backoff.exponential({ 119 | initialDelay: 1, 120 | maxDelay: Infinity 121 | }); 122 | 123 | // Retry max-attempts: 124 | // attempt.failAfter(10); 125 | 126 | attempt.on('backoff', function (number, delay) { 127 | // Do something when backoff starts, e.g. show to the 128 | // user the delay before next reconnection attempt. 129 | }); 130 | 131 | // Do something when backoff ends, e.g. retry a failed 132 | // operation. If it fails again with BackendInternalError, then 133 | // backoff, otherwise reset the backoff instance. 134 | attempt.on('ready', function (number, delay) { 135 | return backend.updateJobProperty( 136 | uuid, 137 | prop, 138 | val, 139 | meta, 140 | function (err) { 141 | lastError = err; 142 | if (err && err.name === 'BackendInternalError') { 143 | return attempt.backoff(); 144 | } else { 145 | attempt.reset(); 146 | return callback(err); 147 | } 148 | }); 149 | }); 150 | 151 | // Do something when the maximum number of backoffs is 152 | // reached. 153 | attempt.on('fail', function () { 154 | return callback(lastError); 155 | }); 156 | 157 | return attempt.backoff(); 158 | } 159 | 160 | function onChildUp() { 161 | if (do_fork && child) { 162 | child._pid = child.pid; 163 | runner.childUp(job.uuid, child._pid); 164 | } 165 | } 166 | 167 | function onChildExit() { 168 | if (do_fork && child) { 169 | runner.childDown(job.uuid, child._pid); 170 | } 171 | } 172 | 173 | 174 | function saveJob(callback) { 175 | job.elapsed = (new Date().getTime() - job.started) / 1000; 176 | // Decide what to do with the Job depending on its execution status: 177 | if ( 178 | job.execution === 'failed' || 179 | job.execution === 'succeeded' || 180 | job.execution === 'canceled') { 181 | log.trace('Finishing job ...'); 182 | return backend.finishJob(job, function (err, job) { 183 | runner.releaseSlot(); 184 | if (err) { 185 | return callback(err); 186 | } 187 | return callback(null, job); 188 | }); 189 | } else if (job.execution === 'queued') { 190 | log.trace('Re queueing job ...'); 191 | return backend.queueJob(job, function (err, job) { 192 | runner.releaseSlot(); 193 | if (err) { 194 | return callback(err); 195 | } 196 | return callback(null, job); 197 | }); 198 | } else if (job.execution === 'waiting') { 199 | log.trace('Pausing job ...'); 200 | return backend.pauseJob(job, function (err, job) { 201 | runner.releaseSlot(); 202 | if (err) { 203 | return callback(err); 204 | } 205 | return callback(null, job); 206 | }); 207 | } else if (job.execution === 'retried') { 208 | log.trace('Retrying job ...'); 209 | return backend.finishJob(job, function (err, job) { 210 | runner.releaseSlot(); 211 | if (err) { 212 | return callback(err); 213 | } 214 | return callback('retry', job); 215 | }); 216 | } else { 217 | log.error('Unknown job execution status ' + job.execution); 218 | runner.releaseSlot(); 219 | return callback('Unknown job execution status ' + job.execution); 220 | } 221 | } 222 | 223 | function onEnd(err, callback) { 224 | if (!err && failed) { 225 | err = failedErr; 226 | } 227 | if (err) { 228 | if (err === 'queue') { 229 | job.execution = 'queued'; 230 | } else if (err === 'cancel') { 231 | job.execution = 'canceled'; 232 | } else if (err === 'retry') { 233 | job.execution = 'retried'; 234 | } else if (err === 'wait') { 235 | job.execution = 'waiting'; 236 | } else { 237 | job.execution = 'failed'; 238 | } 239 | } else { 240 | job.execution = 'succeeded'; 241 | } 242 | return saveJob(callback); 243 | } 244 | 245 | function runTask(task, chain, cb) { 246 | var task_start = new Date().toISOString(); 247 | // We may have cancel the job due to runner process exit/restart 248 | // If that's the case, do not fork, just return: 249 | if (canceled === true && job.execution === 'queued') { 250 | return cb('queue'); 251 | } 252 | 253 | if (trace) { 254 | trace.begin(trace.fields.name + '.' + task.name); 255 | } 256 | 257 | // Task name, task body, start time (time we fire start probe) 258 | dtrace.addProbe('wf-task-start', 259 | 'char *', 260 | 'char *', 261 | 'int'); 262 | // Task name, result, error, started_at/finished_at, 263 | // end time (time we fire done probe): 264 | dtrace.addProbe('wf-task-done', 265 | 'char *', 266 | 'char *', 267 | 'char *', 268 | 'int', 269 | 'int', 270 | 'int'); 271 | 272 | dtrace.fire('wf-task-start', function taskProbeStart() { 273 | var ret = [ 274 | task.name, 275 | task.body, 276 | new Date().getTime() 277 | ]; 278 | return (ret); 279 | }); 280 | 281 | function runTaskCb(msg) { 282 | // Message may contain one of the 'error', 'cmd', or 'info' members, 283 | // plus 'result'. 284 | log.trace({message: msg}, 'child process message'); 285 | 286 | if (msg.info) { 287 | var info = { 288 | data: msg.info, 289 | date: new Date().toISOString() 290 | }; 291 | return backend.addInfo(job.uuid, info, function (err) { 292 | if (err) { 293 | log.error({err: err}, 'Error adding info'); 294 | } 295 | }); 296 | } 297 | 298 | if (do_fork) { 299 | // If we don't have msg.info member, it's safe to tell the 300 | // child process to exit if it hasn't done yet: 301 | if (child && child.exitCode === null) { 302 | child.kill(); 303 | } 304 | } else { 305 | // Once we're done, taskRunner should be reset for next task: 306 | if (taskRunner) { 307 | // Allow tasks which might go haywire, a chance to know the 308 | // task should have been complete. 309 | taskRunner.markDone(); 310 | } 311 | taskRunner = null; 312 | } 313 | 314 | // Save the results into the result chain + backend update. 315 | var res = { 316 | result: msg.result, 317 | error: msg.error, 318 | name: msg.task_name, 319 | started_at: task_start, 320 | finished_at: new Date().toISOString() 321 | }; 322 | 323 | dtrace.fire('wf-task-done', function tastkProbeDone() { 324 | var ret = [ 325 | res.name, 326 | res.result, 327 | res.error, 328 | new Date(res.started_at).getTime(), 329 | new Date(res.finished_at).getTime(), 330 | new Date().getTime() 331 | ]; 332 | return (ret); 333 | }); 334 | 335 | if (trace) { 336 | trace.end(trace.fields.name + '.' + task.name); 337 | } 338 | 339 | // If the task added/updated any property to the job, 340 | // let's get it 341 | if (msg.job) { 342 | Object.keys(msg.job).forEach(function (p) { 343 | if (frozen_props.indexOf(p) === -1) { 344 | job[p] = msg.job[p]; 345 | } 346 | }); 347 | } 348 | 349 | // Prevent backend double JSON encoding issues, just in case: 350 | if (!util.isArray(job[chain])) { 351 | return cb(util.format('Job chain is not an array of ' + 352 | 'results, but has type %s', typeof (job[chain]))); 353 | } else { 354 | job[chain].push(res); 355 | return _updateJobProperty( 356 | job.uuid, 357 | chain, 358 | job[chain], 359 | function (err) { 360 | // If we canceled the job and got a reply from the 361 | // running task we want to stop execution ASAP: 362 | if (canceled) { 363 | if (job.execution === 'queued') { 364 | return cb('queue'); 365 | } else { 366 | if (chain !== 'oncancel_results') { 367 | return cb('cancel'); 368 | } else { 369 | return cb(null, res.result); 370 | } 371 | } 372 | } else { 373 | // Backend error 374 | if (err) { 375 | return cb(err); 376 | } else if (msg.error) { 377 | // Task error 378 | return cb(msg.error); 379 | } else { 380 | // All good: 381 | return cb(null, res.result); 382 | } 383 | } 384 | }); 385 | } 386 | } 387 | 388 | if (do_fork) { 389 | try { 390 | child = fork(__dirname + '/child.js'); 391 | } catch (e) { 392 | // If we can't fork, log error and re-queue the job execution 393 | log.error(e, 'Error forking child process'); 394 | return cb('queue'); 395 | } 396 | 397 | // Keep withing try/catch block and prevent wf-runner exiting if 398 | // child exits due to out of memory 399 | try { 400 | onChildUp(); 401 | // Message may contain one of the 'error', 'cmd', or 'info' 402 | // members, plus 'result'. 403 | child.on('message', runTaskCb); 404 | 405 | child.on('exit', function (code) { 406 | onChildExit(); 407 | }); 408 | 409 | return child.send({ 410 | task: task, 411 | job: job, 412 | sandbox: sandbox 413 | }); 414 | } catch (ex) { 415 | log.error(ex, 'Error from child process'); 416 | onChildExit(); 417 | return cb(ex); 418 | } 419 | } else { 420 | taskRunner = WorkflowTaskRunner({ 421 | task: task, 422 | job: clone(job), 423 | sandbox: sandbox 424 | }); 425 | return taskRunner.runTask(runTaskCb); 426 | } 427 | } 428 | 429 | // Run the given chain of tasks 430 | // Arguments: 431 | // - chain: the chain of tasks to run. 432 | // - chain_results: the name of the job property to append current chain 433 | // results. For main `chain` it'll be `job.chain_results`; for `onerror` 434 | // branch, it'll be `onerror_results` and so far. 435 | // - callback: f(err) 436 | function runChain(chain, chain_results, callback) { 437 | var timeoutId, chain_to_run; 438 | 439 | if (timeout) { 440 | timeoutId = setTimeout(function () { 441 | // Execution of everything timed out, have to abort running 442 | // tasks and run the onerror chain. 443 | clearTimeout(timeoutId); 444 | if (do_fork && child) { 445 | process.kill(child._pid, 'SIGTERM'); 446 | } 447 | // If it's already failed, what it's timing out is the 'onerror' 448 | // chain. We don't wanna run it again. 449 | if (!failed) { 450 | job[chain_results].push({ 451 | error: 'workflow timeout', 452 | result: '', 453 | finished_at: new Date().toISOString() 454 | }); 455 | _updateJobProperty( 456 | job.uuid, 457 | chain_results, 458 | job[chain_results], 459 | function (err) { 460 | if (err) { 461 | return onEnd('backend error', callback); 462 | } 463 | return onError('workflow timeout', callback); 464 | }); 465 | } else { 466 | job.onerror_results.push({ 467 | error: 'workflow timeout', 468 | result: '', 469 | finished_at: new Date().toISOString() 470 | }); 471 | _updateJobProperty( 472 | job.uuid, 473 | chain_results, 474 | job.onerror_results, 475 | function (err) { 476 | if (err) { 477 | return onEnd('backend error', callback); 478 | } 479 | return onEnd('workflow timeout', callback); 480 | }); 481 | } 482 | }, timeout); 483 | } 484 | 485 | // Job may have been re-queued. If that's the case, we already 486 | // have results for some tasks: restart from the task right 487 | // after the one which re-queued the workflow. 488 | if (job[chain_results].length) { 489 | chain_to_run = chain.slice( 490 | job[chain_results].length, chain.length); 491 | } else { 492 | chain_to_run = chain; 493 | } 494 | 495 | var pipeline = chain_to_run.map(function (task) { 496 | return (function (_, cb) { 497 | if (task.modules && typeof (task.modules) === 'string') { 498 | try { 499 | task.modules = JSON.parse(task.modules); 500 | } catch (e) { 501 | delete task.modules; 502 | } 503 | } 504 | return runTask(task, chain_results, cb); 505 | }); 506 | }); 507 | 508 | vasync.pipeline({ 509 | funcs: pipeline 510 | }, function (err, results) { 511 | log.trace({results: results}, 'Pipeline results'); 512 | // Whatever happened here, we are timeout done. 513 | if (timeoutId) { 514 | clearTimeout(timeoutId); 515 | } 516 | 517 | if (err) { 518 | // If we are cancelating job, we want to avoid running 519 | // "onerror" branch 520 | if (err === 'cancel') { 521 | return onCancel(callback); 522 | } else { 523 | return onError(err, callback); 524 | } 525 | } else { 526 | // All tasks run successful. Need to report information so, 527 | // we rather emit 'end' and delegate into another function 528 | // (unless we are running the onCancel chain) 529 | if (canceled) { 530 | return onEnd('cancel', callback); 531 | } 532 | return onEnd(null, callback); 533 | } 534 | }); 535 | } 536 | 537 | 538 | function onCancel(callback) { 539 | canceled = true; 540 | if (job.oncancel && util.isArray(job.oncancel)) { 541 | log.trace('Running oncancel'); 542 | return runChain( 543 | job.oncancel, 'oncancel_results', function (err) { 544 | if (err) { 545 | log.error({err: err}, 'Error running oncancel chain'); 546 | } 547 | return onEnd('cancel', callback); 548 | }); 549 | } else { 550 | return onEnd('cancel', callback); 551 | } 552 | } 553 | 554 | 555 | function onError(err, callback) { 556 | // We're already running the onerror chain, do not retry again! 557 | if (failed) { 558 | return onEnd(err, callback); 559 | } else { 560 | if (err === 'queued') { 561 | return onEnd('queue', callback); 562 | } else if (err === 'retry') { 563 | return onEnd('retry', callback); 564 | } else if (err === 'wait') { 565 | return onEnd('wait', callback); 566 | } else { 567 | failed = true; 568 | failedErr = err; 569 | if (job.onerror && util.isArray(job.onerror)) { 570 | return runChain( 571 | job.onerror, 'onerror_results', callback); 572 | } else { 573 | return onEnd(err, callback); 574 | } 575 | } 576 | } 577 | } 578 | 579 | 580 | return ({ 581 | timeout: timeout, 582 | 583 | cancel: function cancel(execution, callback) { 584 | canceled = true; 585 | if (execution === 'canceled') { 586 | if (do_fork && child) { 587 | child.send({ 588 | cmd: 'cancel' 589 | }); 590 | } else if (taskRunner) { 591 | taskRunner.canceled = true; 592 | } 593 | job.execution = 'canceled'; 594 | } else if (execution === 'queued') { 595 | job.execution = 'queued'; 596 | } 597 | return callback(); 598 | }, 599 | 600 | saveJob: saveJob, 601 | 602 | onChildUp: onChildUp, 603 | 604 | onChildExit: onChildExit, 605 | 606 | onEnd: onEnd, 607 | 608 | onError: onError, 609 | 610 | runTask: runTask, 611 | 612 | runChain: runChain, 613 | // Run the workflow within a timeout which, in turn, will call tasks in 614 | // chain within their respective timeouts when given: 615 | // Arguments: 616 | // - callback: f(err) - Used to send final job results 617 | run: function run(callback) { 618 | runner.getSlot(); 619 | // Keep track of time: 620 | job.started = new Date().getTime(); 621 | runChain(job.chain, 'chain_results', callback); 622 | } 623 | 624 | }); 625 | }; 626 | -------------------------------------------------------------------------------- /lib/make-emitter.js: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017, Joyent, Inc. 2 | 3 | var EventEmitter = require('events').EventEmitter; 4 | 5 | function makeEmitter(o) { 6 | var emitter = new EventEmitter(); 7 | 8 | o.on = function () { 9 | return emitter.on.apply(emitter, arguments); 10 | }; 11 | 12 | o.once = function () { 13 | return emitter.once.apply(emitter, arguments); 14 | }; 15 | 16 | o.addListener = function () { 17 | return emitter.addListener.apply(emitter, arguments); 18 | }; 19 | 20 | o.emit = function () { 21 | return emitter.emit.apply(emitter, arguments); 22 | }; 23 | 24 | o.listeners = function () { 25 | return emitter.listeners.apply(emitter, arguments); 26 | }; 27 | 28 | o.removeAllListeners = function () { 29 | return emitter.removeAllListeners.apply(emitter, arguments); 30 | }; 31 | 32 | o.removeListener = function () { 33 | return emitter.removeListener.apply(emitter, arguments); 34 | }; 35 | 36 | o.setMaxListeners = function () { 37 | return emitter.setMaxListeners.apply(emitter, arguments); 38 | }; 39 | 40 | return o; 41 | } 42 | 43 | module.exports = makeEmitter; 44 | -------------------------------------------------------------------------------- /lib/task-runner.js: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2018, Joyent, Inc. 3 | 4 | var bunyan = require('bunyan'); 5 | var util = require('util'); 6 | var vm = require('vm'); 7 | 8 | // Run a single task. 9 | // - opts (Object) required options to run the task: 10 | // - job (Object) the job current task is part of. 11 | // - task (Object) the task to run. 12 | // - sandbox (Object) the sandbox to pass to the VM where the task run 13 | // in the form: 14 | // { 15 | // "any_var": "aValue", 16 | // "modules": { 17 | // 'module_global_var_name': 'node-module-name' 18 | // } 19 | // } 20 | // Will return an object with the following information: 21 | // - job (Object) the job object updated with any modification the task 22 | // may need to realize. Note this is the only way of communication between 23 | // tasks. 24 | // - result (String) information about task results. 25 | // - error (String) when an error has happened, descriptive information. 26 | // - cmd (String) next command workflow should run after this task. Right now 27 | // one of 'run', 'error' or 'queue'. 28 | var WorkflowTaskRunner = module.exports = function (opts) { 29 | 30 | if (typeof (opts) !== 'object') { 31 | throw new TypeError('opts (Object) required'); 32 | } 33 | 34 | if (typeof (opts.job) !== 'object') { 35 | throw new TypeError('opts.job (Object) required'); 36 | } 37 | 38 | if (typeof (opts.task) !== 'object') { 39 | throw new TypeError('opts.task (Object) required'); 40 | } 41 | 42 | function info(rec) { 43 | return taskCallback(formatResults({ 44 | info: rec 45 | })); 46 | } 47 | 48 | var sandbox = { 49 | setTimeout: global.setTimeout, 50 | clearTimeout: global.clearTimeout, 51 | setInterval: global.setInterval, 52 | clearInterval: global.clearInterval, 53 | console: global.console, 54 | util: global.util, 55 | info: info, 56 | Buffer: global.Buffer, 57 | Error: Error 58 | }; 59 | if (global.setImmediate) { 60 | sandbox.setImmediate = global.setImmediate; 61 | sandbox.clearImmediate = global.clearImmediate; 62 | } 63 | 64 | if (opts.sandbox) { 65 | if (typeof (opts.sandbox) !== 'object') { 66 | throw new TypeError('opts.sandbox must be an Object'); 67 | } else { 68 | Object.keys(opts.sandbox).forEach(function (k) { 69 | if (k === 'modules') { 70 | // Allow tasks to load none or some of the sandbox 71 | // modules: 72 | if (opts.task.modules) { 73 | if (typeof (opts.task.modules) !== 'object') { 74 | throw new TypeError( 75 | 'opts.task.modules must be an Object'); 76 | } 77 | Object.keys(opts.task.modules).forEach( 78 | function (mod) { 79 | global[mod] = require(opts.task.modules[mod]); 80 | sandbox[mod] = global[mod]; 81 | }); 82 | } else { 83 | Object.keys(opts.sandbox.modules).forEach( 84 | function (mod) { 85 | global[mod] = require(opts.sandbox.modules[mod]); 86 | sandbox[mod] = global[mod]; 87 | }); 88 | } 89 | } else { 90 | sandbox[k] = opts.sandbox[k]; 91 | } 92 | }); 93 | } 94 | } 95 | 96 | var context = vm.createContext(sandbox); 97 | 98 | var job = opts.job; 99 | var name = opts.task.name || opts.task.uuid; 100 | // Number of times to attempt the task 101 | var retry = opts.task.retry || 1; 102 | // Timeout for the task, when given 103 | var timeout = (opts.task.timeout * 1000) || null; 104 | var body = null; 105 | // First, wrap into try/catch, since it may be invalid JavaScript: 106 | try { 107 | body = vm.runInContext('(' + opts.task.body + ')', context); 108 | } catch (e) { 109 | throw new TypeError('opt.task.body (String) must be a Function source'); 110 | } 111 | 112 | // Even if it is valid JavaScript code, we need it to be a function: 113 | if (typeof (body) !== 'function') { 114 | throw new TypeError('opt.task.body (String) must be a Function source'); 115 | } 116 | 117 | var fallback; 118 | try { 119 | fallback = (!opts.task.fallback) ? null : 120 | vm.runInContext('(' + opts.task.fallback + ')', context); 121 | } catch (err) { 122 | throw new TypeError( 123 | 'opt.task.fallback (String) must be a Function source'); 124 | } 125 | 126 | if (fallback && typeof (fallback) !== 'function') { 127 | throw new TypeError( 128 | 'opt.task.fallback (String) must be a Function source'); 129 | } 130 | 131 | // Number of already run retries: 132 | var retries = 0; 133 | // Placeholder for timeout identifiers: 134 | var taskTimeoutId = null; 135 | var taskFallbackTimeoutId = null; 136 | 137 | var taskCallback = null; 138 | 139 | var taskRunner = { 140 | name: name, 141 | body: body, 142 | fallback: fallback, 143 | timeout: timeout, 144 | // Received cancelation message from parent? 145 | canceled: false 146 | }; 147 | 148 | function formatResults(msg) { 149 | if (!msg.result) { 150 | msg.result = ''; 151 | } 152 | 153 | if (!msg.error) { 154 | msg.error = ''; 155 | } 156 | 157 | // GH-82: If we have an error instance, try to provide useful 158 | // information, like restify.Error does: 159 | if (msg.error && typeof (msg.error) !== 'string' && 160 | !msg.error.statusCode) { 161 | // NOTE: Intentionally avoiding error.stack, since it will point to 162 | // this file despite of task.body contents. 163 | msg.error = { 164 | name: msg.error.name, 165 | message: msg.error.message 166 | }; 167 | } 168 | if (msg.info) { 169 | msg.cmd = 'info'; 170 | } 171 | 172 | if (!msg.cmd) { 173 | if (msg.error === '') { 174 | msg.cmd = 'run'; 175 | } else { 176 | msg.cmd = (msg.error === 'queue') ? 'queue' : 'error'; 177 | } 178 | } 179 | 180 | msg.job = {}; 181 | var p; 182 | for (p in job) { 183 | if (p === 'log') { 184 | continue; 185 | } 186 | msg.job[p] = job[p]; 187 | } 188 | 189 | msg.task_name = name; 190 | return msg; 191 | } 192 | 193 | 194 | var log = new bunyan({ 195 | name: name, 196 | job_uuid: job.uuid, 197 | stream: process.stdout, 198 | serializers: bunyan.stdSerializers, 199 | level: 'info' 200 | }); 201 | 202 | job.log = log; 203 | 204 | function clearTaskTimeoutId(tId) { 205 | if (tId) { 206 | clearTimeout(tId); 207 | tId = null; 208 | } 209 | } 210 | 211 | // A retry may fail either due to a task timeout or just a task failure: 212 | function onRetryError(err, cb) { 213 | clearTaskTimeoutId(taskTimeoutId); 214 | 215 | // If job sent a cancelation message, stop here: 216 | if (taskRunner.canceled) { 217 | return cb(formatResults({ 218 | error: 'cancel', 219 | cmd: 'cancel' 220 | })); 221 | } 222 | 223 | // If we are not at the latest retry, try again: 224 | if (retries < retry) { 225 | return retryTask(cb); 226 | } else { 227 | // We are at the latest retry, check if the task has a 'fallback': 228 | if (fallback) { 229 | // Set the task timeout when given also for fallback: 230 | if (timeout) { 231 | clearTaskTimeoutId(taskFallbackTimeoutId); 232 | // Task timeout must be in seconds: 233 | taskFallbackTimeoutId = setTimeout(function () { 234 | return cb(formatResults({ 235 | error: 'task timeout error' 236 | })); 237 | }, timeout); 238 | } 239 | 240 | try { 241 | return fallback(err, job, function (error, result) { 242 | clearTaskTimeoutId(taskFallbackTimeoutId); 243 | // If even the error handler returns an error, we have 244 | // to bubble it up: 245 | if (error) { 246 | return cb(formatResults({ 247 | error: error 248 | })); 249 | } 250 | // If the 'fallback' handler fixed the error, let's 251 | // return success despite of body failure: 252 | return cb(formatResults({ 253 | result: (result) ? result : 'OK' 254 | })); 255 | }); 256 | } catch (e) { 257 | return cb(formatResults({ 258 | error: e.toString() 259 | })); 260 | } 261 | } else { 262 | // Latest retry and task 'fallback' is not defined, fail the 263 | // task save the error and bubble up: 264 | return cb(formatResults({ 265 | error: err 266 | })); 267 | } 268 | } 269 | } 270 | 271 | function retryTask(cb) { 272 | var retryTimedOut = false; 273 | retries += 1; 274 | 275 | // Set the task timeout when given: 276 | if (timeout) { 277 | clearTaskTimeoutId(taskTimeoutId); 278 | // Task timeout must be in seconds: 279 | taskTimeoutId = setTimeout(function () { 280 | retryTimedOut = true; 281 | return onRetryError('task timeout error', cb); 282 | }, timeout); 283 | } 284 | 285 | return body(job, function (err, res) { 286 | if (retryTimedOut) { 287 | return null; 288 | } 289 | 290 | // Reached callback from task body, clear the taskTimeout first: 291 | clearTaskTimeoutId(taskTimeoutId); 292 | // Task invokes callback with an error message: 293 | if (err) { 294 | // A task can re-queue a job: 295 | if (err === 'queue') { 296 | return cb(formatResults({ 297 | result: (res) ? res : 'OK', 298 | error: 'queue' 299 | })); 300 | // Or retry the whole job: 301 | } else if (err === 'retry') { 302 | return cb(formatResults({ 303 | result: (res) ? res : 'OK', 304 | error: 'retry' 305 | })); 306 | // Or tell the job to hold on until something external happens: 307 | } else if (err === 'wait') { 308 | return cb(formatResults({ 309 | result: (res) ? res : 'OK', 310 | error: 'wait' 311 | })); 312 | } 313 | return onRetryError(err, cb); 314 | } else { 315 | // All good calling the task body, let's save the results and 316 | // move to next task: 317 | return cb(formatResults({ 318 | result: (res) ? res : 'OK' 319 | })); 320 | } 321 | }); 322 | 323 | } 324 | 325 | // Run the task. As many retries as required, invoke fallback if 326 | // necessary. Send back a message with info about results, error, 327 | // the job object itself to communicate with other tasks and a command 328 | // to let WorkflowJobRunner how to proceed. 329 | // 330 | // - callback - f(message) 331 | // 332 | // - message is the same than self.message, and will be composed of the 333 | // following members: 334 | // - result (String) any information the tasks want to save into the 335 | // proper result chain. 336 | // - err (String) if there is an error, it'll be here as an string. 337 | // - job (Object) the job object itself, without this task's results 338 | // appended, so we can pass job properties to the following tasks on the 339 | // chain. 340 | // - cmd (String) depending on results and the task itself, a clue for the 341 | // WorkflowJobRunner to decide what to do next. These values will be one 342 | // of 'run', 'error', 'queue', 'retry'. (In the future we may also 343 | // implement 'pause' to let the runner set a timeout to continue 344 | // execution). 345 | function runTask(callback) { 346 | taskCallback = callback; 347 | try { 348 | retryTask(callback); 349 | } catch (e) { 350 | clearTaskTimeoutId(taskTimeoutId); 351 | // On this case, we will make an exception and return the stack, 352 | // b/c this means the task is raising an uncaught exception: 353 | callback(formatResults({ 354 | result: '', 355 | error: e.stack 356 | })); 357 | } 358 | } 359 | 360 | function markDone(execution) { 361 | if (job) { 362 | job.timeToDie = true; 363 | } 364 | } 365 | 366 | taskRunner.markDone = markDone; 367 | taskRunner.runTask = runTask; 368 | return taskRunner; 369 | }; 370 | -------------------------------------------------------------------------------- /lib/workflow-factory.js: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var uuid = require('uuid'); 5 | var util = require('util'); 6 | var crypto = require('crypto'); 7 | 8 | var clone = require('clone'); 9 | 10 | var WorkflowFactory = module.exports = function (backend) { 11 | // Create a workflow and store it on the backend 12 | // 13 | // - workflow - the workflow object properties: 14 | // - name: string workflow name, uniqueness enforced. 15 | // - timeout: integer, acceptable time, in seconds, to run the wf. 16 | // (60 minutes if nothing given). Also, the Boolean `false` can be used 17 | // to explicitly create a workflow without a timeout. 18 | // - chain: An array of Tasks to run. 19 | // - onerror: An array of Tasks to run in case `chain` fails. 20 | // - oncancel: An array of Tasks to run in case job is canceled. 21 | // - max_attempts: integer, maximum number of attempts allowed for retries 22 | // (10 if nothing given). 23 | // - initial_delay: integer, initial delay in milliseconds before a retry 24 | // is executed. (optional) 25 | // - max_delay: integer, maximum delay in milliseconds between retries 26 | // (optional) 27 | // - opts - Object, any additional information to be passed to the backend 28 | // when creating a workflow object which are not workflow 29 | // properties, like HTTP request ID or other meta information. 30 | // - callback - function(err, workflow) 31 | // 32 | // Every Task can have the following members: 33 | // - name - string task name, optional. 34 | // - body - function(job, cb) the task main function. Required. 35 | // - fallback: function(err, job, cb) a function to run in case `body` 36 | // fails. Optional. 37 | // - retry: Integer, number of attempts to run the task before try 38 | // `fallback`. Optional. By default, just one retry. 39 | // - timeout: Integer, acceptable time, in seconds, a task execution 40 | // should take, before fail it with timeout error. Optional. 41 | // 42 | function workflow(w, opts, callback) { 43 | var wf = w || {}; 44 | 45 | if (typeof (opts) === 'function') { 46 | callback = opts; 47 | opts = {}; 48 | } 49 | 50 | function validateTask(task, cb) { 51 | var p; 52 | 53 | if (!task.body) { 54 | return cb('Task "body" is required'); 55 | } 56 | 57 | if (typeof (task.body) !== 'function') { 58 | return cb('Task "body" must be a function'); 59 | } 60 | 61 | if (!task.uuid) { 62 | task.uuid = uuid(); 63 | } 64 | 65 | // Ensure that if task.fallback is given, it's a function 66 | if (task.fallback && typeof (task.fallback) !== 'function') { 67 | return cb('Task "fallback" must be a function'); 68 | } 69 | 70 | // If task is overriding modules to load, make sure it's a 71 | // proper object with keys: 72 | if (task.modules && 73 | (typeof (task.modules) !== 'object' || 74 | !Array.isArray(Object.keys(task.modules)))) { 75 | return cb('Task "modules" must be an object'); 76 | } 77 | for (p in task) { 78 | if (typeof (task[p]) === 'function') { 79 | task[p] = task[p].toString(); 80 | } else if (typeof (task[p]) === 'object') { 81 | task[p] = JSON.stringify(task[p]); 82 | } 83 | } 84 | return task; 85 | } 86 | 87 | if (!wf.name) { 88 | return callback('Workflow "name" is required'); 89 | } 90 | 91 | if (wf.chain && ( 92 | typeof (wf.chain) !== 'object' || 93 | typeof (wf.chain.length) === 'undefined')) { 94 | return callback('Workflow "chain" must be an array'); 95 | } 96 | 97 | if (!wf.chain) { 98 | wf.chain = []; 99 | } 100 | 101 | if (!wf.uuid) { 102 | wf.uuid = uuid(); 103 | } 104 | 105 | if (typeof (wf.max_attempts) !== 'number') { 106 | wf.max_attempts = 10; 107 | } 108 | 109 | if (wf.onError) { 110 | wf.onerror = wf.onError; 111 | delete wf.onError; 112 | } 113 | 114 | if (wf.onerror && ( 115 | typeof (wf.onerror) !== 'object' || 116 | typeof (wf.onerror.length) === 'undefined')) { 117 | return callback('Workflow "onerror" must be an array'); 118 | } 119 | 120 | if (wf.onCancel) { 121 | wf.oncancel = wf.onCancel; 122 | delete wf.onCancel; 123 | } 124 | 125 | if (wf.oncancel && ( 126 | typeof (wf.oncancel) !== 'object' || 127 | typeof (wf.oncancel.length) === 'undefined')) { 128 | return callback('Workflow "oncancel" must be an array'); 129 | } 130 | 131 | wf.chain.forEach(function (task, i, arr) { 132 | wf.chain[i] = validateTask(task, callback); 133 | }); 134 | 135 | wf.chain_md5 = crypto.createHash('md5').update( 136 | JSON.stringify(clone(wf.chain))).digest('hex'); 137 | 138 | if (wf.onerror) { 139 | wf.onerror.forEach(function (task, i, arr) { 140 | wf.onerror[i] = validateTask(task, callback); 141 | }); 142 | wf.onerror_md5 = crypto.createHash('md5').update( 143 | JSON.stringify(clone(wf.onerror))).digest('hex'); 144 | } 145 | 146 | if (wf.oncancel) { 147 | wf.oncancel.forEach(function (task, i, arr) { 148 | wf.oncancel[i] = validateTask(task, callback); 149 | }); 150 | wf.oncancel_md5 = crypto.createHash('md5').update( 151 | JSON.stringify(clone(wf.oncancel))).digest('hex'); 152 | } 153 | 154 | if (typeof (wf.timeout) !== 'number') { 155 | wf.timeout = 3600; 156 | } else if (wf.timeout === 0) { 157 | delete wf.timeout; 158 | } 159 | 160 | return backend.createWorkflow(wf, function (err, result) { 161 | if (err) { 162 | return callback(err); 163 | } else { 164 | return callback(null, wf); 165 | } 166 | }); 167 | 168 | } 169 | // Create a queue a Job from the given Workflow: 170 | // 171 | // - j - the Job object workflow and extra arguments: 172 | // - workflow - (required) UUID of Workflow object to create the job from. 173 | // - params - (opt) JSON object, parameters to pass to the job during exec 174 | // - target - (opt) String, Job's target, used to ensure that we don't 175 | // queue two jobs with the same target and params at once. 176 | // - exec_after - (opt) ISO 8601 Date, delay job execution after the 177 | // given timestamp (execute from now when not given). 178 | // - num_attempts - (opt) if this job is a retry of another job, this is 179 | // how many attempts have happened before this one. 180 | // - opts - Object, any additional information to be passed to the backend 181 | // when creating a workflow object which are not workflow 182 | // properties, like HTTP request ID or other meta information. 183 | // - callback - f(err, job) 184 | function job(j, opts, callback) { 185 | var theJob = { execution: 'queued', chain_results: []}; 186 | 187 | if (!j.workflow) { 188 | return callback('"j.workflow" is required'); 189 | } 190 | 191 | if (typeof (opts) === 'function') { 192 | callback = opts; 193 | opts = {}; 194 | } 195 | 196 | return backend.getWorkflow(j.workflow, function (err, wf) { 197 | var p; 198 | var q; 199 | if (err) { 200 | return callback(err); 201 | } 202 | 203 | if (Object.keys(wf).length === 0) { 204 | return callback( 205 | 'Cannot create a job from an unexisting workflow'); 206 | } 207 | 208 | if (wf.chain.length === 0) { 209 | return callback( 210 | 'Cannot queue a job from a workflow without any task'); 211 | } 212 | 213 | for (q in j) { 214 | if (j !== 'workflow') { 215 | theJob[q] = j[q]; 216 | } 217 | } 218 | 219 | if (!theJob.exec_after) { 220 | theJob.exec_after = new Date().toISOString(); 221 | } 222 | 223 | if (!theJob.params) { 224 | theJob.params = {}; 225 | } 226 | 227 | if (!theJob.num_attempts) { 228 | theJob.num_attempts = 0; 229 | } 230 | 231 | if (!theJob.uuid) { 232 | theJob.uuid = uuid(); 233 | } 234 | 235 | 236 | for (p in wf) { 237 | if (p === 'uuid') { 238 | theJob.workflow_uuid = wf.uuid; 239 | } else if (p !== 'chain_md5' && p !== 'onerror_md5') { 240 | theJob[p] = wf[p]; 241 | } 242 | } 243 | 244 | return backend.validateJobTarget(theJob, function (err) { 245 | if (err) { 246 | return callback(err); 247 | } else { 248 | return backend.createJob(theJob, function (err, results) { 249 | if (err) { 250 | return callback(err); 251 | } else { 252 | return callback(null, theJob); 253 | } 254 | }); 255 | } 256 | }); 257 | }); 258 | } 259 | 260 | return { 261 | workflow: workflow, 262 | job: job 263 | }; 264 | }; 265 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "wf", 3 | "description": "Tasks Workflows orchestration API and runners", 4 | "version": "1.2.3", 5 | "repository": { 6 | "type": "git", 7 | "url": "https://github.com/joyent/node-workflow.git" 8 | }, 9 | "author": "Pedro Palazón Candel (http://www.joyent.com)", 10 | "license": "MIT", 11 | "contributors": [ 12 | "Mark Cavage", 13 | "Trent Mick", 14 | "Josh Wilsdon", 15 | "Bryan Cantrill", 16 | "Andrés Rodríquez", 17 | "Rob Gulewich", 18 | "Fred Kuo" 19 | ], 20 | "bin": { 21 | "workflow-api": "./bin/workflow-api", 22 | "workflow-runner": "./bin/workflow-runner" 23 | }, 24 | "main": "lib/index.js", 25 | "dependencies": { 26 | "assert-plus": "1.0.0", 27 | "backoff": "1.2.0", 28 | "bunyan": "1.8.1", 29 | "clone": "0.1.6", 30 | "restify": "4.1.1", 31 | "sigyan": "0.2.0", 32 | "trace-event": "1.3.0", 33 | "triton-metrics": "0.1.1", 34 | "uuid": "3.0.1", 35 | "vasync": "1.6.4" 36 | }, 37 | "scripts": { 38 | "test": "./node_modules/.bin/tap ./test/*.test.js" 39 | }, 40 | "devDependencies": { 41 | "tap": "~0.3" 42 | }, 43 | "optionalDependencies": { 44 | "dtrace-provider": "0.6.0" 45 | }, 46 | "engines": { 47 | "node": ">=0.10.0" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /test/child.test.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var util = require('util'), 5 | test = require('tap').test, 6 | uuid = require('uuid'), 7 | fork = require('child_process').fork; 8 | 9 | var job = { 10 | timeout: 180, 11 | workflow_uuid: 'bdfa0821-5071-4682-b965-88293149a8d2', 12 | name: 'A workflow name', 13 | exec_after: '2012-01-03T12:54:05.788Z', 14 | params: { 15 | 'a': '1', 16 | 'b': '2' 17 | }, 18 | uuid: 'fb4c202d-19ed-4ed9-afda-8255aa7f38ad', 19 | target: '/foo/bar', 20 | execution: 'running', 21 | chain_results: [], 22 | chain: [], 23 | onerror: [] 24 | }; 25 | 26 | var task = { 27 | 'uuid': uuid(), 28 | 'name': 'A name', 29 | 'body': 'Fake body' 30 | }; 31 | 32 | 33 | test('unkown message', function (t) { 34 | var child = fork(__dirname + '/../lib/child.js'); 35 | 36 | child.on('message', function (msg) { 37 | t.ifError(msg.job); 38 | t.ok(msg.error); 39 | t.equal(msg.error, 'unknown message'); 40 | if (child.exitCode === null) { 41 | child.kill(); 42 | } 43 | }); 44 | 45 | child.on('exit', function (code, signal) { 46 | t.end(); 47 | }); 48 | 49 | child.send({ 50 | foo: 'bar' 51 | }); 52 | 53 | }); 54 | 55 | 56 | test('message without job', function (t) { 57 | var child = fork(__dirname + '/../lib/child.js'); 58 | 59 | child.on('message', function (msg) { 60 | t.ifError(msg.job); 61 | t.ok(msg.error); 62 | t.equal(msg.error, 'unknown message'); 63 | if (child.exitCode === null) { 64 | child.kill(); 65 | } 66 | }); 67 | 68 | child.on('exit', function (code) { 69 | t.end(); 70 | }); 71 | 72 | child.send({ 73 | task: {} 74 | }); 75 | }); 76 | 77 | test('message without task', function (t) { 78 | var child = fork(__dirname + '/../lib/child.js'); 79 | 80 | child.on('message', function (msg) { 81 | t.ifError(msg.job); 82 | t.ok(msg.error); 83 | t.equal(msg.error, 'unknown message'); 84 | if (child.exitCode === null) { 85 | child.kill(); 86 | } 87 | }); 88 | 89 | child.on('exit', function (code) { 90 | t.end(); 91 | }); 92 | 93 | child.send({ 94 | job: {} 95 | }); 96 | }); 97 | 98 | test('message with invalid task', function (t) { 99 | var child = fork(__dirname + '/../lib/child.js'); 100 | 101 | child.on('message', function (msg) { 102 | t.ifError(msg.job); 103 | t.ok(msg.error); 104 | t.ok(msg.error.match(/opt\.task\.body/)); 105 | if (child.exitCode === null) { 106 | child.kill(); 107 | } 108 | }); 109 | 110 | child.on('exit', function (code) { 111 | t.end(); 112 | }); 113 | 114 | child.send({ 115 | job: {}, 116 | task: {} 117 | }); 118 | }); 119 | 120 | test('message with successful task', function (t) { 121 | task.body = function (job, cb) { 122 | return cb(null); 123 | }.toString(); 124 | 125 | job.chain.push(task); 126 | 127 | var child = fork(__dirname + '/../lib/child.js'); 128 | 129 | child.on('message', function (msg) { 130 | t.ifError(msg.error); 131 | t.ok(msg.result); 132 | t.equal(msg.cmd, 'run'); 133 | t.ok(msg.job); 134 | if (child.exitCode === null) { 135 | child.kill(); 136 | } 137 | }); 138 | 139 | child.on('exit', function (code) { 140 | t.end(); 141 | }); 142 | 143 | child.send({ 144 | job: job, 145 | task: task 146 | }); 147 | }); 148 | 149 | test('message with failed task', function (t) { 150 | task.body = function (job, cb) { 151 | return cb('Task body error'); 152 | }.toString(); 153 | 154 | job.chain.push(task); 155 | 156 | var child = fork(__dirname + '/../lib/child.js'); 157 | 158 | child.on('message', function (msg) { 159 | t.ok(msg.error); 160 | t.ifError(msg.result); 161 | t.equal(msg.cmd, 'error'); 162 | t.ok(msg.job); 163 | if (child.exitCode === null) { 164 | child.kill(); 165 | } 166 | }); 167 | 168 | child.on('exit', function (code) { 169 | t.end(); 170 | }); 171 | 172 | child.send({ 173 | job: job, 174 | task: task 175 | }); 176 | }); 177 | 178 | 179 | test('cancel message', function (t) { 180 | task.body = function (job, cb) { 181 | job.timer = 'Timeout set'; 182 | setTimeout(function () { 183 | // Should not be called: 184 | return cb(null); 185 | }, 1550); 186 | }.toString(); 187 | task.retry = 2; 188 | task.timeout = 1; 189 | 190 | job.chain.push(task); 191 | 192 | var child = fork(__dirname + '/../lib/child.js'); 193 | 194 | child.on('message', function (msg) { 195 | t.ok(msg.error); 196 | t.equal(msg.error, 'cancel'); 197 | t.ifError(msg.result); 198 | t.equal(msg.cmd, 'cancel'); 199 | t.ok(msg.job); 200 | if (child.exitCode === null) { 201 | child.kill(); 202 | } 203 | }); 204 | 205 | child.on('exit', function (code) { 206 | t.end(); 207 | }); 208 | 209 | setTimeout(function () { 210 | child.send({ 211 | cmd: 'cancel' 212 | }); 213 | }, 750); 214 | 215 | child.send({ 216 | job: job, 217 | task: task 218 | }); 219 | 220 | }); 221 | -------------------------------------------------------------------------------- /test/config.json.sample: -------------------------------------------------------------------------------- 1 | { 2 | "backend": { 3 | "module": "../lib/workflow-in-memory-backend", 4 | "opts": { 5 | } 6 | }, 7 | "api": { 8 | "port": 8080, 9 | "wf_extra_params": ["bar"], 10 | "job_extra_params": ["foo"] 11 | }, 12 | "runner": { 13 | "identifier": "cd925eef-93fb-4bfe-a820-2aaedf9fc006", 14 | "forks": 2, 15 | "run_interval": 250, 16 | "sandbox": { 17 | "modules": { 18 | "http": "http", 19 | "uuid": "uuid" 20 | }, 21 | "foo": "bar", 22 | "bool": true, 23 | "aNumber": 5 24 | } 25 | } 26 | } 27 | 28 | -------------------------------------------------------------------------------- /test/config.nofork.sample: -------------------------------------------------------------------------------- 1 | { 2 | "backend": { 3 | "module": "../lib/workflow-in-memory-backend", 4 | "opts": { 5 | } 6 | }, 7 | "api": { 8 | "port": 8080, 9 | "wf_extra_params": ["bar"], 10 | "job_extra_params": ["foo"] 11 | }, 12 | "runner": { 13 | "identifier": "cd925eef-93fb-4bfe-a820-2aaedf9fc006", 14 | "do_fork": false, 15 | "forks": 2, 16 | "run_interval": 250, 17 | "sandbox": { 18 | "modules": { 19 | "http": "http", 20 | "uuid": "uuid" 21 | }, 22 | "foo": "bar", 23 | "bool": true, 24 | "aNumber": 5 25 | } 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /test/errors.test.js: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2017, Joyent, Inc. 2 | 3 | var test = require('tap').test, 4 | util = require('util'), 5 | restify = require('restify'), 6 | wf = require('../lib/index'); 7 | 8 | 9 | test('errors defined', function (t) { 10 | t.equal(typeof (wf.BackendError), 'function'); 11 | t.equal(typeof (wf.BackendInternalError), 'function'); 12 | t.equal(typeof (wf.BackendInvalidArgumentError), 'function'); 13 | t.equal(typeof (wf.BackendMissingParameterError), 'function'); 14 | t.equal(typeof (wf.BackendPreconditionFailedError), 'function'); 15 | t.equal(typeof (wf.BackendResourceNotFoundError), 'function'); 16 | t.end(); 17 | }); 18 | 19 | test('errors to RestErrors', function (t) { 20 | var nfError, nfRestError, errors = [ 21 | wf.BackendInvalidArgumentError, 22 | wf.BackendMissingParameterError, 23 | wf.BackendPreconditionFailedError, 24 | wf.BackendResourceNotFoundError 25 | ]; 26 | errors.forEach(function (E) { 27 | nfError = new E('A message'); 28 | t.equal(typeof (nfError), 'object'); 29 | nfRestError = nfError.toRestError; 30 | t.equal(typeof (nfRestError), 'object'); 31 | t.equal(nfError.restCode.replace(/^Backend/, ''), nfRestError.restCode); 32 | t.equal(nfError.message, nfRestError.message); 33 | }); 34 | t.end(); 35 | }); 36 | -------------------------------------------------------------------------------- /test/helper.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var path = require('path'), 5 | fs = require('fs'), 6 | existsSync = fs.existsSync || path.existsSync; 7 | 8 | 9 | var cfg = path.resolve(__dirname, (process.env.TEST_CONFIG_FILE ? 10 | process.env.TEST_CONFIG_FILE : './config.json')); 11 | var cfg_file = existsSync(cfg) ? cfg : 12 | path.resolve(__dirname, './config.json.sample'), 13 | config; 14 | 15 | module.exports = { 16 | config: function () { 17 | if (!config) { 18 | config = JSON.parse(fs.readFileSync(cfg_file, 'utf-8')); 19 | } 20 | return config; 21 | } 22 | }; 23 | -------------------------------------------------------------------------------- /test/in-memory-backend.test.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var test = require('tap').test, 5 | uuid = require('uuid'), 6 | SOCKET = '/tmp/.' + uuid(), 7 | util = require('util'), 8 | Factory = require('../lib/index').Factory, 9 | WorkflowInMemoryBackend = require('../lib/workflow-in-memory-backend'); 10 | 11 | var backend, factory; 12 | 13 | var aWorkflow, aJob, anotherJob; 14 | 15 | var helper = require('./helper'), 16 | config = helper.config(), 17 | runnerId = config.runner.identifier; 18 | 19 | test('setup', function (t) { 20 | console.time('In Memory Backend'); 21 | backend = WorkflowInMemoryBackend(config.backend.opts); 22 | t.ok(backend, 'backend ok'); 23 | backend.init(function () { 24 | factory = Factory(backend); 25 | t.ok(factory, 'factory ok'); 26 | t.end(); 27 | }); 28 | }); 29 | 30 | 31 | test('Is an EventEmitter', function (t) { 32 | backend.on('error', function (e) { 33 | t.ok(e, 'Error emitted'); 34 | t.end(); 35 | }); 36 | backend.emit('error', new Error('Emit Error')); 37 | }); 38 | 39 | test('add a workflow', function (t) { 40 | factory.workflow({ 41 | name: 'A workflow', 42 | chain: [ { 43 | name: 'A Task', 44 | timeout: 30, 45 | retry: 3, 46 | body: function (job, cb) { 47 | return cb(null); 48 | } 49 | }], 50 | timeout: 180, 51 | onError: [ { 52 | name: 'Fallback task', 53 | body: function (job, cb) { 54 | return cb('Workflow error'); 55 | } 56 | }], 57 | arbitrary: 'Arbitrary property' 58 | }, function (err, workflow) { 59 | t.ifError(err, 'add workflow error'); 60 | t.ok(workflow, 'add workflow ok'); 61 | aWorkflow = workflow; 62 | t.ok(workflow.chain[0].uuid, 'add workflow chain task'); 63 | t.ok(workflow.onerror[0].uuid, 'add workflow onerror task'); 64 | t.ok(workflow.arbitrary); 65 | t.end(); 66 | }); 67 | }); 68 | 69 | test('workflow name must be unique', function (t) { 70 | factory.workflow({ 71 | name: 'A workflow', 72 | chain: [ { 73 | name: 'A Task', 74 | timeout: 30, 75 | retry: 3, 76 | body: function (job, cb) { 77 | return cb(null); 78 | } 79 | }], 80 | timeout: 180, 81 | onError: [ { 82 | name: 'Fallback task', 83 | body: function (job, cb) { 84 | return cb('Workflow error'); 85 | } 86 | }] 87 | }, function (err, workflow) { 88 | t.ok(err, 'duplicated workflow name err'); 89 | t.end(); 90 | }); 91 | }); 92 | 93 | 94 | test('get workflow', function (t) { 95 | backend.getWorkflow(aWorkflow.uuid, function (err, workflow) { 96 | t.ifError(err, 'get workflow error'); 97 | t.ok(workflow, 'get workflow ok'); 98 | t.equivalent(workflow, aWorkflow); 99 | backend.getWorkflow(uuid(), function (err, workflow) { 100 | t.equal(typeof (err), 'object'); 101 | t.equal(err.name, 'BackendResourceNotFoundError'); 102 | t.ok(err.message.match(/uuid/gi), 'unexisting workflow error'); 103 | t.end(); 104 | }); 105 | }); 106 | }); 107 | 108 | 109 | test('update workflow', function (t) { 110 | aWorkflow.chain.push({ 111 | name: 'Another task', 112 | body: function (job, cb) { 113 | return cb(null); 114 | }.toString() 115 | }); 116 | aWorkflow.name = 'A workflow name'; 117 | backend.updateWorkflow(aWorkflow, function (err, workflow) { 118 | t.ifError(err, 'update workflow error'); 119 | t.ok(workflow, 'update workflow ok'); 120 | t.ok(workflow.chain[1].name, 'Updated task ok'); 121 | t.ok(workflow.chain[1].body, 'Updated task body ok'); 122 | t.end(); 123 | }); 124 | }); 125 | 126 | test('create job', function (t) { 127 | factory.job({ 128 | workflow: aWorkflow.uuid, 129 | target: '/foo/bar', 130 | params: { 131 | foo: 'bar', 132 | chicken: 'arise!' 133 | }, 134 | locks: 'something$', 135 | whatever: 'test arbitrary job properties' 136 | }, function (err, job) { 137 | t.ifError(err, 'create job error'); 138 | t.ok(job, 'create job ok'); 139 | t.ok(job.exec_after, 'job exec_after'); 140 | t.equal(job.execution, 'queued', 'job queued'); 141 | t.ok(job.uuid, 'job uuid'); 142 | t.ok(util.isArray(job.chain), 'job chain is array'); 143 | t.ok(util.isArray(job.onerror), 'job onerror is array'); 144 | t.ok( 145 | (typeof (job.params) === 'object' && !util.isArray(job.params)), 146 | 'params ok'); 147 | t.ok(job.whatever); 148 | t.ok(job.arbitrary); 149 | aJob = job; 150 | backend.getJobProperty(aJob.uuid, 'target', function (err, val) { 151 | t.ifError(err, 'get job property error'); 152 | t.equal(val, '/foo/bar', 'property value ok'); 153 | t.end(); 154 | }); 155 | }); 156 | }); 157 | 158 | test('duplicated job target', function (t) { 159 | factory.job({ 160 | workflow: aWorkflow.uuid, 161 | target: '/foo/bar', 162 | params: { 163 | foo: 'bar', 164 | chicken: 'arise!' 165 | } 166 | }, function (err, job) { 167 | t.ok(err, 'duplicated job error'); 168 | t.end(); 169 | }); 170 | }); 171 | 172 | 173 | test('locked job target', function (t) { 174 | factory.job({ 175 | workflow: aWorkflow.uuid, 176 | target: '/foo/something', 177 | params: { 178 | foo: 'bar', 179 | chicken: 'arise!' 180 | } 181 | }, function (err, job) { 182 | t.ok(err, 'locked job error'); 183 | t.end(); 184 | }); 185 | }); 186 | 187 | 188 | test('job with different params', function (t) { 189 | factory.job({ 190 | workflow: aWorkflow.uuid, 191 | target: '/foo/bar', 192 | params: { 193 | foo: 'bar', 194 | chicken: 'egg' 195 | } 196 | }, function (err, job) { 197 | t.ifError(err, 'create job error'); 198 | t.ok(job, 'create job ok'); 199 | t.ok(job.exec_after); 200 | t.equal(job.execution, 'queued'); 201 | t.ok(job.uuid); 202 | t.ok(util.isArray(job.chain), 'job chain is array'); 203 | t.ok(util.isArray(job.onerror), 'job onerror is array'); 204 | t.ok( 205 | (typeof (job.params) === 'object' && !util.isArray(job.params)), 206 | 'params ok'); 207 | anotherJob = job; 208 | t.end(); 209 | }); 210 | }); 211 | 212 | 213 | test('next jobs', function (t) { 214 | backend.nextJobs(0, 1, function (err, jobs) { 215 | t.ifError(err, 'next jobs error'); 216 | t.equal(jobs.length, 2); 217 | t.equal(jobs[0], aJob.uuid); 218 | t.equal(jobs[1], anotherJob.uuid); 219 | t.end(); 220 | }); 221 | }); 222 | 223 | 224 | test('next queued job', function (t) { 225 | var idx = 0; 226 | backend.nextJob(function (err, job) { 227 | t.ifError(err, 'next job error' + idx); 228 | idx += 1; 229 | t.ok(job, 'first queued job OK'); 230 | t.equal(aJob.uuid, job.uuid); 231 | backend.nextJob(idx, function (err, job) { 232 | t.ifError(err, 'next job error: ' + idx); 233 | idx += 1; 234 | t.ok(job, '2nd queued job OK'); 235 | t.notEqual(aJob.uuid, job.uuid); 236 | backend.nextJob(idx, function (err, job) { 237 | t.ifError(err, 'next job error: ' + idx); 238 | t.equal(job, null, 'no more queued jobs'); 239 | t.end(); 240 | }); 241 | }); 242 | }); 243 | }); 244 | 245 | 246 | test('run job', function (t) { 247 | backend.runJob(aJob.uuid, runnerId, function (err, job) { 248 | t.ifError(err, 'run job error'); 249 | backend.getRunnerJobs(runnerId, function (err, jobs) { 250 | t.ifError(err, 'get runner jobs err'); 251 | t.equal(jobs.length, 1); 252 | t.equal(jobs[0], aJob.uuid); 253 | // If the job is running, it shouldn't be available for nextJob: 254 | backend.nextJob(function (err, job) { 255 | t.ifError(err, 'run job next error'); 256 | t.notEqual(aJob.uuid, job.uuid, 'run job next job'); 257 | backend.getJob(aJob.uuid, function (err, job) { 258 | t.ifError(err, 'run job getJob'); 259 | t.equal(job.runner_id, runnerId, 'run job runner'); 260 | t.equal(job.execution, 'running', 'run job status'); 261 | aJob = job; 262 | t.end(); 263 | }); 264 | }); 265 | }); 266 | }); 267 | }); 268 | 269 | 270 | test('update job', function (t) { 271 | aJob.chain_results = [ 272 | {result: 'OK', error: ''}, 273 | {result: 'OK', error: ''} 274 | ]; 275 | 276 | backend.updateJob(aJob, function (err, job) { 277 | t.ifError(err, 'update job error'); 278 | t.equal(job.runner_id, runnerId, 'update job runner'); 279 | t.equal(job.execution, 'running', 'update job status'); 280 | t.ok(util.isArray(job.chain_results), 'chain_results is array'); 281 | t.equal(2, job.chain_results.length); 282 | aJob = job; 283 | t.end(); 284 | }); 285 | }); 286 | 287 | 288 | test('finish job', function (t) { 289 | aJob.chain_results = [ 290 | {result: 'OK', error: ''}, 291 | {result: 'OK', error: ''}, 292 | {result: 'OK', error: ''}, 293 | {result: 'OK', error: ''} 294 | ]; 295 | 296 | backend.finishJob(aJob, function (err, job) { 297 | t.ifError(err, 'finish job error'); 298 | t.equivalent(job.chain_results, [ 299 | {result: 'OK', error: ''}, 300 | {result: 'OK', error: ''}, 301 | {result: 'OK', error: ''}, 302 | {result: 'OK', error: ''} 303 | ], 'finish job results'); 304 | t.ok(!job.runner_id); 305 | t.equal(job.execution, 'succeeded', 'finished job status'); 306 | aJob = job; 307 | t.end(); 308 | }); 309 | }); 310 | 311 | 312 | // Now that the job with the lock run, this shouldn't be locked 313 | test('unlocked job target', function (t) { 314 | factory.job({ 315 | workflow: aWorkflow.uuid, 316 | target: '/foo/something', 317 | params: { 318 | foo: 'bar', 319 | chicken: 'arise!' 320 | } 321 | }, function (err, job) { 322 | t.ifError(err, 'unlocked job error'); 323 | t.ok(job); 324 | t.end(); 325 | }); 326 | }); 327 | 328 | test('re queue job', function (t) { 329 | backend.runJob(anotherJob.uuid, runnerId, function (err, job) { 330 | t.ifError(err, 're queue job run job error'); 331 | anotherJob.chain_results = [ 332 | {success: true, error: ''} 333 | ]; 334 | backend.queueJob(anotherJob, function (err, job) { 335 | t.ifError(err, 're queue job error'); 336 | t.ok(!job.runner_id, 're queue job runner'); 337 | t.equal(job.execution, 'queued', 're queue job status'); 338 | anotherJob = job; 339 | t.end(); 340 | }); 341 | }); 342 | }); 343 | 344 | 345 | test('register runner', function (t) { 346 | var d = new Date(); 347 | t.test('without specific time', function (t) { 348 | backend.registerRunner(runnerId, function (err) { 349 | t.ifError(err, 'register runner error'); 350 | backend.getRunner(runnerId, function (err, res) { 351 | t.ifError(err, 'get runner error'); 352 | t.ok(util.isDate(res), 'runner active at'); 353 | t.ok((res.getTime() >= d.getTime()), 'runner timestamp'); 354 | t.end(); 355 | }); 356 | }); 357 | }); 358 | t.test('with specific time', function (t) { 359 | backend.registerRunner(runnerId, d.toISOString(), function (err) { 360 | t.ifError(err, 'register runner error'); 361 | backend.getRunner(runnerId, function (err, timestamp) { 362 | t.ifError(err, 'backend get runner error'); 363 | t.equivalent(d, timestamp); 364 | t.end(); 365 | }); 366 | }); 367 | }); 368 | }); 369 | 370 | 371 | test('runner active', function (t) { 372 | var d = new Date(); 373 | backend.runnerActive(runnerId, function (err) { 374 | t.ifError(err, 'runner active error'); 375 | backend.getRunner(runnerId, function (err, res) { 376 | t.ifError(err, 'get runner error'); 377 | t.ok((res.getTime() >= d.getTime()), 'runner timestamp'); 378 | t.end(); 379 | }); 380 | }); 381 | }); 382 | 383 | 384 | test('get all runners', function (t) { 385 | backend.getRunners(function (err, runners) { 386 | t.ifError(err, 'get runners error'); 387 | t.ok(runners, 'runners ok'); 388 | t.ok(runners[runnerId], 'runner id ok'); 389 | t.ok(util.isDate(runners[runnerId]), 'runner timestamp ok'); 390 | t.end(); 391 | }); 392 | }); 393 | 394 | 395 | test('idle runner', function (t) { 396 | t.test('check runner is not idle', function (t) { 397 | backend.isRunnerIdle(runnerId, function (idle) { 398 | t.equal(idle, false); 399 | t.end(); 400 | }); 401 | }); 402 | t.test('set runner as idle', function (t) { 403 | backend.idleRunner(runnerId, function (err) { 404 | t.ifError(err); 405 | t.end(); 406 | }); 407 | }); 408 | t.test('check runner is idle', function (t) { 409 | backend.isRunnerIdle(runnerId, function (idle) { 410 | t.equal(idle, true); 411 | t.end(); 412 | }); 413 | }); 414 | t.test('set runner as not idle', function (t) { 415 | backend.wakeUpRunner(runnerId, function (err) { 416 | t.ifError(err); 417 | t.end(); 418 | }); 419 | }); 420 | t.test('check runner is not idle', function (t) { 421 | backend.isRunnerIdle(runnerId, function (idle) { 422 | t.equal(idle, false); 423 | t.end(); 424 | }); 425 | }); 426 | t.end(); 427 | }); 428 | 429 | 430 | test('get workflows', function (t) { 431 | backend.getWorkflows(function (err, workflows) { 432 | t.ifError(err, 'get workflows error'); 433 | t.ok(workflows, 'workflows ok'); 434 | t.equal(workflows[0].uuid, aWorkflow.uuid, 'workflow uuid ok'); 435 | t.ok(util.isArray(workflows[0].chain), 'workflow chain ok'); 436 | t.ok(util.isArray(workflows[0].onerror), 'workflow onerror ok'); 437 | t.end(); 438 | }); 439 | }); 440 | 441 | 442 | test('count jobs', function (t) { 443 | backend.countJobs(function (err, results) { 444 | t.ifError(err, 'count jobs'); 445 | t.ok(results, 'results ok'); 446 | t.ok(results.current); 447 | t.ok(results.current.queued); 448 | t.ok(results.current.succeeded); 449 | t.end(); 450 | }); 451 | }); 452 | 453 | 454 | test('get all jobs', function (t) { 455 | backend.getJobs(function (err, jobs) { 456 | t.ifError(err, 'get all jobs error'); 457 | t.ok(jobs, 'jobs ok'); 458 | t.ok(util.isArray(jobs[0].chain), 'jobs chain ok'); 459 | t.ok(util.isArray(jobs[0].onerror), 'jobs onerror ok'); 460 | t.ok(util.isArray(jobs[0].chain_results), 'jobs chain_results ok'); 461 | t.ok( 462 | (typeof (jobs[0].params) === 'object' && 463 | !util.isArray(jobs[0].params)), 464 | 'job params ok'); 465 | t.equal(jobs.length, 3); 466 | t.end(); 467 | }); 468 | }); 469 | 470 | 471 | test('get all jobs searching by params', function (t) { 472 | backend.getJobs({foo: 'bar'}, function (err, jobs) { 473 | t.ifError(err, 'get all jobs error'); 474 | t.ok(jobs, 'jobs ok'); 475 | t.equal(jobs.length, 3); 476 | t.end(); 477 | }); 478 | }); 479 | 480 | 481 | test('get some jobs searching by params', function (t) { 482 | backend.getJobs({foo: 'bar', chicken: 'arise!'}, function (err, jobs) { 483 | t.ifError(err, 'get all jobs error'); 484 | t.ok(jobs, 'jobs ok'); 485 | t.equal(jobs.length, 2); 486 | t.end(); 487 | }); 488 | }); 489 | 490 | 491 | test('get succeeded jobs', function (t) { 492 | backend.getJobs({execution: 'succeeded'}, function (err, jobs) { 493 | t.ifError(err, 'get succeeded jobs error'); 494 | t.ok(jobs, 'jobs ok'); 495 | t.equal(jobs.length, 1); 496 | t.equal(jobs[0].execution, 'succeeded'); 497 | t.ok(util.isArray(jobs[0].chain), 'jobs chain ok'); 498 | t.ok(util.isArray(jobs[0].onerror), 'jobs onerror ok'); 499 | t.ok(util.isArray(jobs[0].chain_results), 'jobs chain_results ok'); 500 | t.ok( 501 | (typeof (jobs[0].params) === 'object' && 502 | !util.isArray(jobs[0].params)), 503 | 'job params ok'); 504 | t.end(); 505 | }); 506 | }); 507 | 508 | 509 | test('get no jobs searching by execution and params', function (t) { 510 | backend.getJobs({execution: 'succeeded', foo: 'baz'}, function (err, jobs) { 511 | t.ifError(err, 'get succeeded jobs error'); 512 | t.ok(jobs, 'jobs ok'); 513 | t.equal(jobs.length, 0); 514 | t.end(); 515 | }); 516 | }); 517 | 518 | 519 | test('get queued jobs', function (t) { 520 | backend.getJobs({execution: 'queued'}, function (err, jobs) { 521 | t.ifError(err, 'get queued jobs error'); 522 | t.ok(jobs, 'jobs ok'); 523 | t.equal(jobs.length, 2); 524 | t.equal(jobs[0].execution, 'queued'); 525 | t.ok(util.isArray(jobs[0].chain), 'jobs chain ok'); 526 | t.ok(util.isArray(jobs[0].onerror), 'jobs onerror ok'); 527 | t.ok(util.isArray(jobs[0].chain_results), 'jobs chain_results ok'); 528 | t.ok( 529 | (typeof (jobs[0].params) === 'object' && 530 | !util.isArray(jobs[0].params)), 531 | 'job params ok'); 532 | t.end(); 533 | }); 534 | }); 535 | 536 | 537 | test('add job info', function (t) { 538 | t.test('to unexisting job', function (t) { 539 | backend.addInfo( 540 | uuid(), 541 | {'10%': 'Task completed step one'}, 542 | function (err) { 543 | t.ok(err); 544 | t.equal(typeof (err), 'object'); 545 | t.equal(err.name, 'BackendResourceNotFoundError'); 546 | t.equal(err.message, 'Job does not exist. Cannot Update.'); 547 | t.end(); 548 | }); 549 | }); 550 | t.test('to existing job without previous info', function (t) { 551 | backend.addInfo( 552 | aJob.uuid, 553 | {'10%': 'Task completed step one'}, 554 | function (err) { 555 | t.ifError(err); 556 | t.end(); 557 | }); 558 | }); 559 | t.test('to existing job with previous info', function (t) { 560 | backend.addInfo( 561 | aJob.uuid, 562 | {'20%': 'Task completed step two'}, 563 | function (err) { 564 | t.ifError(err); 565 | t.end(); 566 | }); 567 | }); 568 | t.end(); 569 | }); 570 | 571 | 572 | test('get job info', function (t) { 573 | t.test('from unexisting job', function (t) { 574 | backend.getInfo( 575 | uuid(), 576 | function (err, info) { 577 | t.ok(err); 578 | t.equal(typeof (err), 'object'); 579 | t.equal(err.name, 'BackendResourceNotFoundError'); 580 | t.equal(err.message, 'Job does not exist. Cannot get info.'); 581 | t.end(); 582 | }); 583 | }); 584 | t.test('from existing job', function (t) { 585 | backend.getInfo( 586 | aJob.uuid, 587 | function (err, info) { 588 | t.ifError(err); 589 | t.ok(info); 590 | t.ok(util.isArray(info)); 591 | t.equal(info.length, 2); 592 | t.equivalent({'10%': 'Task completed step one'}, info[0]); 593 | t.equivalent({'20%': 'Task completed step two'}, info[1]); 594 | t.end(); 595 | }); 596 | }); 597 | t.end(); 598 | }); 599 | 600 | 601 | test('delete workflow', function (t) { 602 | t.test('when the workflow exists', function (t) { 603 | backend.deleteWorkflow(aWorkflow, function (err, success) { 604 | t.ifError(err, 'delete existing workflow error'); 605 | t.ok(success); 606 | t.end(); 607 | }); 608 | }); 609 | t.test('when the workflow does not exist', function (t) { 610 | backend.deleteWorkflow(aWorkflow, function (err, success) { 611 | t.ifError(err, 'delete unexisting workflow error'); 612 | t.equal(success, false, 'no row deleted'); 613 | t.end(); 614 | }); 615 | }); 616 | t.end(); 617 | }); 618 | 619 | 620 | test('teardown', function (t) { 621 | backend.quit(function () { 622 | console.timeEnd('In Memory Backend'); 623 | t.end(); 624 | }); 625 | }); 626 | -------------------------------------------------------------------------------- /test/runner.test.js: -------------------------------------------------------------------------------- 1 | // Copyright 2012 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2018, Joyent, Inc. 3 | 4 | var util = require('util'); 5 | var path = require('path'); 6 | var fs = require('fs'); 7 | var test = require('tap').test; 8 | var uuid = require('uuid'); 9 | var vasync = require('vasync'); 10 | var WorkflowRunner = require('../lib/runner'); 11 | var Factory = require('../lib/index').Factory; 12 | var exists = fs.exists || path.exists; 13 | var createDTrace = require('../lib/index').CreateDTrace; 14 | 15 | var backend, identifier, runner, factory; 16 | 17 | var config = {}; 18 | 19 | var okTask = { 20 | name: 'OK Task', 21 | retry: 1, 22 | body: function (_job, cb) { 23 | cb(null); 24 | } 25 | }; 26 | var failTask = { 27 | retry: 1, 28 | name: 'Fail Task', 29 | body: function (job, cb) { 30 | job.log.info('recording some info'); 31 | cb('Fail task error'); 32 | } 33 | }; 34 | var failTaskWithError = { 35 | retry: 1, 36 | name: 'Fail Task with error', 37 | body: function (job, cb) { 38 | job.log.info('recording some info'); 39 | cb(new Error('Fail task error')); 40 | } 41 | 42 | }; 43 | var failTaskWithJobRetry = { 44 | retry: 1, 45 | name: 'Fail Task with job retry', 46 | body: function (_job, cb) { 47 | cb('retry'); 48 | } 49 | }; 50 | var failTaskWithJobWait = { 51 | retry: 1, 52 | name: 'Fail Task with job wait', 53 | body: function (_job, cb) { 54 | cb('wait'); 55 | } 56 | }; 57 | var taskWithModules = { 58 | name: 'OK Task with modules', 59 | retry: 1, 60 | body: function (_job, cb) { 61 | if (typeof (uuid) !== 'function') { 62 | cb('uuid module is not defined'); 63 | return; 64 | } 65 | cb(null); 66 | }, 67 | modules: { 68 | uuid: 'uuid' 69 | } 70 | }; 71 | var okWf, failWf, theJob, failWfWithError, failWfWithRetry; 72 | 73 | var helper = require('./helper'); 74 | 75 | var DTRACE = createDTrace('workflow'); 76 | 77 | test('throws on missing opts', function (t) { 78 | t.throws(function () { 79 | WorkflowRunner(); 80 | }, 'The "opts" argument must be of type object'); 81 | t.end(); 82 | }); 83 | 84 | 85 | test('throws on missing backend', function (t) { 86 | t.throws(function () { 87 | WorkflowRunner(config); 88 | }, 'The "opts.backend" argument must be of type object'); 89 | t.end(); 90 | }); 91 | 92 | 93 | test('throws on missing dtrace', function (t) { 94 | config = helper.config(); 95 | t.throws(function () { 96 | WorkflowRunner(config); 97 | }, 'The "opts.dtrace" argument must be of type object'); 98 | t.end(); 99 | }); 100 | 101 | 102 | test('setup', function (t) { 103 | config.dtrace = DTRACE; 104 | identifier = config.runner.identifier; 105 | config.logger = { 106 | streams: [ { 107 | level: 'info', 108 | stream: process.stdout 109 | }, { 110 | level: 'trace', 111 | path: path.resolve(__dirname, './test.runner.log') 112 | }] 113 | }; 114 | runner = WorkflowRunner(config); 115 | t.ok(runner); 116 | t.ok(runner.backend, 'backend ok'); 117 | backend = runner.backend; 118 | runner.init(function (err0) { 119 | t.ifError(err0, 'runner init error'); 120 | factory = Factory(backend); 121 | t.ok(factory); 122 | 123 | // okWf: 124 | factory.workflow({ 125 | name: 'OK wf', 126 | chain: [okTask, failTaskWithJobWait, taskWithModules], 127 | timeout: 60 128 | }, function (err1, wf1) { 129 | t.ifError(err1, 'ok wf error'); 130 | t.ok(wf1, 'OK wf OK'); 131 | okWf = wf1; 132 | // failWf: 133 | factory.workflow({ 134 | name: 'Fail wf', 135 | chain: [failTask], 136 | timeout: 60 137 | }, function (err2, wf2) { 138 | t.ifError(err2, 'Fail wf error'); 139 | t.ok(wf2, 'Fail wf OK'); 140 | failWf = wf2; 141 | factory.workflow({ 142 | name: 'Fail wf with error', 143 | chain: [failTaskWithError], 144 | timeout: 60 145 | }, function (err3, wf3) { 146 | t.ifError(err3, 'Fail wf with error'); 147 | t.ok(wf3, 'Fail wf with error OK'); 148 | failWfWithError = wf3; 149 | factory.workflow({ 150 | name: 'Fail wf with retry', 151 | chain: [failTaskWithJobRetry], 152 | timeout: 60, 153 | max_attempts: 3 154 | }, function (err4, wf4) { 155 | t.ifError(err4, 'Fail wf with retry'); 156 | t.ok(wf4, 'Fail wf with retry OK'); 157 | failWfWithRetry = wf4; 158 | backend.getRunners(function (err5, runners) { 159 | t.ifError(err5, 'get runners error'); 160 | t.ok(runners[identifier], 'runner id ok'); 161 | t.ok(new Date(runners[identifier]), 162 | 'runner timestamp ok'); 163 | t.end(); 164 | }); 165 | }); 166 | }); 167 | }); 168 | }); 169 | }); 170 | }); 171 | 172 | 173 | test('runner identifier', function (t) { 174 | const cfg = { 175 | backend: helper.config().backend, 176 | dtrace: DTRACE 177 | }; 178 | const aRunner = WorkflowRunner(cfg); 179 | var ident; 180 | // run getIdentifier twice, one to create the file, 181 | // another to just read it: 182 | aRunner.init(function (err) { 183 | t.ifError(err, 'runner init error'); 184 | aRunner.getIdentifier(function (err1, id) { 185 | t.ifError(err1, 'get identifier error'); 186 | t.ok(id, 'get identifier id'); 187 | ident = id; 188 | aRunner.getIdentifier(function (err2, id2) { 189 | t.ifError(err2, 'get identifier error'); 190 | t.equal(id2, ident, 'correct id'); 191 | aRunner.backend.quit(function () { 192 | t.end(); 193 | }); 194 | }); 195 | }); 196 | }); 197 | }); 198 | 199 | 200 | test('runner run job now', function (t) { 201 | var d = new Date(); 202 | t.ok(runner.runNow({exec_after: d.toISOString()})); 203 | // Set to the future, so it shouldn't run: 204 | d.setTime(d.getTime() + 10000); 205 | t.ok(!runner.runNow({exec_after: d.toISOString()})); 206 | t.end(); 207 | }); 208 | 209 | 210 | test('runner next run', function (t) { 211 | var d = Date.now(); 212 | var job = { 213 | started: d, 214 | elapsed: 1.23, 215 | max_delay: 7000, 216 | initial_delay: 1000, 217 | num_attempts: 0 218 | }; 219 | t.equal(new Date(runner.nextRun(job)).getTime(), d + 1230 + 1000); 220 | job.num_attempts++; 221 | t.equal(new Date(runner.nextRun(job)).getTime(), d + 1230 + 2000); 222 | job.num_attempts++; 223 | t.equal(new Date(runner.nextRun(job)).getTime(), d + 1230 + 4000); 224 | job.num_attempts++; 225 | t.equal(new Date(runner.nextRun(job)).getTime(), d + 1230 + 7000); 226 | job.num_attempts++; 227 | t.equal(new Date(runner.nextRun(job)).getTime(), d + 1230 + 7000); 228 | t.end(); 229 | }); 230 | 231 | 232 | test('idle runner', function (t) { 233 | runner.run(); 234 | backend.idleRunner(runner.identifier, function (err) { 235 | t.ifError(err, 'idle runner error'); 236 | factory.job({ 237 | workflow: okWf.uuid, 238 | exec_after: '2012-01-03T12:54:05.788Z' 239 | }, function (err1, job1) { 240 | t.ifError(err1, 'job error'); 241 | t.ok(job1, 'run job ok'); 242 | theJob = job1; 243 | // The job is queued. The runner is idle. Job should remain queued: 244 | backend.getJob(theJob.uuid, function (err2, job2) { 245 | t.ifError(err2, 'run job get job error'); 246 | t.equal(job2.execution, 'queued', 'job execution'); 247 | runner.quit(function () { 248 | t.end(); 249 | }); 250 | }); 251 | }); 252 | }); 253 | }); 254 | 255 | 256 | test('run job', function (t) { 257 | // Let's remove the idleness of the runner so it will pick the job 258 | backend.wakeUpRunner(runner.identifier, function (err) { 259 | t.ifError(err, 'wake up runner error'); 260 | runner.run(); 261 | setTimeout(function () { 262 | runner.quit(function () { 263 | backend.getJob(theJob.uuid, function (err1, job1) { 264 | t.ifError(err1, 'run job get job error'); 265 | t.equal(job1.execution, 'waiting', 'job execution'); 266 | t.equal(job1.chain_results[0].result, 'OK'); 267 | t.equal(job1.chain_results[1].result, 'OK'); 268 | theJob = job1; 269 | t.end(); 270 | }); 271 | }); 272 | }, 1000); 273 | }); 274 | }); 275 | 276 | 277 | test('re-run waiting job', function (t) { 278 | backend.resumeJob(theJob, function (err, job) { 279 | t.ifError(err, 'resume job error'); 280 | theJob = job; 281 | runner.run(); 282 | setTimeout(function () { 283 | runner.quit(function () { 284 | backend.getJob(theJob.uuid, function (err1, job1) { 285 | t.ifError(err1, 'run job get job error'); 286 | t.equal(job1.execution, 'succeeded', 'job execution'); 287 | t.equal(job1.chain_results[0].result, 'OK'); 288 | t.equal(job1.chain_results[1].result, 'OK'); 289 | t.end(); 290 | }); 291 | }); 292 | }, 1000); 293 | }); 294 | }); 295 | 296 | 297 | test('run job which fails', function (t) { 298 | var aJob; 299 | factory.job({ 300 | workflow: failWf.uuid, 301 | exec_after: '2012-01-03T12:54:05.788Z' 302 | }, function (err, job) { 303 | t.ifError(err, 'job error'); 304 | t.ok(job, 'job ok'); 305 | aJob = job; 306 | runner.run(); 307 | setTimeout(function () { 308 | runner.quit(function () { 309 | backend.getJob(aJob.uuid, function (err1, job1) { 310 | t.ifError(err1, 'get job error'); 311 | t.equal(job1.execution, 'failed', 'job execution'); 312 | t.equal(job1.chain_results[0].error, 'Fail task error'); 313 | t.end(); 314 | }); 315 | }); 316 | }, 300); 317 | }); 318 | }); 319 | 320 | 321 | test('run job which fails with error instance', function (t) { 322 | var aJob; 323 | factory.job({ 324 | workflow: failWfWithError.uuid, 325 | exec_after: '2012-01-03T12:54:05.788Z' 326 | }, function (err, job) { 327 | t.ifError(err, 'job error'); 328 | t.ok(job, 'job ok'); 329 | aJob = job; 330 | runner.run(); 331 | setTimeout(function () { 332 | runner.quit(function () { 333 | backend.getJob(aJob.uuid, function (err1, job1) { 334 | t.ifError(err1, 'get job error'); 335 | t.equal(job1.execution, 'failed', 'job execution'); 336 | t.ok(job1.chain_results[0].error.name); 337 | t.ok(job1.chain_results[0].error.message); 338 | t.equal(job1.chain_results[0].error.message, 339 | 'Fail task error'); 340 | t.end(); 341 | }); 342 | }); 343 | }, 300); 344 | }); 345 | }); 346 | 347 | 348 | test('a job that is retried', function (t) { 349 | var aJob; 350 | factory.job({ 351 | workflow: failWfWithRetry.uuid, 352 | exec_after: '2012-01-03T12:54:05.788Z' 353 | }, function (err, job) { 354 | t.ifError(err, 'job error'); 355 | t.ok(job, 'job ok'); 356 | aJob = job; 357 | runner.run(); 358 | setTimeout(function () { 359 | runner.quit(function () { 360 | backend.getJob(aJob.uuid, function (err1, job1) { 361 | t.ifError(err1, 'get job error'); 362 | t.equal(job1.execution, 'retried', 'job execution'); 363 | t.equal(job1.chain_results[0].error, 'retry', 'error'); 364 | var prevJob = job1; 365 | backend.getJob(job1.next_attempt, function (err2, job2) { 366 | t.ifError(err2, 'get job error'); 367 | t.equal(job2.execution, 'retried', 'job execution'); 368 | t.equal(job2.chain_results[0].error, 'retry', 'error'); 369 | t.equal(prevJob.uuid, job2.prev_attempt); 370 | var midJob = job2; 371 | backend.getJob(job2.next_attempt, 372 | function getJCb(err3, job3) { 373 | t.ifError(err3, 'get job error'); 374 | t.equal(job3.execution, 'retried', 'job execution'); 375 | t.equal(job3.chain_results[0].error, 'retry'); 376 | t.equal(midJob.uuid, job3.prev_attempt); 377 | t.notOk(job3.next_attempt); 378 | t.end(); 379 | }); 380 | }); 381 | }); 382 | }); 383 | }, 2000); 384 | }); 385 | }); 386 | 387 | 388 | test('inactive runners', function (t) { 389 | // Add another runner, which we'll set as inactive 390 | var theUUID = uuid(), 391 | cfg = { 392 | backend: helper.config().backend, 393 | runner: { 394 | identifier: theUUID, 395 | forks: 2, 396 | run_interval: 250 397 | }, 398 | dtrace: DTRACE 399 | }, 400 | anotherRunner = WorkflowRunner(cfg); 401 | t.ok(anotherRunner, 'another runner ok'); 402 | // Init the new runner, then update it to make inactive 403 | anotherRunner.init(function (err) { 404 | t.ifError(err, 'another runner init error'); 405 | // Now we quit the new runner, and outdate it: 406 | anotherRunner.quit(function () { 407 | runner.inactiveRunners(function (err1, runners1) { 408 | t.ifError(err1, 'inactive runners error'); 409 | t.ok(util.isArray(runners1), 'runners is array'); 410 | t.equal(runners1.length, 0, 'runners length'); 411 | backend.runnerActive( 412 | anotherRunner.identifier, 413 | '2012-01-03T12:54:05.788Z', 414 | function (err2) { 415 | t.ifError(err2, 'set runner timestamp error'); 416 | runner.inactiveRunners(function (err3, runners3) { 417 | t.ifError(err3, 'inactive runners error'); 418 | t.ok(util.isArray(runners3), 'runners is array'); 419 | t.equal(runners3.length, 1, 'runners length'); 420 | t.equal(runners3[0], theUUID, 'runner uuid error'); 421 | anotherRunner.backend.quit(function () { 422 | t.end(); 423 | }); 424 | }); 425 | }); 426 | }); 427 | }); 428 | }); 429 | }); 430 | 431 | 432 | test('stale jobs', function (t) { 433 | // Add another runner, which we'll set as inactive 434 | var cfg = { 435 | backend: helper.config().backend, 436 | runner: { 437 | identifier: uuid(), 438 | forks: 2, 439 | run_interval: 250 440 | }, 441 | dtrace: DTRACE 442 | }; 443 | 444 | vasync.pipeline({ 445 | arg: { 446 | anotherRunner: WorkflowRunner(cfg) 447 | }, 448 | funcs: [ 449 | // Create a job and store as `ctx.aJob` 450 | function createJob(ctx, next) { 451 | factory.job({ 452 | workflow: okWf.uuid, 453 | exec_after: '2012-01-03T12:54:05.788Z' 454 | }, function (err, job) { 455 | t.ifError(err, 'job error'); 456 | t.ok(job, 'run job ok'); 457 | ctx.aJob = job; 458 | backend.getJob(ctx.aJob.uuid, function (err1, job1) { 459 | t.ifError(err1, 'get job err'); 460 | t.equal('queued', job1.execution, 'Job is queued'); 461 | ctx.aJob = job1; 462 | next(); 463 | }); 464 | }); 465 | }, 466 | function runJob(ctx, next) { 467 | backend.runJob( 468 | ctx.aJob.uuid, 469 | ctx.anotherRunner.identifier, 470 | function runJobCb(err, job) { 471 | t.ifError(err, 'backend run job error'); 472 | t.equal('running', job.execution, 'Job is running'); 473 | ctx.aJob = job; 474 | ctx.anotherRunner.quit(next); 475 | }); 476 | }, 477 | function checkStaleJobs(_, next) { 478 | runner.staleJobs(function (err, jobs) { 479 | t.ifError(err, 'stale jobs error'); 480 | t.equivalent(jobs, [], 'stale jobs empty'); 481 | next(); 482 | }); 483 | }, 484 | function outdateRunner(ctx, next) { 485 | // The runner will be inactive so, any job flagged 486 | // as owned by this runner will be stale 487 | backend.runnerActive( 488 | ctx.anotherRunner.identifier, 489 | '2012-01-03T12:54:05.788Z', 490 | function (err) { 491 | t.ifError(err, 'set runner timestamp error'); 492 | next(); 493 | }); 494 | }, 495 | function reCheckStaleJobs(ctx, next) { 496 | runner.staleJobs(function (err, jobs) { 497 | t.ifError(err, 'stale jobs error'); 498 | t.equivalent(jobs, [ctx.aJob.uuid], 'stale jobs not empty'); 499 | next(); 500 | }); 501 | }, 502 | function finishJob(ctx, next) { 503 | backend.finishJob(ctx.aJob, function finishJobCb(err, job) { 504 | t.ifError(err, 'finish job err'); 505 | t.ok(job, 'finish job ok'); 506 | next(); 507 | }); 508 | }, 509 | function triCheckStaleJobs(_, next) { 510 | runner.staleJobs(function (err, jobs) { 511 | t.ifError(err, 'stale jobs error'); 512 | t.equivalent(jobs, [], 513 | 'Only not finished jobs can be stale'); 514 | next(); 515 | }); 516 | } 517 | ] 518 | }, function pipeCb(pipeErr) { 519 | t.end(pipeErr); 520 | }); 521 | 522 | }); 523 | 524 | 525 | test('timeout job', function (t) { 526 | vasync.pipeline({ 527 | arg: {}, 528 | funcs: [ 529 | function createWorkflow(ctx, next) { 530 | factory.workflow({ 531 | name: 'Timeout wf', 532 | chain: [ 533 | { 534 | name: 'Timeout Task', 535 | retry: 0, 536 | body: function (_job, cb) { 537 | setTimeout(function () { 538 | cb(null); 539 | }, 10000); 540 | } 541 | } 542 | ], 543 | timeout: 1, 544 | max_attempts: 1 545 | }, function (err, wf) { 546 | t.ifError(err, 'Timeout wf error'); 547 | t.ok(wf, 'Timeout wf OK'); 548 | ctx.wf = wf; 549 | next(); 550 | }); 551 | }, 552 | function createJob(ctx, next) { 553 | factory.job({ 554 | workflow: ctx.wf.uuid, 555 | exec_after: '2012-01-03T12:54:05.788Z' 556 | }, function (err, job) { 557 | t.ifError(err, 'job error'); 558 | t.ok(job, 'job ok'); 559 | ctx.aJob = job; 560 | backend.getJob(ctx.aJob.uuid, function (err1, job1) { 561 | t.ifError(err1, 'get job err'); 562 | t.equal('queued', job1.execution, 'Job is queued'); 563 | ctx.aJob = job1; 564 | next(); 565 | }); 566 | }); 567 | }, 568 | function runJob(_, next) { 569 | backend.wakeUpRunner(runner.identifier, function (err) { 570 | t.ifError(err, 'wake up runner error'); 571 | runner.run(); 572 | next(); 573 | }); 574 | }, 575 | function checkJob(ctx, next) { 576 | // Give it room enough to timeout the job 577 | setTimeout(function () { 578 | backend.getJob(ctx.aJob.uuid, function (err, job) { 579 | t.ifError(err, 'get job err'); 580 | t.equal('failed', job.execution, 'Job is failed'); 581 | t.ok(job.chain_results, 'chain_results'); 582 | t.ok(job.chain_results[0].error, 'job error'); 583 | t.ok(job.chain_results[0].finished_at, 584 | 'job finished_at'); 585 | next(); 586 | }); 587 | }, 2000); 588 | 589 | }, 590 | function quitRunner(_, next) { 591 | runner.quit(next); 592 | } 593 | ] 594 | }, function pipeCb(pipeErr) { 595 | t.end(pipeErr); 596 | }); 597 | }); 598 | 599 | 600 | test('teardown', function (t) { 601 | var cfg_file = path.resolve(__dirname, '../workflow-indentifier'); 602 | runner.backend.quit(function () { 603 | exists(cfg_file, function (exist) { 604 | if (exist) { 605 | fs.unlink(cfg_file, function (err) { 606 | t.ifError(err); 607 | t.end(); 608 | }); 609 | } else { 610 | t.end(); 611 | } 612 | }); 613 | }); 614 | }); 615 | -------------------------------------------------------------------------------- /test/task-runner.test.js: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Pedro P. Candel . All rights reserved. 2 | // Copyright (c) 2017, Joyent, Inc. 3 | 4 | var util = require('util'), 5 | test = require('tap').test, 6 | uuid = require('uuid'), 7 | WorkflowTaskRunner = require('../lib/task-runner'); 8 | 9 | var job = { 10 | timeout: 180, 11 | workflow_uuid: 'bdfa0821-5071-4682-b965-88293149a8d2', 12 | name: 'A workflow name', 13 | exec_after: '2012-01-03T12:54:05.788Z', 14 | params: { 15 | 'a': '1', 16 | 'b': '2' 17 | }, 18 | uuid: 'fb4c202d-19ed-4ed9-afda-8255aa7f38ad', 19 | target: '/foo/bar', 20 | execution: 'running', 21 | chain_results: [], 22 | chain: [], 23 | onerror: [] 24 | }; 25 | 26 | var task = { 27 | 'uuid': uuid(), 28 | 'name': 'A name', 29 | 'body': 'Fake body' 30 | }; 31 | 32 | 33 | var sandbox = { 34 | 'modules': { 35 | 'http': 'http', 36 | 'uuid': 'uuid', 37 | 'restify': 'restify' 38 | }, 39 | 'foo': 'bar', 40 | 'bool': true, 41 | 'aNumber': 5, 42 | 'someError': new Error('some error') 43 | }; 44 | 45 | test('throws on missing opts', function (t) { 46 | t.throws(function () { 47 | return WorkflowTaskRunner(); 48 | }, new TypeError('opts (Object) required')); 49 | t.end(); 50 | }); 51 | 52 | 53 | test('throws on missing opts.job', function (t) { 54 | t.throws(function () { 55 | return WorkflowTaskRunner({}); 56 | }, new TypeError('opts.job (Object) required')); 57 | t.end(); 58 | }); 59 | 60 | 61 | test('throws on missing opts.task', function (t) { 62 | t.throws(function () { 63 | return WorkflowTaskRunner({job: job}); 64 | }, new TypeError('opts.task (Object) required')); 65 | t.end(); 66 | }); 67 | 68 | 69 | test('throws on incorrect opts.sandbox', function (t) { 70 | t.throws(function () { 71 | return WorkflowTaskRunner({ 72 | job: job, 73 | task: task, 74 | sandbox: 'foo' 75 | }); 76 | }, new TypeError('opts.sandbox must be an Object')); 77 | t.end(); 78 | }); 79 | 80 | 81 | test('throws on opts.task.body not a function', function (t) { 82 | t.throws(function () { 83 | return WorkflowTaskRunner({ 84 | job: job, 85 | task: task 86 | }); 87 | }, new TypeError('opt.task.body (String) must be a Function source')); 88 | task.body = '5 === 5'; 89 | t.throws(function () { 90 | return WorkflowTaskRunner({ 91 | job: job, 92 | task: task 93 | }); 94 | }, new TypeError('opt.task.body (String) must be a Function source')); 95 | t.end(); 96 | }); 97 | 98 | 99 | test('a task which succeeds on 1st retry', function (t) { 100 | task.body = function (job, cb) { 101 | return cb(null); 102 | }.toString(); 103 | 104 | job.chain.push(task); 105 | 106 | var wf_task_runner = WorkflowTaskRunner({ 107 | job: job, 108 | task: task 109 | }); 110 | 111 | t.ok(wf_task_runner.name); 112 | t.equal(typeof (wf_task_runner.body), 'function'); 113 | 114 | wf_task_runner.runTask(function (msg) { 115 | t.ok(msg.result); 116 | t.ifError(msg.error, 'task error'); 117 | t.ok(msg.job); 118 | t.equal(msg.cmd, 'run'); 119 | t.equal(msg.task_name, task.name); 120 | t.end(); 121 | }); 122 | 123 | }); 124 | 125 | 126 | test('sandbox modules and variables', function (t) { 127 | // Or javascriptlint will complain regarding undefined variables: 128 | var foo, bool, aNumber, restify, info, someError; 129 | var task_body = function (job, cb) { 130 | if (typeof (uuid) !== 'function') { 131 | return cb('uuid module is not defined'); 132 | } 133 | if (typeof (foo) !== 'string') { 134 | return cb('sandbox value is not defined'); 135 | } 136 | if (typeof (bool) !== 'boolean') { 137 | return cb('sandbox value is not defined'); 138 | } 139 | if (typeof (aNumber) !== 'number') { 140 | return cb('sandbox value is not defined'); 141 | } 142 | if (typeof (restify.createJsonClient) !== 'function') { 143 | return cb('restify.createJsonClient is not defined'); 144 | } 145 | if (typeof (info) !== 'function') { 146 | return cb('sandbox info() is not defined'); 147 | } 148 | var client = restify.createJsonClient({ 149 | url: 'http://127.0.0.1' 150 | }); 151 | if (typeof (client.url) !== 'object') { 152 | return cb('restify is defined but cannot create a client'); 153 | } 154 | if (!(someError instanceof Error)) { 155 | return cb('"Error" JavaScript builtin object is not shared with ' + 156 | 'sandbox'); 157 | } 158 | return cb(null); 159 | }; 160 | 161 | task.body = task_body.toString(); 162 | 163 | job.chain.push(task); 164 | 165 | var wf_task_runner = WorkflowTaskRunner({ 166 | job: job, 167 | task: task, 168 | sandbox: sandbox 169 | }); 170 | 171 | t.ok(wf_task_runner.name); 172 | t.equal(typeof (wf_task_runner.body), 'function'); 173 | 174 | wf_task_runner.runTask(function (msg) { 175 | t.ok(msg.result); 176 | t.ifError(msg.error, 'task error'); 177 | t.ok(msg.job); 178 | t.equal(msg.cmd, 'run'); 179 | t.equal(msg.task_name, task.name); 180 | t.end(); 181 | }); 182 | 183 | }); 184 | 185 | 186 | test('a task which succeeds on 2nd retry', function (t) { 187 | task.body = function (job, cb) { 188 | if (!job.foo) { 189 | job.foo = true; 190 | return cb('Foo was not defined'); 191 | } 192 | return cb(null); 193 | }.toString(); 194 | task.retry = 2; 195 | job.chain.push(task); 196 | 197 | var wf_task_runner = WorkflowTaskRunner({ 198 | job: job, 199 | task: task 200 | }); 201 | 202 | t.ok(wf_task_runner.name); 203 | t.equal(typeof (wf_task_runner.body), 'function'); 204 | 205 | wf_task_runner.runTask(function (msg) { 206 | t.ok(msg.result); 207 | t.ifError(msg.error, 'task error'); 208 | t.ok(msg.job); 209 | t.ok(msg.job.foo); 210 | t.equal(msg.cmd, 'run'); 211 | t.equal(msg.task_name, task.name); 212 | t.end(); 213 | }); 214 | }); 215 | 216 | 217 | test('a task which fails and has no "fallback"', function (t) { 218 | task.body = function (job, cb) { 219 | return cb('Task body error'); 220 | }.toString(); 221 | 222 | job.chain.push(task); 223 | 224 | var wf_task_runner = WorkflowTaskRunner({ 225 | job: job, 226 | task: task 227 | }); 228 | 229 | t.ok(wf_task_runner.name); 230 | t.equal(typeof (wf_task_runner.body), 'function'); 231 | 232 | wf_task_runner.runTask(function (msg) { 233 | t.ifError(msg.result); 234 | t.equal(msg.error, 'Task body error', 'task error'); 235 | t.ok(msg.job); 236 | t.equal(msg.cmd, 'error'); 237 | t.equal(msg.task_name, task.name); 238 | t.end(); 239 | }); 240 | }); 241 | 242 | 243 | test('a task which fails and succeeds "fallback"', function (t) { 244 | task.fallback = function (err, job, cb) { 245 | job.the_err = err; 246 | return cb(null); 247 | }.toString(); 248 | 249 | job.chain.push(task); 250 | 251 | var wf_task_runner = WorkflowTaskRunner({ 252 | job: job, 253 | task: task 254 | }); 255 | 256 | t.ok(wf_task_runner.name); 257 | t.equal(typeof (wf_task_runner.body), 'function'); 258 | t.equal(typeof (wf_task_runner.fallback), 'function'); 259 | 260 | wf_task_runner.runTask(function (msg) { 261 | t.ok(msg.result); 262 | t.ifError(msg.error, 'task error'); 263 | t.ok(msg.job); 264 | t.equal(msg.job.the_err, 'Task body error'); 265 | t.equal(msg.cmd, 'run'); 266 | t.end(); 267 | }); 268 | }); 269 | 270 | 271 | test('a task which fails and "fallback" fails too', function (t) { 272 | task.fallback = function (err, job, cb) { 273 | return cb('fallback error'); 274 | }.toString(); 275 | 276 | job.chain.push(task); 277 | 278 | var wf_task_runner = WorkflowTaskRunner({ 279 | job: job, 280 | task: task 281 | }); 282 | 283 | t.ok(wf_task_runner.name); 284 | t.equal(typeof (wf_task_runner.body), 'function'); 285 | t.equal(typeof (wf_task_runner.fallback), 'function'); 286 | 287 | wf_task_runner.runTask(function (msg) { 288 | t.ifError(msg.result); 289 | t.equal(msg.error, 'fallback error', 'task error'); 290 | t.ok(msg.job); 291 | t.equal(msg.cmd, 'error'); 292 | t.end(); 293 | }); 294 | }); 295 | 296 | 297 | test('a task which fails after two retries and has no "fallback"', 298 | function (t) { 299 | task.body = function (job, cb) { 300 | if (!job.bar) { 301 | job.bar = true; 302 | return cb('Bar was not defined'); 303 | } else if (!job.baz) { 304 | job.baz = true; 305 | return cb('Baz was not defined'); 306 | } 307 | // Should not be called 308 | return cb(null); 309 | }.toString(); 310 | task.fallback = null; 311 | 312 | job.chain.push(task); 313 | 314 | var wf_task_runner = WorkflowTaskRunner({ 315 | job: job, 316 | task: task 317 | }); 318 | 319 | t.ok(wf_task_runner.name); 320 | t.equal(typeof (wf_task_runner.body), 'function'); 321 | 322 | wf_task_runner.runTask(function (msg) { 323 | t.ifError(msg.result); 324 | t.equal(msg.error, 'Baz was not defined', 'task error'); 325 | t.ok(msg.job, 'job ok'); 326 | t.ok(msg.job.bar, 'job.bar ok'); 327 | t.ok(msg.job.baz, 'job.baz ok'); 328 | t.equal(msg.cmd, 'error', 'job cmd ok'); 329 | t.end(); 330 | }); 331 | }); 332 | 333 | 334 | test('a task which time out and succeeds "fallback"', function (t) { 335 | task.body = function (job, cb) { 336 | setTimeout(function () { 337 | // Should not be called: 338 | return cb('Error within timeout'); 339 | }, 1050); 340 | }.toString(); 341 | task.fallback = function (err, job, cb) { 342 | job.the_err = err; 343 | return cb(null); 344 | }.toString(); 345 | task.timeout = 1; 346 | task.retry = 1; 347 | 348 | job.chain.push(task); 349 | 350 | var wf_task_runner = WorkflowTaskRunner({ 351 | job: job, 352 | task: task 353 | }); 354 | 355 | t.ok(wf_task_runner.name); 356 | t.equal(typeof (wf_task_runner.body), 'function'); 357 | t.equal(typeof (wf_task_runner.fallback), 'function'); 358 | 359 | t.equal(wf_task_runner.timeout, 1000); 360 | 361 | wf_task_runner.runTask(function (msg) { 362 | t.ok(msg.result); 363 | t.ifError(msg.error, 'task error'); 364 | t.ok(msg.job); 365 | t.equal(msg.job.the_err, 'task timeout error'); 366 | t.equal(msg.cmd, 'run'); 367 | t.end(); 368 | }); 369 | }); 370 | 371 | 372 | test('a task which times out and fallback does too', function (t) { 373 | task.body = function (job, cb) { 374 | job.timer = 'Timeout set'; 375 | setTimeout(function () { 376 | // Should not be called: 377 | return cb(null); 378 | }, 1050); 379 | }.toString(); 380 | task.retry = 1; 381 | task.fallback = function (err, job, cb) { 382 | job.fbtimer = 'Fallback timeout set'; 383 | setTimeout(function () { 384 | // Should not be called: 385 | return cb(null); 386 | }, 1025); 387 | }.toString(); 388 | 389 | job.chain.push(task); 390 | 391 | var wf_task_runner = WorkflowTaskRunner({ 392 | job: job, 393 | task: task 394 | }); 395 | 396 | t.ok(wf_task_runner.name, 'uuid ok'); 397 | t.equal(typeof (wf_task_runner.body), 'function', 'body ok'); 398 | t.equal(typeof (wf_task_runner.fallback), 'function', 'fallback ok'); 399 | t.equal(wf_task_runner.timeout, 1000, 'timeout ok'); 400 | 401 | wf_task_runner.runTask(function (msg) { 402 | t.ok(msg.error, 'task error'); 403 | t.equal(msg.error, 'task timeout error', 'task timeout error'); 404 | t.ifError(msg.result, 'task result'); 405 | t.ok(msg.job, 'job ok'); 406 | t.equal(msg.cmd, 'error', 'cmd ok'); 407 | t.end(); 408 | }); 409 | }); 410 | 411 | test('a task which succeeds and re-queues the workflow', function (t) { 412 | task.body = function (job, cb) { 413 | return cb('queue'); 414 | }.toString(); 415 | task.fallback = null; 416 | task.retry = 1; 417 | 418 | job.chain.push(task); 419 | 420 | var wf_task_runner = WorkflowTaskRunner({ 421 | job: job, 422 | task: task 423 | }); 424 | 425 | t.ok(wf_task_runner.name); 426 | t.equal(typeof (wf_task_runner.body), 'function'); 427 | 428 | wf_task_runner.runTask(function (msg) { 429 | t.ok(msg.result); 430 | t.ok(msg.error, 'task error'); 431 | t.ok(msg.job); 432 | t.equal(msg.cmd, 'queue'); 433 | t.end(); 434 | }); 435 | 436 | }); 437 | 438 | 439 | test('a task which times out and has no fallback', function (t) { 440 | task.body = function (job, cb) { 441 | job.timer = 'Timeout set'; 442 | setTimeout(function () { 443 | // Should not be called: 444 | return cb(null); 445 | }, 1050); 446 | }.toString(); 447 | task.retry = 1; 448 | task.fallback = null; 449 | job.chain.push(task); 450 | 451 | var wf_task_runner = WorkflowTaskRunner({ 452 | job: job, 453 | task: task 454 | }); 455 | 456 | t.ok(wf_task_runner.name, 'uuid ok'); 457 | t.equal(typeof (wf_task_runner.body), 'function', 'body ok'); 458 | t.equal(wf_task_runner.timeout, 1000, 'timeout ok'); 459 | 460 | wf_task_runner.runTask(function (msg) { 461 | t.ok(msg.error, 'task error'); 462 | t.equal(msg.error, 'task timeout error', 'task timeout error'); 463 | t.ifError(msg.result, 'task result'); 464 | t.ok(msg.job, 'job ok'); 465 | t.equal(msg.cmd, 'error', 'cmd ok'); 466 | t.end(); 467 | }); 468 | 469 | }); 470 | 471 | 472 | test('a task which timeout and is canceled', function (t) { 473 | task.body = function (job, cb) { 474 | job.timer = 'Timeout set'; 475 | setTimeout(function () { 476 | // Should not be called: 477 | return cb(null); 478 | }, 1550); 479 | }.toString(); 480 | task.retry = 2; 481 | task.fallback = null; 482 | job.chain.push(task); 483 | 484 | var wf_task_runner = WorkflowTaskRunner({ 485 | job: job, 486 | task: task 487 | }); 488 | 489 | t.ok(wf_task_runner.name, 'uuid ok'); 490 | t.equal(typeof (wf_task_runner.body), 'function', 'body ok'); 491 | t.equal(wf_task_runner.timeout, 1000, 'timeout ok'); 492 | 493 | setTimeout(function () { 494 | wf_task_runner.canceled = true; 495 | }, 750); 496 | 497 | wf_task_runner.runTask(function (msg) { 498 | t.ok(msg.error, 'task error'); 499 | t.equal(msg.error, 'cancel', 'task timeout error'); 500 | t.ifError(msg.result, 'task result'); 501 | t.ok(msg.job, 'job ok'); 502 | t.equal(msg.cmd, 'cancel', 'cmd ok'); 503 | t.end(); 504 | }); 505 | 506 | }); 507 | 508 | 509 | test('a task which fails and is canceled', function (t) { 510 | task.body = function (job, cb) { 511 | setTimeout(function () { 512 | return cb('Task body error'); 513 | }, 500); 514 | }.toString(); 515 | 516 | task.fallback = function (err, job, cb) { 517 | job.the_err = err; 518 | return cb(null); 519 | }.toString(); 520 | 521 | task.retry = 1; 522 | 523 | job.chain.push(task); 524 | 525 | var wf_task_runner = WorkflowTaskRunner({ 526 | job: job, 527 | task: task 528 | }); 529 | 530 | t.ok(wf_task_runner.name); 531 | t.equal(typeof (wf_task_runner.body), 'function'); 532 | t.equal(typeof (wf_task_runner.fallback), 'function'); 533 | 534 | setTimeout(function () { 535 | wf_task_runner.canceled = true; 536 | }, 350); 537 | 538 | wf_task_runner.runTask(function (msg) { 539 | t.ok(msg.error, 'task error'); 540 | t.equal(msg.error, 'cancel', 'task timeout error'); 541 | t.ifError(msg.result, 'task result'); 542 | t.ok(msg.job, 'job ok'); 543 | t.equal(msg.cmd, 'cancel', 'cmd ok'); 544 | t.end(); 545 | }); 546 | 547 | }); 548 | 549 | 550 | test('a task which calls job.info', function (t) { 551 | var info; 552 | task.body = function (job, cb) { 553 | job.log.info('an info string'); 554 | info('a second info string'); 555 | return cb(null); 556 | }.toString(); 557 | 558 | job.chain.push(task); 559 | 560 | var wf_task_runner = WorkflowTaskRunner({ 561 | job: job, 562 | task: task 563 | }); 564 | 565 | t.ok(wf_task_runner.name); 566 | t.equal(typeof (wf_task_runner.body), 'function'); 567 | 568 | // The callback will be called two times: once for info(), 569 | // and the 2nd time to finish the task. It will not be called 570 | // for job.log.info 571 | 572 | var num = 0; 573 | 574 | wf_task_runner.runTask(function (msg) { 575 | t.ifError(msg.error, 'task error'); 576 | t.ok(msg.job); 577 | t.equal(msg.task_name, task.name); 578 | num += 1; 579 | 580 | if (num === 1) { 581 | t.ok(msg.info, 'info present'); 582 | t.notOk(msg.result, 'result not present'); 583 | t.equal(msg.cmd, 'info', 'info cmd'); 584 | t.equal(msg.info, 'a second info string', 'info string'); 585 | return; 586 | } 587 | 588 | t.ok(msg.result, 'result present'); 589 | t.notOk(msg.info, 'info not present'); 590 | t.equal(msg.cmd, 'run', 'run cmd'); 591 | t.end(); 592 | }); 593 | 594 | }); 595 | 596 | 597 | // GH-82: Support cb(new Error()) on tasks callbacks to provide useful info, 598 | // the same than when we call with restify.Error() instances: 599 | test('a task which fails with restify.Error', function (t) { 600 | // Not really needed, already on the sandbox 601 | var restify = require('restify'), 602 | wf_task_runner; 603 | 604 | task.body = function (job, cb) { 605 | var error = new restify.ConflictError('Task body error'); 606 | return cb(error); 607 | }.toString(); 608 | 609 | task.fallback = null; 610 | task.retry = 1; 611 | task.timeout = 1000; 612 | 613 | job.chain = [task]; 614 | 615 | wf_task_runner = WorkflowTaskRunner({ 616 | job: job, 617 | task: task, 618 | sandbox: sandbox 619 | }); 620 | 621 | t.ok(wf_task_runner.name); 622 | t.equal(typeof (wf_task_runner.body), 'function'); 623 | 624 | wf_task_runner.runTask(function (msg) { 625 | t.ifError(msg.result); 626 | t.ok(msg.error); 627 | t.ok(msg.error.message); 628 | t.equal(msg.error.message, 'Task body error'); 629 | t.ok(msg.error.statusCode); 630 | t.equal(msg.cmd, 'error'); 631 | t.equal(msg.task_name, task.name); 632 | t.end(); 633 | }); 634 | }); 635 | 636 | test('a task which fails with generic (not restify) Error', function (t) { 637 | 638 | task.body = function (job, cb) { 639 | return cb(new ReferenceError('Task body error')); 640 | }.toString(); 641 | 642 | task.fallback = null; 643 | task.retry = 1; 644 | task.timeout = 1000; 645 | 646 | job.chain = [task]; 647 | 648 | var wf_task_runner = WorkflowTaskRunner({ 649 | job: job, 650 | task: task 651 | }); 652 | 653 | t.ok(wf_task_runner.name); 654 | t.equal(typeof (wf_task_runner.body), 'function'); 655 | 656 | wf_task_runner.runTask(function (msg) { 657 | t.ifError(msg.result); 658 | t.ok(msg.error); 659 | t.ok(msg.error.message); 660 | t.equal(msg.error.message, 'Task body error'); 661 | t.ok(msg.error.name); 662 | t.equal(msg.cmd, 'error'); 663 | t.equal(msg.task_name, task.name); 664 | t.end(); 665 | }); 666 | }); 667 | 668 | 669 | test('a task which defines its own modules', function (t) { 670 | // Or javascriptlint will complain regarding undefined variables: 671 | var foo, bool, aNumber, restify, http; 672 | var task_body = function (job, cb) { 673 | if (typeof (uuid) !== 'function') { 674 | return cb('uuid module is not defined'); 675 | } 676 | if (typeof (foo) !== 'string') { 677 | return cb('sandbox value is not defined'); 678 | } 679 | if (typeof (bool) !== 'boolean') { 680 | return cb('sandbox value is not defined'); 681 | } 682 | if (typeof (aNumber) !== 'number') { 683 | return cb('sandbox value is not defined'); 684 | } 685 | if (typeof (restify) !== 'undefined') { 686 | return cb('restify should be overriden by task modules'); 687 | } 688 | if (typeof (http) !== 'undefined') { 689 | return cb('http should be overriden by task modules'); 690 | } 691 | return cb(null); 692 | }; 693 | 694 | task.body = task_body.toString(); 695 | task.modules = { 696 | 'uuid': 'uuid' 697 | }; 698 | 699 | job.chain.push(task); 700 | 701 | var wf_task_runner = WorkflowTaskRunner({ 702 | job: job, 703 | task: task, 704 | sandbox: sandbox 705 | }); 706 | 707 | t.ok(wf_task_runner.name); 708 | t.equal(typeof (wf_task_runner.body), 'function'); 709 | 710 | wf_task_runner.runTask(function (msg) { 711 | t.ok(msg.result); 712 | t.ifError(msg.error, 'task error'); 713 | t.ok(msg.job); 714 | t.equal(msg.cmd, 'run'); 715 | t.equal(msg.task_name, task.name); 716 | t.end(); 717 | }); 718 | }); 719 | -------------------------------------------------------------------------------- /tools/bashstyle: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /* 4 | * Copyright (c) 2012, Joyent, Inc. All rights reserved. 5 | * 6 | * bashstyle: check bash scripts for adherence to style guidelines, including: 7 | * 8 | * o no lines longer than 80 characters 9 | * o file does not end with a blank line 10 | * 11 | * Future enhancements could include: 12 | * o indents consistent with respect to tabs, spaces 13 | * o indents consistently sized (all are some multiple of the smallest 14 | * indent, which must be a tab or 4 or 8 spaces) 15 | */ 16 | 17 | var mod_assert = require('assert'); 18 | var mod_fs = require('fs'); 19 | 20 | var nerrors = 0; 21 | 22 | main(); 23 | process.exit(0); 24 | 25 | function main() 26 | { 27 | var files = process.argv.slice(2); 28 | 29 | if (files.length === 0) { 30 | console.error('usage: %s file1 [...]', 31 | process.argv.slice(0, 2).join(' ')); 32 | process.exit(2); 33 | } 34 | 35 | files.forEach(checkFile); 36 | 37 | if (nerrors != 0) 38 | process.exit(1); 39 | } 40 | 41 | function checkFile(filename) 42 | { 43 | var text = mod_fs.readFileSync(filename, 'utf-8'); 44 | var lines = text.split('\n'); 45 | var i; 46 | 47 | mod_assert.ok(lines.length > 0); 48 | 49 | /* 50 | * Expand tabs in each line and check for long lines. 51 | */ 52 | for (i = 1; i <= lines.length; i++) { 53 | var line = expandTabs(lines[i - 1]); 54 | 55 | if (line.length > 80) { 56 | nerrors++; 57 | console.log('%s: %d: line exceeds 80 columns', 58 | filename, i); 59 | } 60 | } 61 | 62 | /* 63 | * No sane editor lets you save a file without a newline at the very end. 64 | */ 65 | if (lines[lines.length - 1].length !== 0) { 66 | nerrors++; 67 | console.log('%s: %d: file does not end with newline', 68 | filename, lines.length); 69 | } 70 | 71 | /* 72 | * Since the file will always end with a newline, the last entry of 73 | * "lines" will actually be blank. 74 | */ 75 | if (lines.length > 1 && lines[lines.length - 2].length === 0) { 76 | nerrors++; 77 | console.log('%s: %d: file ends with a blank line', 78 | filename, lines.length - 1); 79 | } 80 | } 81 | 82 | function expandTabs(text) 83 | { 84 | var out = ''; 85 | var col = 0; 86 | var j, k; 87 | 88 | for (j = 0; j < text.length; j++) { 89 | if (text[j] != '\t') { 90 | out += text[j]; 91 | col++; 92 | continue; 93 | } 94 | 95 | k = 8 - (col % 8); 96 | col += k; 97 | 98 | do { 99 | out += ' '; 100 | } while (--k > 0); 101 | 102 | col += k; 103 | } 104 | 105 | return (out); 106 | } 107 | -------------------------------------------------------------------------------- /tools/jsl.node.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration File for JavaScript Lint 3 | # 4 | # This configuration file can be used to lint a collection of scripts, or to enable 5 | # or disable warnings for scripts that are linted via the command line. 6 | # 7 | 8 | ### Warnings 9 | # Enable or disable warnings based on requirements. 10 | # Use "+WarningName" to display or "-WarningName" to suppress. 11 | # 12 | +ambiguous_else_stmt # the else statement could be matched with one of multiple if statements (use curly braces to indicate intent 13 | +ambiguous_nested_stmt # block statements containing block statements should use curly braces to resolve ambiguity 14 | +ambiguous_newline # unexpected end of line; it is ambiguous whether these lines are part of the same statement 15 | +anon_no_return_value # anonymous function does not always return value 16 | +assign_to_function_call # assignment to a function call 17 | -block_without_braces # block statement without curly braces 18 | +comma_separated_stmts # multiple statements separated by commas (use semicolons?) 19 | +comparison_type_conv # comparisons against null, 0, true, false, or an empty string allowing implicit type conversion (use === or !==) 20 | +default_not_at_end # the default case is not at the end of the switch statement 21 | +dup_option_explicit # duplicate "option explicit" control comment 22 | +duplicate_case_in_switch # duplicate case in switch statement 23 | +duplicate_formal # duplicate formal argument {name} 24 | +empty_statement # empty statement or extra semicolon 25 | -identifier_hides_another # identifer {name} hides an identifier in a parent scope 26 | -inc_dec_within_stmt # increment (++) and decrement (--) operators used as part of greater statement 27 | +incorrect_version # Expected /*jsl:content-type*/ control comment. The script was parsed with the wrong version. 28 | +invalid_fallthru # unexpected "fallthru" control comment 29 | +invalid_pass # unexpected "pass" control comment 30 | +jsl_cc_not_understood # couldn't understand control comment using /*jsl:keyword*/ syntax 31 | +leading_decimal_point # leading decimal point may indicate a number or an object member 32 | +legacy_cc_not_understood # couldn't understand control comment using /*@keyword@*/ syntax 33 | +meaningless_block # meaningless block; curly braces have no impact 34 | +mismatch_ctrl_comments # mismatched control comment; "ignore" and "end" control comments must have a one-to-one correspondence 35 | +misplaced_regex # regular expressions should be preceded by a left parenthesis, assignment, colon, or comma 36 | +missing_break # missing break statement 37 | +missing_break_for_last_case # missing break statement for last case in switch 38 | +missing_default_case # missing default case in switch statement 39 | +missing_option_explicit # the "option explicit" control comment is missing 40 | +missing_semicolon # missing semicolon 41 | +missing_semicolon_for_lambda # missing semicolon for lambda assignment 42 | +multiple_plus_minus # unknown order of operations for successive plus (e.g. x+++y) or minus (e.g. x---y) signs 43 | +nested_comment # nested comment 44 | +no_return_value # function {name} does not always return a value 45 | +octal_number # leading zeros make an octal number 46 | +parseint_missing_radix # parseInt missing radix parameter 47 | +partial_option_explicit # the "option explicit" control comment, if used, must be in the first script tag 48 | +redeclared_var # redeclaration of {name} 49 | +trailing_comma_in_array # extra comma is not recommended in array initializers 50 | +trailing_decimal_point # trailing decimal point may indicate a number or an object member 51 | +undeclared_identifier # undeclared identifier: {name} 52 | +unreachable_code # unreachable code 53 | -unreferenced_argument # argument declared but never referenced: {name} 54 | -unreferenced_function # function is declared but never referenced: {name} 55 | +unreferenced_variable # variable is declared but never referenced: {name} 56 | +unsupported_version # JavaScript {version} is not supported 57 | +use_of_label # use of label 58 | +useless_assign # useless assignment 59 | +useless_comparison # useless comparison; comparing identical expressions 60 | -useless_quotes # the quotation marks are unnecessary 61 | +useless_void # use of the void type may be unnecessary (void is always undefined) 62 | +var_hides_arg # variable {name} hides argument 63 | +want_assign_or_call # expected an assignment or function call 64 | +with_statement # with statement hides undeclared variables; use temporary variable instead 65 | 66 | 67 | ### Output format 68 | # Customize the format of the error message. 69 | # __FILE__ indicates current file path 70 | # __FILENAME__ indicates current file name 71 | # __LINE__ indicates current line 72 | # __COL__ indicates current column 73 | # __ERROR__ indicates error message (__ERROR_PREFIX__: __ERROR_MSG__) 74 | # __ERROR_NAME__ indicates error name (used in configuration file) 75 | # __ERROR_PREFIX__ indicates error prefix 76 | # __ERROR_MSG__ indicates error message 77 | # 78 | # For machine-friendly output, the output format can be prefixed with 79 | # "encode:". If specified, all items will be encoded with C-slashes. 80 | # 81 | # Visual Studio syntax (default): 82 | +output-format __FILE__(__LINE__): __ERROR__ 83 | # Alternative syntax: 84 | #+output-format __FILE__:__LINE__: __ERROR__ 85 | 86 | 87 | ### Context 88 | # Show the in-line position of the error. 89 | # Use "+context" to display or "-context" to suppress. 90 | # 91 | +context 92 | 93 | 94 | ### Control Comments 95 | # Both JavaScript Lint and the JScript interpreter confuse each other with the syntax for 96 | # the /*@keyword@*/ control comments and JScript conditional comments. (The latter is 97 | # enabled in JScript with @cc_on@). The /*jsl:keyword*/ syntax is preferred for this reason, 98 | # although legacy control comments are enabled by default for backward compatibility. 99 | # 100 | -legacy_control_comments 101 | 102 | 103 | ### Defining identifiers 104 | # By default, "option explicit" is enabled on a per-file basis. 105 | # To enable this for all files, use "+always_use_option_explicit" 106 | -always_use_option_explicit 107 | 108 | # Define certain identifiers of which the lint is not aware. 109 | # (Use this in conjunction with the "undeclared identifier" warning.) 110 | # 111 | # Common uses for webpages might be: 112 | +define __dirname 113 | +define clearInterval 114 | +define clearTimeout 115 | +define console 116 | +define exports 117 | +define global 118 | +define process 119 | +define require 120 | +define module 121 | +define setInterval 122 | +define setTimeout 123 | +define Buffer 124 | +define JSON 125 | +define Math 126 | 127 | ### JavaScript Version 128 | # To change the default JavaScript version: 129 | #+default-type text/javascript;version=1.5 130 | #+default-type text/javascript;e4x=1 131 | 132 | ### Files 133 | # Specify which files to lint 134 | # Use "+recurse" to enable recursion (disabled by default). 135 | # To add a set of files, use "+process FileName", "+process Folder\Path\*.js", 136 | # or "+process Folder\Path\*.htm". 137 | # 138 | 139 | -------------------------------------------------------------------------------- /tools/jsl.web.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration File for JavaScript Lint 3 | # Developed by Matthias Miller (http://www.JavaScriptLint.com) 4 | # 5 | # This configuration file can be used to lint a collection of scripts, or to enable 6 | # or disable warnings for scripts that are linted via the command line. 7 | # 8 | 9 | ### Warnings 10 | # Enable or disable warnings based on requirements. 11 | # Use "+WarningName" to display or "-WarningName" to suppress. 12 | # 13 | +ambiguous_else_stmt # the else statement could be matched with one of multiple if statements (use curly braces to indicate intent 14 | +ambiguous_nested_stmt # block statements containing block statements should use curly braces to resolve ambiguity 15 | +ambiguous_newline # unexpected end of line; it is ambiguous whether these lines are part of the same statement 16 | +anon_no_return_value # anonymous function does not always return value 17 | +assign_to_function_call # assignment to a function call 18 | -block_without_braces # block statement without curly braces 19 | +comma_separated_stmts # multiple statements separated by commas (use semicolons?) 20 | +comparison_type_conv # comparisons against null, 0, true, false, or an empty string allowing implicit type conversion (use === or !==) 21 | +default_not_at_end # the default case is not at the end of the switch statement 22 | +dup_option_explicit # duplicate "option explicit" control comment 23 | +duplicate_case_in_switch # duplicate case in switch statement 24 | +duplicate_formal # duplicate formal argument {name} 25 | +empty_statement # empty statement or extra semicolon 26 | +identifier_hides_another # identifer {name} hides an identifier in a parent scope 27 | +inc_dec_within_stmt # increment (++) and decrement (--) operators used as part of greater statement 28 | +incorrect_version # Expected /*jsl:content-type*/ control comment. The script was parsed with the wrong version. 29 | +invalid_fallthru # unexpected "fallthru" control comment 30 | +invalid_pass # unexpected "pass" control comment 31 | +jsl_cc_not_understood # couldn't understand control comment using /*jsl:keyword*/ syntax 32 | +leading_decimal_point # leading decimal point may indicate a number or an object member 33 | +legacy_cc_not_understood # couldn't understand control comment using /*@keyword@*/ syntax 34 | +meaningless_block # meaningless block; curly braces have no impact 35 | +mismatch_ctrl_comments # mismatched control comment; "ignore" and "end" control comments must have a one-to-one correspondence 36 | +misplaced_regex # regular expressions should be preceded by a left parenthesis, assignment, colon, or comma 37 | +missing_break # missing break statement 38 | +missing_break_for_last_case # missing break statement for last case in switch 39 | +missing_default_case # missing default case in switch statement 40 | +missing_option_explicit # the "option explicit" control comment is missing 41 | +missing_semicolon # missing semicolon 42 | +missing_semicolon_for_lambda # missing semicolon for lambda assignment 43 | +multiple_plus_minus # unknown order of operations for successive plus (e.g. x+++y) or minus (e.g. x---y) signs 44 | +nested_comment # nested comment 45 | +no_return_value # function {name} does not always return a value 46 | +octal_number # leading zeros make an octal number 47 | +parseint_missing_radix # parseInt missing radix parameter 48 | +partial_option_explicit # the "option explicit" control comment, if used, must be in the first script tag 49 | +redeclared_var # redeclaration of {name} 50 | +trailing_comma_in_array # extra comma is not recommended in array initializers 51 | +trailing_decimal_point # trailing decimal point may indicate a number or an object member 52 | +undeclared_identifier # undeclared identifier: {name} 53 | +unreachable_code # unreachable code 54 | +unreferenced_argument # argument declared but never referenced: {name} 55 | +unreferenced_function # function is declared but never referenced: {name} 56 | +unreferenced_variable # variable is declared but never referenced: {name} 57 | +unsupported_version # JavaScript {version} is not supported 58 | +use_of_label # use of label 59 | +useless_assign # useless assignment 60 | +useless_comparison # useless comparison; comparing identical expressions 61 | +useless_quotes # the quotation marks are unnecessary 62 | +useless_void # use of the void type may be unnecessary (void is always undefined) 63 | +var_hides_arg # variable {name} hides argument 64 | +want_assign_or_call # expected an assignment or function call 65 | +with_statement # with statement hides undeclared variables; use temporary variable instead 66 | 67 | 68 | ### Output format 69 | # Customize the format of the error message. 70 | # __FILE__ indicates current file path 71 | # __FILENAME__ indicates current file name 72 | # __LINE__ indicates current line 73 | # __COL__ indicates current column 74 | # __ERROR__ indicates error message (__ERROR_PREFIX__: __ERROR_MSG__) 75 | # __ERROR_NAME__ indicates error name (used in configuration file) 76 | # __ERROR_PREFIX__ indicates error prefix 77 | # __ERROR_MSG__ indicates error message 78 | # 79 | # For machine-friendly output, the output format can be prefixed with 80 | # "encode:". If specified, all items will be encoded with C-slashes. 81 | # 82 | # Visual Studio syntax (default): 83 | +output-format __FILE__(__LINE__): __ERROR__ 84 | # Alternative syntax: 85 | #+output-format __FILE__:__LINE__: __ERROR__ 86 | 87 | 88 | ### Context 89 | # Show the in-line position of the error. 90 | # Use "+context" to display or "-context" to suppress. 91 | # 92 | +context 93 | 94 | 95 | ### Control Comments 96 | # Both JavaScript Lint and the JScript interpreter confuse each other with the syntax for 97 | # the /*@keyword@*/ control comments and JScript conditional comments. (The latter is 98 | # enabled in JScript with @cc_on@). The /*jsl:keyword*/ syntax is preferred for this reason, 99 | # although legacy control comments are enabled by default for backward compatibility. 100 | # 101 | -legacy_control_comments 102 | 103 | 104 | ### Defining identifiers 105 | # By default, "option explicit" is enabled on a per-file basis. 106 | # To enable this for all files, use "+always_use_option_explicit" 107 | +always_use_option_explicit 108 | 109 | # Define certain identifiers of which the lint is not aware. 110 | # (Use this in conjunction with the "undeclared identifier" warning.) 111 | # 112 | # Common uses for webpages might be: 113 | +define JSON 114 | +define Math 115 | +define $ 116 | +define XMLHttpRequest 117 | +define alert 118 | +define clearInterval 119 | +define clearTimeout 120 | +define confirm 121 | +define document 122 | +define setInterval 123 | +define setTimeout 124 | +define window 125 | 126 | ### JavaScript Version 127 | # To change the default JavaScript version: 128 | #+default-type text/javascript;version=1.5 129 | #+default-type text/javascript;e4x=1 130 | 131 | ### Files 132 | # Specify which files to lint 133 | # Use "+recurse" to enable recursion (disabled by default). 134 | # To add a set of files, use "+process FileName", "+process Folder\Path\*.js", 135 | # or "+process Folder\Path\*.htm". 136 | # 137 | 138 | -------------------------------------------------------------------------------- /tools/npmfreeze.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // -*- mode: js -*- 3 | // 4 | // Copyright (c) 2012, Joyent, Inc. All rights reserved. 5 | // 6 | // Generate a "dependencies" block for a top-level package.json that includes 7 | // the explicit versions for all recursive npm modules. See "Method 3" in 8 | // for why this is interesting. 9 | // 10 | // Usage: 11 | // find . -name "package.json" | xargs ./tools/npmfreeze.js 12 | // 13 | // If two parts of the node_modules tree includes separate versions of a 14 | // particular module, then the greater version is used. 15 | 16 | var fs = require('fs'); 17 | var semver = require('semver'); 18 | var spawn = require('child_process').spawn; 19 | 20 | 21 | ///--- Globals 22 | var deps = {}; 23 | 24 | 25 | ///--- Helpers 26 | 27 | function done() { 28 | console.log(JSON.stringify(deps, null, 2)); 29 | } 30 | 31 | 32 | function waitForDone() { 33 | process.nextTick(function() { 34 | if (wait === 0) 35 | return done(); 36 | 37 | return waitForDone(); 38 | }); 39 | } 40 | 41 | 42 | ///--- Main 43 | 44 | process.argv.slice(2).forEach(function(fname) { 45 | var pkg = JSON.parse(fs.readFileSync(fname, 'utf8')); 46 | if (!pkg.dependencies) 47 | return; 48 | 49 | var tmp = pkg.dependencies; 50 | Object.keys(tmp).forEach(function(dep) { 51 | if (!deps[dep] || semver.gt(tmp[dep], deps[dep])) 52 | deps[dep] = semver.clean(tmp[dep]) || '*'; 53 | }); 54 | }); 55 | 56 | // Make a pass and clean up all the '*' 57 | var wait = 0; 58 | Object.keys(deps).forEach(function(k) { 59 | if (deps[k] !== '*') 60 | return; 61 | 62 | wait++; 63 | var npm = spawn('npm', ['info', k]); 64 | var json = ''; 65 | npm.stdout.on('data', function(data) { 66 | if (data) 67 | json += data; 68 | }); 69 | 70 | npm.stdout.on('end', function(code) { 71 | if (code) { 72 | console.error('npm info %s exited: %d', k, code); 73 | process.exit(code); 74 | } 75 | 76 | var val; 77 | eval('val = ' + json); 78 | 79 | deps[k] = val['dist-tags'].latest; 80 | wait--; 81 | }); 82 | }); 83 | 84 | return (wait === 0 ? done() : waitForDone()); 85 | 86 | --------------------------------------------------------------------------------