├── webui
│   ├── internal
│   │   └── assets
│   │       ├── .gitignore
│   │       ├── .babelrc
│   │       ├── assets_generate.go
│   │       ├── src
│   │       │   ├── TestSetup.js
│   │       │   ├── cx.js
│   │       │   ├── ShortList.css
│   │       │   ├── UnixTime.js
│   │       │   ├── UnixTime.test.js
│   │       │   ├── Queues.test.js
│   │       │   ├── ShortList.test.js
│   │       │   ├── TestUtils.js
│   │       │   ├── ShortList.js
│   │       │   ├── PageList.test.js
│   │       │   ├── PageList.js
│   │       │   ├── RetryJobs.test.js
│   │       │   ├── ScheduledJobs.test.js
│   │       │   ├── Processes.test.js
│   │       │   ├── Queues.js
│   │       │   ├── ScheduledJobs.js
│   │       │   ├── RetryJobs.js
│   │       │   ├── index.js
│   │       │   ├── DeadJobs.test.js
│   │       │   ├── Processes.js
│   │       │   └── DeadJobs.js
│   │       ├── build
│   │       │   └── index.html
│   │       ├── webpack.config.js
│   │       ├── .eslintrc.json
│   │       ├── webpack.production.config.js
│   │       └── package.json
│   ├── webui.go
│   └── webui_test.go
├── log.go
├── identifier_test.go
├── identifier.go
├── time.go
├── DEVELOPING.md
├── todo.txt
├── LICENSE
├── go.mod
├── cmd
│   ├── workenqueue
│   │   └── main.go
│   ├── workwebui
│   │   └── main.go
│   └── workfakedata
│       └── main.go
├── heartbeater_test.go
├── run.go
├── priority_sampler_test.go
├── requeuer_test.go
├── priority_sampler.go
├── requeuer.go
├── benches
│   ├── bench_goworker
│   │   └── main.go
│   ├── bench_work
│   │   └── main.go
│   ├── bench_jobs
│   │   └── main.go
│   └── bench_goworkers
│       └── main.go
├── heartbeater.go
├── observer_test.go
├── periodic_enqueuer.go
├── run_test.go
├── periodic_enqueuer_test.go
├── go.sum
├── dead_pool_reaper.go
├── job.go
├── job_test.go
├── observer.go
├── enqueue.go
├── worker_pool_test.go
├── worker.go
├── dead_pool_reaper_test.go
├── redis.go
└── enqueue_test.go
/webui/internal/assets/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 | coverage/
3 |
--------------------------------------------------------------------------------
/webui/internal/assets/.babelrc:
--------------------------------------------------------------------------------
1 | {
2 | "presets": ["es2015", "stage-0", "react"]
3 | }
4 |
--------------------------------------------------------------------------------
/webui/internal/assets/assets_generate.go:
--------------------------------------------------------------------------------
1 | package assets
2 |
3 | //go:generate go-bindata -prefix build -o assets.go -pkg assets build
4 |
--------------------------------------------------------------------------------
/log.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import "fmt"
4 |
5 | func logError(key string, err error) {
6 | fmt.Printf("ERROR: %s - %s\n", key, err.Error())
7 | }
8 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/TestSetup.js:
--------------------------------------------------------------------------------
1 | import 'jsdom-global/register';
2 | import { configure } from 'enzyme';
3 | import Adapter from 'enzyme-adapter-react-16';
4 |
5 | configure({ adapter: new Adapter() });
--------------------------------------------------------------------------------
/webui/internal/assets/src/cx.js:
--------------------------------------------------------------------------------
1 |
2 | // react only supports className as a string.
3 | // See https://github.com/facebook/react/pull/1198.
4 | export default (...classNames) => {
5 | return classNames.join(' ');
6 | };
7 |
--------------------------------------------------------------------------------
/identifier_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import "testing"
4 |
5 | func TestMakeIdentifier(t *testing.T) {
6 | id := makeIdentifier()
7 | if len(id) < 10 {
8 | t.Errorf("expected a string of length 10 at least")
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/identifier.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "crypto/rand"
5 | "fmt"
6 | "io"
7 | )
8 |
9 | func makeIdentifier() string {
10 | b := make([]byte, 12)
11 | _, err := io.ReadFull(rand.Reader, b)
12 | if err != nil {
13 | return ""
14 | }
15 | return fmt.Sprintf("%x", b)
16 | }
17 |
--------------------------------------------------------------------------------
/webui/internal/assets/build/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/ShortList.css:
--------------------------------------------------------------------------------
1 | .ul {
2 | display: inline;
3 | list-style: none;
4 | padding: 0;
5 | }
6 |
7 | .li {
8 | display: inline;
9 | }
10 |
11 | .li:after {
12 | content: ", ";
13 | }
14 |
15 | .li:last-child:before {
16 | content: "and "
17 | }
18 |
19 | .li:last-child:after {
20 | content: "";
21 | }
22 |
--------------------------------------------------------------------------------
/time.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import "time"
4 |
5 | var nowMock int64
6 |
7 | func nowEpochSeconds() int64 {
8 | if nowMock != 0 {
9 | return nowMock
10 | }
11 | return time.Now().Unix()
12 | }
13 |
14 | func setNowEpochSecondsMock(t int64) {
15 | nowMock = t
16 | }
17 |
18 | func resetNowEpochSecondsMock() {
19 | nowMock = 0
20 | }
21 |
22 | // convert epoch seconds to a time
23 | func epochSecondsToTime(t int64) time.Time {
24 | return time.Unix(t, 0)
25 | }
26 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/UnixTime.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 |
4 | export default class UnixTime extends React.Component {
5 | static propTypes = {
6 | ts: PropTypes.number.isRequired,
7 | }
8 |
9 | render() {
10 | let t = new Date(this.props.ts * 1e3);
11 | return (
12 | <time dateTime={t.toISOString()}>{t.toISOString().slice(0, 19).replace('T', ' ').replace(/-/g, '/')}</time>
13 | );
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/UnixTime.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import UnixTime from './UnixTime';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('UnixTime', () => {
8 | it('formats human-readable time string', () => {
9 | let output = mount(<UnixTime ts={1467753603} />);
10 |
11 | let time = output.find('time');
12 | expect(time.props().dateTime).toEqual('2016-07-05T21:20:03.000Z');
13 | expect(time.text()).toEqual('2016/07/05 21:20:03');
14 | });
15 | });
16 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/Queues.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import Queues from './Queues';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('Queues', () => {
8 | it('gets queued count', () => {
9 | let queues = mount(<Queues />);
10 | expect(queues.state().queues.length).toEqual(0);
11 |
12 | queues.setState({
13 | queues: [
14 | {job_name: 'test', count: 1, latency: 0},
15 | {job_name: 'test2', count: 2, latency: 0}
16 | ]
17 | });
18 |
19 | expect(queues.state().queues.length).toEqual(2);
20 | expect(queues.instance().queuedCount).toEqual(3);
21 | });
22 | });
23 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/ShortList.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import ShortList from './ShortList';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('ShortList', () => {
8 | it('lists items', () => {
9 | let shortList = mount(<ShortList item={['1', '2', '3', '4']} />);
10 | let ul = shortList.find('ul');
11 |
12 | ul.props().children.map((el, i) => {
13 | expect(el.type).toEqual('li');
14 | if (i < 3) {
15 | expect(el.props.children).toEqual(String(i+1));
16 | } else {
17 | expect(el.props.children).toEqual([i-2, ' more']);
18 | }
19 | });
20 | });
21 | });
22 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/TestUtils.js:
--------------------------------------------------------------------------------
1 |
2 | export let walkOutput = (root, walkFunc) => {
3 | if (root == undefined) {
4 | return;
5 | }
6 | if (Array.isArray(root)) {
7 | root.map((el) => {
8 | walkOutput(el, walkFunc);
9 | });
10 | return;
11 | }
12 |
13 | if (root.type) {
14 | walkFunc(root);
15 | walkOutput(root.props.children, walkFunc);
16 | }
17 | };
18 |
19 | export let findAllByTag = (root, tag) => {
20 | let found = [];
21 | walkOutput(root, (el) => {
22 | if (typeof el.type === 'function' && el.type.name == tag) {
23 | found.push(el);
24 | } else if (el.type === tag) {
25 | found.push(el);
26 | }
27 | });
28 | return found;
29 | };
30 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/ShortList.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import styles from './ShortList.css';
4 |
5 | export default class ShortList extends React.Component {
6 | static propTypes = {
7 | item: PropTypes.arrayOf(PropTypes.string).isRequired,
8 | }
9 |
10 | render() {
11 | return (
12 | <ul className={styles.ul}>
13 | {
14 | this.props.item.map((item, i) => {
15 | if (i < 3) {
16 | return (<li className={styles.li} key={i}>{item}</li>);
17 | } else if (i == 3) {
18 | return (<li className={styles.li} key={i}>{this.props.item.length - 3} more</li>);
19 | }
20 | })
21 | }
22 | </ul>
23 | );
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/webui/internal/assets/webpack.config.js:
--------------------------------------------------------------------------------
1 | var path = require('path');
2 | var webpack = require('webpack');
3 |
4 | var BUILD_DIR = path.resolve(__dirname, 'build');
5 | var APP_DIR = path.resolve(__dirname, 'src');
6 |
7 | module.exports = {
8 | entry: APP_DIR + '/index.js',
9 | output: { path: BUILD_DIR, filename: 'work.js' },
10 | module: {
11 | loaders: [
12 | {
13 | test: /\.js$/,
14 | loader: 'babel-loader',
15 | exclude: /node_modules/,
16 | },
17 | {
18 | test: /\.css$/,
19 | loader: 'style-loader!css-loader?modules&camelCase&-url&localIdentName=[hash:base64:5]-[local]'
20 | },
21 | {
22 | test: /\.(png|woff|woff2|eot|ttf|svg)(\?[#a-z_]+)?$/,
23 | loader: 'url'
24 | }
25 | ]
26 | }
27 | };
28 |
29 |
--------------------------------------------------------------------------------
/DEVELOPING.md:
--------------------------------------------------------------------------------
1 | ## Web UI
2 |
3 | ```
4 | cd cmd/workwebui
5 | go run main.go
6 | open "http://localhost:5040/"
7 | ```
8 |
9 | ## Assets
10 |
11 | The Web UI frontend is written in [React](https://facebook.github.io/react/). [Webpack](https://webpack.github.io/) is used to transpile and bundle the ES7/JSX source so it runs in modern browsers.
12 | Finally, the bundled JS is embedded in a Go file.
13 |
14 | All NPM commands can be found in `package.json`.
15 |
16 | - fetch dependencies: `npm install`
17 | - test: `npm test`
18 | - generate test coverage: `npm run cover`
19 | - lint: `npm run lint`
20 | - bundle for production: `npm run build`
21 | - bundle for testing: `npm run dev`
22 |
23 | To embed the bundled JS, run:
24 |
25 | ```
26 | go get -u github.com/jteeuwen/go-bindata/...
27 | cd webui/internal/assets
28 | go generate
29 | ```
30 |
--------------------------------------------------------------------------------
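Once `go generate` has run go-bindata as configured in `assets_generate.go` (`-prefix build -pkg assets`), the bundle is exposed through the generated `assets` package. The snippet below is a minimal sketch of consuming that output using only go-bindata's standard generated `Asset` accessor; the handler name and headers are illustrative, and the real `webui.go` server (not included in this dump) may differ. Because the package lives under `internal/`, only code inside `github.com/gocraft/work` can import it.

```go
package webui

import (
	"net/http"

	"github.com/gocraft/work/webui/internal/assets"
)

// serveIndex is a hypothetical handler showing how the embedded bundle is read.
// Asset names are relative to the build/ directory because of -prefix build,
// so "index.html" and the webpack output "work.js" are the generated entries.
func serveIndex(w http.ResponseWriter, r *http.Request) {
	page, err := assets.Asset("index.html")
	if err != nil {
		http.NotFound(w, r)
		return
	}
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	w.Write(page)
}
```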
/webui/internal/assets/.eslintrc.json:
--------------------------------------------------------------------------------
1 | {
2 | "env": {
3 | "mocha": true,
4 | "browser": true,
5 | "es6": true
6 | },
7 | "extends": ["eslint:recommended", "plugin:react/recommended"],
8 | "parser": "babel-eslint",
9 | "parserOptions": {
10 | "ecmaVersion": 7,
11 | "ecmaFeatures": {
12 | "experimentalObjectRestSpread": true,
13 | "jsx": true
14 | },
15 | "sourceType": "module"
16 | },
17 | "plugins": [
18 | "react"
19 | ],
20 | "rules": {
21 | "indent": [
22 | "error",
23 | 2
24 | ],
25 | "linebreak-style": [
26 | "error",
27 | "unix"
28 | ],
29 | "quotes": [
30 | "error",
31 | "single"
32 | ],
33 | "semi": [
34 | "error",
35 | "always"
36 | ]
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/todo.txt:
--------------------------------------------------------------------------------
1 | IDEAS/TODO:
2 | ----
3 | - zero context each time -- see if that affects performance
4 | - benchmarks for memory allocations
5 | - benchmarks for runJob
6 | - investigate changing runJob to use a shared context, and zeroing its value each time
7 | - revisit the retry backoff
8 | - generally, look into process scalability. Eg, if we have 30 processes, each with concurrency=25, that's a lot of pinging redis
9 | - thought: what if we *scale up* to max workers if some are idle, should we shut them down?
10 | - thing we're guarding against: 100 goroutines all polling redis
11 | - alt: some clever mechanism to only check redis if we are busy?
12 | - is there some way to detect redis contention, or overall, just measure the latency of redis
13 | - both lock contention (not enuf redis pool)
14 | - enuf pool, but redis itself is overloaded
15 | - It could be cool to provide an API for that redis stuff.
16 | - latencies
17 | - lock contention
18 | - number of redis connections used by work
19 | - overall redis stuff: mem, avail, cxns
20 | - it might be nice to have an overall counter like sidekiq
21 |
--------------------------------------------------------------------------------
/webui/internal/assets/webpack.production.config.js:
--------------------------------------------------------------------------------
1 | var path = require('path');
2 | var webpack = require('webpack');
3 |
4 | var BUILD_DIR = path.resolve(__dirname, 'build');
5 | var APP_DIR = path.resolve(__dirname, 'src');
6 |
7 | module.exports = {
8 | entry: APP_DIR + '/index.js',
9 | output: { path: BUILD_DIR, filename: 'work.js' },
10 | plugins: [
11 | new webpack.DefinePlugin({
12 | 'process.env': {
13 | 'NODE_ENV': JSON.stringify('production')
14 | }
15 | }),
16 | new webpack.optimize.UglifyJsPlugin({
17 | compressor: {
18 | warnings: false
19 | },
20 | comments: false
21 | })
22 | ],
23 | module: {
24 | loaders: [
25 | {
26 | test: /\.js$/,
27 | loader: 'babel-loader',
28 | exclude: /node_modules/,
29 | },
30 | {
31 | test: /\.css$/,
32 | loader: 'style-loader!css-loader?modules&camelCase&-url&localIdentName=[hash:base64:5]-[local]'
33 | },
34 | {
35 | test: /\.(png|woff|woff2|eot|ttf|svg)(\?[#a-z_]+)?$/,
36 | loader: 'url'
37 | }
38 | ]
39 | }
40 | };
41 |
42 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Jonathan Novak
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy of
6 | this software and associated documentation files (the "Software"), to deal in
7 | the Software without restriction, including without limitation the rights to
8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
9 | the Software, and to permit persons to whom the Software is furnished to do so,
10 | subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/PageList.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import PageList from './PageList';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('PageList', () => {
8 | it('lists pages', () => {
9 | let assertPage = (n, expected) => {
10 | let pageList = mount( () => {}} />);
11 | let ul = pageList.find('ul');
12 |
13 | expect(ul.props().children.map((el) => {
14 | expect(el.type).toEqual('li');
15 | return el.props.children.props.children;
16 | })).toEqual(expected);
17 | };
18 |
19 | assertPage(1, [1, 2, '..', 7]);
20 | assertPage(2, [1, 2, 3, '..', 7]);
21 | assertPage(3, [1, 2, 3, 4, '..', 7]);
22 | assertPage(4, [1, '..', 3, 4, 5, '..', 7]);
23 | assertPage(5, [1, '..', 4, 5, 6, 7]);
24 | assertPage(6, [1, '..', 5, 6, 7]);
25 | assertPage(7, [1, '..', 6, 7]);
26 | });
27 |
28 | it('renders nothing if there is nothing', () => {
29 | let pageList = mount( () => {}} />);
30 |
31 | expect(pageList.html()).toEqual(null);
32 | });
33 | });
34 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/PageList.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import styles from './bootstrap.min.css';
4 |
5 | export default class PageList extends React.Component {
6 | static propTypes = {
7 | page: PropTypes.number.isRequired,
8 | perPage: PropTypes.number.isRequired,
9 | totalCount: PropTypes.number.isRequired,
10 | jumpTo: PropTypes.func.isRequired,
11 | }
12 |
13 | get totalPage() {
14 | return Math.ceil(this.props.totalCount / this.props.perPage);
15 | }
16 |
17 | shouldShow(i) {
18 | if (i == 1 || i == this.totalPage) {
19 | return true;
20 | }
21 | return Math.abs(this.props.page - i) <= 1;
22 | }
23 |
24 | render() {
25 | if (this.totalPage == 0) {
26 | return null;
27 | }
28 | let pages = [];
29 | for (let i = 1; i <= this.totalPage; i++) {
30 | if (i == this.props.page) {
31 | pages.push({i});
32 | } else if (this.shouldShow(i)) {
33 | pages.push({i});
34 | } else if (this.shouldShow(i-1)) {
35 | pages.push(..);
36 | }
37 | }
38 | return (
39 |
40 | );
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/gocraft/work
2 |
3 | go 1.14
4 |
5 | require (
6 | github.com/albrow/jobs v0.4.2
7 | github.com/benmanns/goworker v0.1.3
8 | github.com/bitly/go-simplejson v0.5.0 // indirect
9 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
10 | github.com/braintree/manners v0.0.0-20160418043613-82a8879fc5fd
11 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
12 | github.com/customerio/gospec v0.0.0-20130710230057-a5cc0e48aa39 // indirect
13 | github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5 // indirect
14 | github.com/dustin/go-humanize v1.0.0 // indirect
15 | github.com/garyburd/redigo v1.6.0 // indirect
16 | github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0
17 | github.com/gocraft/web v0.0.0-20190207150652-9707327fb69b
18 | github.com/gocraft/work v0.5.1
19 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
20 | github.com/gomodule/redigo v2.0.0+incompatible
21 | github.com/jrallison/go-workers v0.0.0-20180112190529-dbf81d0b75bb
22 | github.com/kr/pretty v0.2.0 // indirect
23 | github.com/orfjackal/nanospec.go v0.0.0-20120727230329-de4694c1d701 // indirect
24 | github.com/robfig/cron v1.2.0 // indirect
25 | github.com/robfig/cron/v3 v3.0.1
26 | github.com/stretchr/testify v1.5.1
27 | github.com/youtube/vitess v2.1.1+incompatible // indirect
28 | golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 // indirect
29 | )
30 |
--------------------------------------------------------------------------------
/webui/internal/assets/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "scripts": {
4 | "test": "mocha 'src/**/*.test.js' --require babel-register --require ignore-styles; exit 0",
5 | "dev": "webpack --progress",
6 | "build": "webpack --progress --config webpack.production.config.js",
7 | "lint": "eslint src; exit 0",
8 | "cover": "istanbul cover _mocha --root src/ --include-all-sources -x '**/*.test.js' -x 'index.js' -- 'src/**/*.test.js' --require babel-register --require ignore-styles; exit 0"
9 | },
10 | "devDependencies": {
11 | "babel-core": "latest",
12 | "babel-eslint": "latest",
13 | "babel-loader": "latest",
14 | "babel-preset-es2015": "latest",
15 | "babel-preset-react": "latest",
16 | "babel-preset-stage-0": "latest",
17 | "css-loader": "latest",
18 | "enzyme": "^3.2.0",
19 | "enzyme-adapter-react-16": "^1.1.0",
20 | "eslint": "latest",
21 | "eslint-plugin-react": "latest",
22 | "expect": "latest",
23 | "file-loader": "latest",
24 | "ignore-styles": "latest",
25 | "istanbul": "^1.1.0-alpha.1",
26 | "jsdom": "^11.5.1",
27 | "jsdom-global": "^3.0.2",
28 | "mocha": "latest",
29 | "react": "latest",
30 | "react-addons-test-utils": "latest",
31 | "react-dom": "latest",
32 | "react-router": "3.2.0",
33 | "react-shallow-renderer-helpers": "^2.0.2",
34 | "style-loader": "latest",
35 | "url-loader": "latest",
36 | "webpack": "latest"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/cmd/workenqueue/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "encoding/json"
5 | "flag"
6 | "fmt"
7 | "os"
8 | "time"
9 |
10 | "github.com/gocraft/work"
11 | "github.com/gomodule/redigo/redis"
12 | )
13 |
14 | var redisHostPort = flag.String("redis", ":6379", "redis hostport")
15 | var redisNamespace = flag.String("ns", "work", "redis namespace")
16 | var jobName = flag.String("job", "", "job name")
17 | var jobArgs = flag.String("args", "{}", "job arguments")
18 |
19 | func main() {
20 | flag.Parse()
21 |
22 | if *jobName == "" {
23 | fmt.Println("no job specified")
24 | os.Exit(1)
25 | }
26 |
27 | pool := newPool(*redisHostPort)
28 |
29 | var args map[string]interface{}
30 | err := json.Unmarshal([]byte(*jobArgs), &args)
31 | if err != nil {
32 | fmt.Println("invalid args:", err)
33 | os.Exit(1)
34 | }
35 |
36 | en := work.NewEnqueuer(*redisNamespace, pool)
37 | en.Enqueue(*jobName, args)
38 | }
39 |
40 | func newPool(addr string) *redis.Pool {
41 | return &redis.Pool{
42 | MaxActive: 20,
43 | MaxIdle: 20,
44 | IdleTimeout: 240 * time.Second,
45 | Dial: func() (redis.Conn, error) {
46 | c, err := redis.Dial("tcp", addr)
47 | if err != nil {
48 | return nil, err
49 | }
50 | return c, nil
51 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
52 | },
53 | Wait: true,
54 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
55 | // _, err := c.Do("PING")
56 | // return err
57 | //},
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
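For reference, the same enqueue call can be made directly from Go code. The sketch below mirrors the CLI above and adds a delayed job via `EnqueueIn` (the call exercised in `requeuer_test.go`); the job name `send_email`, its arguments, and the Redis address are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redis.Pool{
		MaxActive:   5,
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		Dial:        func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}

	enq := work.NewEnqueuer("work", pool)

	// Runs as soon as a worker picks it up.
	if _, err := enq.Enqueue("send_email", work.Q{"address": "test@example.com"}); err != nil {
		fmt.Println("enqueue failed:", err)
	}

	// Runs roughly 60 seconds from now; sits on the scheduled zset until the requeuer moves it.
	if _, err := enq.EnqueueIn("send_email", 60, work.Q{"address": "test@example.com"}); err != nil {
		fmt.Println("enqueue-in failed:", err)
	}
}
```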
/heartbeater_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestHeartbeater(t *testing.T) {
12 | pool := newTestPool(":6379")
13 | ns := "work"
14 |
15 | tMock := int64(1425263409)
16 | setNowEpochSecondsMock(tMock)
17 | defer resetNowEpochSecondsMock()
18 |
19 | jobTypes := map[string]*jobType{
20 | "foo": nil,
21 | "bar": nil,
22 | }
23 |
24 | heart := newWorkerPoolHeartbeater(ns, pool, "abcd", jobTypes, 10, []string{"ccc", "bbb"})
25 | heart.start()
26 |
27 | time.Sleep(20 * time.Millisecond)
28 |
29 | assert.True(t, redisInSet(pool, redisKeyWorkerPools(ns), "abcd"))
30 |
31 | h := readHash(pool, redisKeyHeartbeat(ns, "abcd"))
32 | assert.Equal(t, "1425263409", h["heartbeat_at"])
33 | assert.Equal(t, "1425263409", h["started_at"])
34 | assert.Equal(t, "bar,foo", h["job_names"])
35 | assert.Equal(t, "bbb,ccc", h["worker_ids"])
36 | assert.Equal(t, "10", h["concurrency"])
37 |
38 | assert.True(t, h["pid"] != "")
39 | assert.True(t, h["host"] != "")
40 |
41 | heart.stop()
42 |
43 | assert.False(t, redisInSet(pool, redisKeyWorkerPools(ns), "abcd"))
44 | }
45 |
46 | func redisInSet(pool *redis.Pool, key, member string) bool {
47 | conn := pool.Get()
48 | defer conn.Close()
49 |
50 | v, err := redis.Bool(conn.Do("SISMEMBER", key, member))
51 | if err != nil {
52 | panic("could not delete retry/dead queue: " + err.Error())
53 | }
54 | return v
55 | }
56 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/RetryJobs.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import RetryJobs from './RetryJobs';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('RetryJobs', () => {
8 | it('shows jobs', () => {
9 | let retryJobs = mount(<RetryJobs />);
10 |
11 | expect(retryJobs.state().jobs.length).toEqual(0);
12 |
13 | retryJobs.setState({
14 | count: 2,
15 | jobs: [
16 | {id: 1, name: 'test', args: {}, t: 1467760821, err: 'err1'},
17 | {id: 2, name: 'test2', args: {}, t: 1467760822, err: 'err2'}
18 | ]
19 | });
20 |
21 | expect(retryJobs.state().jobs.length).toEqual(2);
22 | });
23 |
24 | it('has pages', () => {
25 | let retryJobs = mount(<RetryJobs />);
26 |
27 | let genJob = (n) => {
28 | let job = [];
29 | for (let i = 1; i <= n; i++) {
30 | job.push({
31 | id: i,
32 | name: 'test',
33 | args: {},
34 | t: 1467760821,
35 | err: 'err',
36 | });
37 | }
38 | return job;
39 | };
40 | retryJobs.setState({
41 | count: 21,
42 | jobs: genJob(21)
43 | });
44 |
45 | expect(retryJobs.state().jobs.length).toEqual(21);
46 | expect(retryJobs.state().page).toEqual(1);
47 |
48 | let pageList = retryJobs.find('PageList');
49 | expect(pageList.length).toEqual(1);
50 |
51 | pageList.at(0).props().jumpTo(2)();
52 | expect(retryJobs.state().page).toEqual(2);
53 | });
54 | });
55 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/ScheduledJobs.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import ScheduledJobs from './ScheduledJobs';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('ScheduledJobs', () => {
8 | it('shows jobs', () => {
9 | let scheduledJobs = mount(<ScheduledJobs />);
10 |
11 | expect(scheduledJobs.state().jobs.length).toEqual(0);
12 |
13 | scheduledJobs.setState({
14 | count: 2,
15 | jobs: [
16 | {id: 1, name: 'test', args: {}, run_at: 1467760821, err: 'err1'},
17 | {id: 2, name: 'test2', args: {}, run_at: 1467760822, err: 'err2'}
18 | ]
19 | });
20 |
21 | expect(scheduledJobs.state().jobs.length).toEqual(2);
22 | });
23 |
24 | it('has pages', () => {
25 | let scheduledJobs = mount(<ScheduledJobs />);
26 |
27 | let genJob = (n) => {
28 | let job = [];
29 | for (let i = 1; i <= n; i++) {
30 | job.push({
31 | id: i,
32 | name: 'test',
33 | args: {},
34 | run_at: 1467760821,
35 | err: 'err',
36 | });
37 | }
38 | return job;
39 | };
40 | scheduledJobs.setState({
41 | count: 21,
42 | jobs: genJob(21)
43 | });
44 |
45 | expect(scheduledJobs.state().jobs.length).toEqual(21);
46 | expect(scheduledJobs.state().page).toEqual(1);
47 |
48 | let pageList = scheduledJobs.find('PageList');
49 | expect(pageList.length).toEqual(1);
50 |
51 | pageList.at(0).props().jumpTo(2)();
52 | expect(scheduledJobs.state().page).toEqual(2);
53 | });
54 | });
55 |
--------------------------------------------------------------------------------
/cmd/workwebui/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "os"
7 | "os/signal"
8 | "strconv"
9 | "time"
10 |
11 | "github.com/gocraft/work/webui"
12 | "github.com/gomodule/redigo/redis"
13 | )
14 |
15 | var (
16 | redisHostPort = flag.String("redis", ":6379", "redis hostport")
17 | redisDatabase = flag.String("database", "0", "redis database")
18 | redisNamespace = flag.String("ns", "work", "redis namespace")
19 | webHostPort = flag.String("listen", ":5040", "hostport to listen for HTTP JSON API")
20 | )
21 |
22 | func main() {
23 | flag.Parse()
24 |
25 | fmt.Println("Starting workwebui:")
26 | fmt.Println("redis = ", *redisHostPort)
27 | fmt.Println("database = ", *redisDatabase)
28 | fmt.Println("namespace = ", *redisNamespace)
29 | fmt.Println("listen = ", *webHostPort)
30 |
31 | database, err := strconv.Atoi(*redisDatabase)
32 | if err != nil {
33 | fmt.Printf("Error: %v is not a valid database value", *redisDatabase)
34 | return
35 | }
36 |
37 | pool := newPool(*redisHostPort, database)
38 |
39 | server := webui.NewServer(*redisNamespace, pool, *webHostPort)
40 | server.Start()
41 |
42 | c := make(chan os.Signal, 1)
43 | signal.Notify(c, os.Interrupt, os.Kill)
44 |
45 | <-c
46 |
47 | server.Stop()
48 |
49 | fmt.Println("\nQuitting...")
50 | }
51 |
52 | func newPool(addr string, database int) *redis.Pool {
53 | return &redis.Pool{
54 | MaxActive: 3,
55 | MaxIdle: 3,
56 | IdleTimeout: 240 * time.Second,
57 | Dial: func() (redis.Conn, error) {
58 | return redis.DialURL(addr, redis.DialDatabase(database))
59 | },
60 | Wait: true,
61 | }
62 | }
63 |
--------------------------------------------------------------------------------
/run.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | )
7 |
8 | // returns an error if the job fails, or there's a panic, or we couldn't reflect correctly.
9 | // if we return an error, it signals we want the job to be retried.
10 | func runJob(job *Job, ctxType reflect.Type, middleware []*middlewareHandler, jt *jobType) (returnCtx reflect.Value, returnError error) {
11 | returnCtx = reflect.New(ctxType)
12 | currentMiddleware := 0
13 | maxMiddleware := len(middleware)
14 |
15 | var next NextMiddlewareFunc
16 | next = func() error {
17 | if currentMiddleware < maxMiddleware {
18 | mw := middleware[currentMiddleware]
19 | currentMiddleware++
20 | if mw.IsGeneric {
21 | return mw.GenericMiddlewareHandler(job, next)
22 | }
23 | res := mw.DynamicMiddleware.Call([]reflect.Value{returnCtx, reflect.ValueOf(job), reflect.ValueOf(next)})
24 | x := res[0].Interface()
25 | if x == nil {
26 | return nil
27 | }
28 | return x.(error)
29 | }
30 | if jt.IsGeneric {
31 | return jt.GenericHandler(job)
32 | }
33 | res := jt.DynamicHandler.Call([]reflect.Value{returnCtx, reflect.ValueOf(job)})
34 | x := res[0].Interface()
35 | if x == nil {
36 | return nil
37 | }
38 | return x.(error)
39 | }
40 |
41 | defer func() {
42 | if panicErr := recover(); panicErr != nil {
43 | // err turns out to be interface{}, of actual type "runtime.errorCString"
44 | // Luckily, the err sprints nicely via fmt.
45 | errorishError := fmt.Errorf("%v", panicErr)
46 | logError("runJob.panic", errorishError)
47 | returnError = errorishError
48 | }
49 | }()
50 |
51 | returnError = next()
52 |
53 | return
54 | }
55 |
--------------------------------------------------------------------------------
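`runJob` above is what ultimately invokes registered middleware and handlers: generic ones are called directly, context-based ones via reflection, and a returned error or recovered panic marks the job for retry. A minimal sketch of the public-API side of that chain, using the generic signatures (`func(*work.Job) error` and `func(*work.Job, work.NextMiddlewareFunc) error`); the job name, handler logic, and Redis address are illustrative.

```go
package main

import (
	"fmt"

	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

type jobContext struct{}

// Generic middleware: wraps every job; calling next() continues down the chain to the handler.
func logStart(job *work.Job, next work.NextMiddlewareFunc) error {
	fmt.Println("starting job:", job.Name)
	return next()
}

// Generic handler: returning an error (or panicking, which runJob recovers) signals a retry.
func sendEmail(job *work.Job) error {
	if job.Args["address"] == nil {
		return fmt.Errorf("missing address")
	}
	return nil
}

func main() {
	pool := &redis.Pool{
		Dial: func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}

	wp := work.NewWorkerPool(jobContext{}, 10, "work", pool)
	wp.Middleware(logStart)
	wp.Job("send_email", sendEmail)
	wp.Start()

	select {} // block forever, as cmd/workfakedata does
}
```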
/priority_sampler_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestPrioritySampler(t *testing.T) {
11 | ps := prioritySampler{}
12 |
13 | ps.add(5, "jobs.5", "jobsinprog.5", "jobspaused.5", "jobslock.5", "jobslockinfo.5", "jobsconcurrency.5")
14 | ps.add(2, "jobs.2a", "jobsinprog.2a", "jobspaused.2a", "jobslock.2a", "jobslockinfo.2a", "jobsconcurrency.2a")
15 | ps.add(1, "jobs.1b", "jobsinprog.1b", "jobspaused.1b", "jobslock.1b", "jobslockinfo.1b", "jobsconcurrency.1b")
16 |
17 | var c5 = 0
18 | var c2 = 0
19 | var c1 = 0
20 | var c1end = 0
21 | var total = 200
22 | for i := 0; i < total; i++ {
23 | ret := ps.sample()
24 | if ret[0].priority == 5 {
25 | c5++
26 | } else if ret[0].priority == 2 {
27 | c2++
28 | } else if ret[0].priority == 1 {
29 | c1++
30 | }
31 | if ret[2].priority == 1 {
32 | c1end++
33 | }
34 | }
35 |
36 | // make sure these numbers are roughly correct. note that probability is a thing.
37 | assert.True(t, c5 > (2*c2))
38 | assert.True(t, float64(c2) > (1.5*float64(c1)))
39 | assert.True(t, c1 >= (total/13), fmt.Sprintf("c1 = %d total = %d total/13=%d", c1, total, total/13))
40 | assert.True(t, float64(c1end) > (float64(total)*0.50))
41 | }
42 |
43 | func BenchmarkPrioritySampler(b *testing.B) {
44 | ps := prioritySampler{}
45 | for i := 0; i < 200; i++ {
46 | ps.add(uint(i)+1,
47 | "jobs."+fmt.Sprint(i),
48 | "jobsinprog."+fmt.Sprint(i),
49 | "jobspaused."+fmt.Sprint(i),
50 | "jobslock."+fmt.Sprint(i),
51 | "jobslockinfo."+fmt.Sprint(i),
52 | "jobsmaxconcurrency."+fmt.Sprint(i))
53 | }
54 |
55 | b.ResetTimer()
56 | for i := 0; i < b.N; i++ {
57 | ps.sample()
58 | }
59 | }
60 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/Processes.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import Processes from './Processes';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('Processes', () => {
8 | it('shows workers', () => {
9 | let processes = mount(<Processes />);
10 |
11 | expect(processes.state().busyWorker.length).toEqual(0);
12 | expect(processes.state().workerPool.length).toEqual(0);
13 |
14 | processes.setState({
15 | busyWorker: [
16 | {
17 | worker_id: '2',
18 | job_name: 'job1',
19 | started_at: 1467753603,
20 | checkin_at: 1467753603,
21 | checkin: '123',
22 | args_json: '{}'
23 | }
24 | ],
25 | workerPool: [
26 | {
27 | worker_pool_id: '1',
28 | started_at: 1467753603,
29 | heartbeat_at: 1467753603,
30 | job_names: ['job1', 'job2', 'job3', 'job4'],
31 | concurrency: 10,
32 | host: 'web51',
33 | pid: 123,
34 | worker_ids: [
35 | '1', '2', '3'
36 | ]
37 | }
38 | ]
39 | });
40 |
41 | expect(processes.state().busyWorker.length).toEqual(1);
42 | expect(processes.state().workerPool.length).toEqual(1);
43 | expect(processes.instance().workerCount).toEqual(3);
44 |
45 | const expectedBusyWorker = [ { args_json: '{}', checkin: '123', checkin_at: 1467753603, job_name: 'job1', started_at: 1467753603, worker_id: '2' } ];
46 |
47 | let busyWorkers = processes.find('BusyWorkers');
48 | expect(busyWorkers.length).toEqual(1);
49 | expect(busyWorkers.at(0).props().worker).toEqual(expectedBusyWorker);
50 | expect(processes.instance().getBusyPoolWorker(processes.state().workerPool[0])).toEqual(expectedBusyWorker);
51 | });
52 | });
53 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/Queues.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import styles from './bootstrap.min.css';
4 | import cx from './cx';
5 |
6 | export default class Queues extends React.Component {
7 | static propTypes = {
8 | url: PropTypes.string,
9 | }
10 |
11 | state = {
12 | queues: []
13 | }
14 |
15 | componentWillMount() {
16 | if (!this.props.url) {
17 | return;
18 | }
19 | fetch(this.props.url).
20 | then((resp) => resp.json()).
21 | then((data) => {
22 | this.setState({queues: data});
23 | });
24 | }
25 |
26 | get queuedCount() {
27 | let count = 0;
28 | this.state.queues.map((queue) => {
29 | count += queue.count;
30 | });
31 | return count;
32 | }
33 |
34 | render() {
35 | return (
36 |
37 |
queues
38 |
39 |
{this.state.queues.length} queue(s) with a total of {this.queuedCount} item(s) queued.
40 |
41 |
42 |
43 |
44 |
45 | | Name |
46 | Count |
47 | Latency (seconds) |
48 |
49 | {
50 | this.state.queues.map((queue) => {
51 | return (
52 |
53 | | {queue.job_name} |
54 | {queue.count} |
55 | {queue.latency} |
56 |
57 | );
58 | })
59 | }
60 |
61 |
62 |
63 |
64 | );
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/ScheduledJobs.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import PageList from './PageList';
4 | import UnixTime from './UnixTime';
5 | import styles from './bootstrap.min.css';
6 | import cx from './cx';
7 |
8 | export default class ScheduledJobs extends React.Component {
9 | static propTypes = {
10 | url: PropTypes.string,
11 | }
12 |
13 | state = {
14 | page: 1,
15 | count: 0,
16 | jobs: []
17 | }
18 |
19 | fetch() {
20 | if (!this.props.url) {
21 | return;
22 | }
23 | fetch(`${this.props.url}?page=${this.state.page}`).
24 | then((resp) => resp.json()).
25 | then((data) => {
26 | this.setState({
27 | count: data.count,
28 | jobs: data.jobs
29 | });
30 | });
31 | }
32 |
33 | componentWillMount() {
34 | this.fetch();
35 | }
36 |
37 | updatePage(page) {
38 | this.setState({page: page}, this.fetch);
39 | }
40 |
41 | render() {
42 | return (
43 |
44 |
Scheduled Jobs
45 |
46 |
{this.state.count} job(s) scheduled.
47 |
() => this.updatePage(page)}/>
48 |
49 |
50 |
51 |
52 |
53 | | Name |
54 | Arguments |
55 | Scheduled For |
56 |
57 | {
58 | this.state.jobs.map((job) => {
59 | return (
60 |
61 | | {job.name} |
62 | {JSON.stringify(job.args)} |
63 | |
64 |
65 | );
66 | })
67 | }
68 |
69 |
70 |
71 |
72 | );
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/RetryJobs.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import PageList from './PageList';
4 | import UnixTime from './UnixTime';
5 | import styles from './bootstrap.min.css';
6 | import cx from './cx';
7 |
8 | export default class RetryJobs extends React.Component {
9 | static propTypes = {
10 | url: PropTypes.string,
11 | }
12 |
13 | state = {
14 | page: 1,
15 | count: 0,
16 | jobs: []
17 | }
18 |
19 | fetch() {
20 | if (!this.props.url) {
21 | return;
22 | }
23 | fetch(`${this.props.url}?page=${this.state.page}`).
24 | then((resp) => resp.json()).
25 | then((data) => {
26 | this.setState({
27 | count: data.count,
28 | jobs: data.jobs
29 | });
30 | });
31 | }
32 |
33 | componentWillMount() {
34 | this.fetch();
35 | }
36 |
37 | updatePage(page) {
38 | this.setState({page: page}, this.fetch);
39 | }
40 |
41 | render() {
42 | return (
43 |
44 |
Retry Jobs
45 |
46 |
{this.state.count} job(s) scheduled to be retried.
47 |
() => this.updatePage(page)}/>
48 |
49 |
50 |
51 |
52 |
53 | | Name |
54 | Arguments |
55 | Error |
56 | Retry At |
57 |
58 | {
59 | this.state.jobs.map((job) => {
60 | return (
61 |
62 | | {job.name} |
63 | {JSON.stringify(job.args)} |
64 | {job.err} |
65 | |
66 |
67 | );
68 | })
69 | }
70 |
71 |
72 |
73 |
74 | );
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
/cmd/workfakedata/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "flag"
5 | "fmt"
6 | "math/rand"
7 | "time"
8 |
9 | "github.com/gocraft/work"
10 | "github.com/gomodule/redigo/redis"
11 | )
12 |
13 | var redisHostPort = flag.String("redis", ":6379", "redis hostport")
14 | var redisNamespace = flag.String("ns", "work", "redis namespace")
15 |
16 | func epsilonHandler(job *work.Job) error {
17 | fmt.Println("epsilon")
18 | time.Sleep(time.Second)
19 |
20 | if rand.Intn(2) == 0 {
21 | return fmt.Errorf("random error")
22 | }
23 | return nil
24 | }
25 |
26 | type context struct{}
27 |
28 | func main() {
29 | flag.Parse()
30 | fmt.Println("Installing some fake data")
31 |
32 | pool := newPool(*redisHostPort)
33 | cleanKeyspace(pool, *redisNamespace)
34 |
35 | // Enqueue some jobs:
36 | go func() {
37 | conn := pool.Get()
38 | defer conn.Close()
39 | conn.Do("SADD", *redisNamespace+":known_jobs", "foobar")
40 | }()
41 |
42 | go func() {
43 | for {
44 | en := work.NewEnqueuer(*redisNamespace, pool)
45 | for i := 0; i < 20; i++ {
46 | en.Enqueue("foobar", work.Q{"i": i})
47 | }
48 |
49 | time.Sleep(1 * time.Second)
50 | }
51 | }()
52 |
53 | wp := work.NewWorkerPool(context{}, 5, *redisNamespace, pool)
54 | wp.Job("foobar", epsilonHandler)
55 | wp.Start()
56 |
57 | select {}
58 | }
59 |
60 | func newPool(addr string) *redis.Pool {
61 | return &redis.Pool{
62 | MaxActive: 20,
63 | MaxIdle: 20,
64 | IdleTimeout: 240 * time.Second,
65 | Dial: func() (redis.Conn, error) {
66 | c, err := redis.Dial("tcp", addr)
67 | if err != nil {
68 | return nil, err
69 | }
70 | return c, nil
71 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
72 | },
73 | Wait: true,
74 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
75 | // _, err := c.Do("PING")
76 | // return err
77 | //},
78 | }
79 | }
80 |
81 | func cleanKeyspace(pool *redis.Pool, namespace string) {
82 | conn := pool.Get()
83 | defer conn.Close()
84 |
85 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
86 | if err != nil {
87 | panic("could not get keys: " + err.Error())
88 | }
89 | for _, k := range keys {
90 | if _, err := conn.Do("DEL", k); err != nil {
91 | panic("could not del: " + err.Error())
92 | }
93 | }
94 | }
95 |
--------------------------------------------------------------------------------
/requeuer_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func TestRequeue(t *testing.T) {
10 | pool := newTestPool(":6379")
11 | ns := "work"
12 | cleanKeyspace(ns, pool)
13 |
14 | tMock := nowEpochSeconds() - 10
15 | setNowEpochSecondsMock(tMock)
16 | defer resetNowEpochSecondsMock()
17 |
18 | enqueuer := NewEnqueuer(ns, pool)
19 | _, err := enqueuer.EnqueueIn("wat", -9, nil)
20 | assert.NoError(t, err)
21 | _, err = enqueuer.EnqueueIn("wat", -9, nil)
22 | assert.NoError(t, err)
23 | _, err = enqueuer.EnqueueIn("foo", 10, nil)
24 | assert.NoError(t, err)
25 | _, err = enqueuer.EnqueueIn("foo", 14, nil)
26 | assert.NoError(t, err)
27 | _, err = enqueuer.EnqueueIn("bar", 19, nil)
28 | assert.NoError(t, err)
29 |
30 | resetNowEpochSecondsMock()
31 |
32 | re := newRequeuer(ns, pool, redisKeyScheduled(ns), []string{"wat", "foo", "bar"})
33 | re.start()
34 | re.drain()
35 | re.stop()
36 |
37 | assert.EqualValues(t, 2, listSize(pool, redisKeyJobs(ns, "wat")))
38 | assert.EqualValues(t, 1, listSize(pool, redisKeyJobs(ns, "foo")))
39 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, "bar")))
40 | assert.EqualValues(t, 2, zsetSize(pool, redisKeyScheduled(ns)))
41 |
42 | j := jobOnQueue(pool, redisKeyJobs(ns, "foo"))
43 | assert.Equal(t, j.Name, "foo")
44 |
45 | // Because we mocked time to 10 seconds ago above, the job was put on the zset with t=10 secs ago
46 | // We want to ensure it's requeued with t=now.
47 | // On boundary conditions with the VM, nowEpochSeconds() might be 1 or 2 secs ahead of EnqueuedAt
48 | assert.True(t, (j.EnqueuedAt+2) >= nowEpochSeconds())
49 |
50 | }
51 |
52 | func TestRequeueUnknown(t *testing.T) {
53 | pool := newTestPool(":6379")
54 | ns := "work"
55 | cleanKeyspace(ns, pool)
56 |
57 | tMock := nowEpochSeconds() - 10
58 | setNowEpochSecondsMock(tMock)
59 | defer resetNowEpochSecondsMock()
60 |
61 | enqueuer := NewEnqueuer(ns, pool)
62 | _, err := enqueuer.EnqueueIn("wat", -9, nil)
63 | assert.NoError(t, err)
64 |
65 | nowish := nowEpochSeconds()
66 | setNowEpochSecondsMock(nowish)
67 |
68 | re := newRequeuer(ns, pool, redisKeyScheduled(ns), []string{"bar"})
69 | re.start()
70 | re.drain()
71 | re.stop()
72 |
73 | assert.EqualValues(t, 0, zsetSize(pool, redisKeyScheduled(ns)))
74 | assert.EqualValues(t, 1, zsetSize(pool, redisKeyDead(ns)))
75 |
76 | rank, job := jobOnZset(pool, redisKeyDead(ns))
77 |
78 | assert.Equal(t, nowish, rank)
79 | assert.Equal(t, nowish, job.FailedAt)
80 | assert.Equal(t, "unknown job when requeueing", job.LastErr)
81 | }
82 |
--------------------------------------------------------------------------------
/priority_sampler.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "math/rand"
5 | )
6 |
7 | type prioritySampler struct {
8 | sum uint
9 | samples []sampleItem
10 | }
11 |
12 | type sampleItem struct {
13 | priority uint
14 |
15 | // payload:
16 | redisJobs string
17 | redisJobsInProg string
18 | redisJobsPaused string
19 | redisJobsLock string
20 | redisJobsLockInfo string
21 | redisJobsMaxConcurrency string
22 | }
23 |
24 | func (s *prioritySampler) add(priority uint, redisJobs, redisJobsInProg, redisJobsPaused, redisJobsLock, redisJobsLockInfo, redisJobsMaxConcurrency string) {
25 | sample := sampleItem{
26 | priority: priority,
27 | redisJobs: redisJobs,
28 | redisJobsInProg: redisJobsInProg,
29 | redisJobsPaused: redisJobsPaused,
30 | redisJobsLock: redisJobsLock,
31 | redisJobsLockInfo: redisJobsLockInfo,
32 | redisJobsMaxConcurrency: redisJobsMaxConcurrency,
33 | }
34 | s.samples = append(s.samples, sample)
35 | s.sum += priority
36 | }
37 |
38 | // sample re-sorts s.samples, modifying it in-place. Higher weighted things will tend to go towards the beginning.
39 | // NOTE: as written currently makes 0 allocations.
40 | // NOTE2: this is an O(n^2) algorithm that is:
41 | // 5492ns for 50 jobs (50 is a large number of unique jobs in my experience)
42 | // 54966ns for 200 jobs
43 | // ~1ms for 1000 jobs
44 | // ~4ms for 2000 jobs
45 | func (s *prioritySampler) sample() []sampleItem {
46 | lenSamples := len(s.samples)
47 | remaining := lenSamples
48 | sumRemaining := s.sum
49 | lastValidIdx := 0
50 |
51 | // Algorithm is as follows:
52 | // Loop until we sort everything. We're going to sort it in-place, probabilistically moving the highest weights to the front of the slice.
53 | // Pick a random number
54 | // Move backwards through the slice on each iteration,
55 | // and see where the random number fits in the continuum.
56 | // If we find where it fits, sort the item to the next slot towards the front of the slice.
57 | for remaining > 1 {
58 | // rn from [0 to sumRemaining)
59 | rn := uint(rand.Uint32()) % sumRemaining
60 |
61 | prevSum := uint(0)
62 | for i := lenSamples - 1; i >= lastValidIdx; i-- {
63 | sample := s.samples[i]
64 | if rn < (sample.priority + prevSum) {
65 | // move the sample to the beginning
66 | s.samples[i], s.samples[lastValidIdx] = s.samples[lastValidIdx], s.samples[i]
67 |
68 | sumRemaining -= sample.priority
69 | break
70 | } else {
71 | prevSum += sample.priority
72 | }
73 | }
74 |
75 | lastValidIdx++
76 | remaining--
77 | }
78 |
79 | return s.samples
80 | }
81 |
--------------------------------------------------------------------------------
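As the comments above describe, `sample()` probabilistically reorders the slice in place so higher-priority queues tend to end up first. Since `prioritySampler` is unexported, the sketch below is in-package (the same pattern as `priority_sampler_test.go`); the queue key names are placeholders. With priorities 5, 2, and 1 the sum is 8, so the priority-5 queue should come out first roughly 5/8 of the time.

```go
package work

import "fmt"

// examplePrioritySampler is an illustrative, in-package sketch of the sampler's behavior.
func examplePrioritySampler() {
	ps := prioritySampler{}
	ps.add(5, "jobs.high", "jobsinprog.high", "jobspaused.high", "jobslock.high", "jobslockinfo.high", "jobsconcurrency.high")
	ps.add(2, "jobs.med", "jobsinprog.med", "jobspaused.med", "jobslock.med", "jobslockinfo.med", "jobsconcurrency.med")
	ps.add(1, "jobs.low", "jobsinprog.low", "jobspaused.low", "jobslock.low", "jobslockinfo.low", "jobsconcurrency.low")

	firstCount := map[string]int{}
	for i := 0; i < 8000; i++ {
		ordered := ps.sample() // reorders in place; ordered[0] is the queue to poll first
		firstCount[ordered[0].redisJobs]++
	}
	// Expect roughly 5000 / 2000 / 1000 hits for high / med / low given weights 5:2:1.
	fmt.Println(firstCount)
}
```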
/webui/internal/assets/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import { render } from 'react-dom';
4 | import Processes from './Processes';
5 | import DeadJobs from './DeadJobs';
6 | import Queues from './Queues';
7 | import RetryJobs from './RetryJobs';
8 | import ScheduledJobs from './ScheduledJobs';
9 | import { Router, Route, Link, IndexRedirect, hashHistory } from 'react-router';
10 | import styles from './bootstrap.min.css';
11 | import cx from './cx';
12 |
13 | class App extends React.Component {
14 | static propTypes = {
15 | children: PropTypes.element.isRequired,
16 | }
17 |
18 | render() {
19 | return (
20 |
21 |
22 |
23 |
24 |
25 | {this.props.children}
26 |
27 |
38 |
39 |
40 | );
41 | }
42 | }
43 |
44 | // react-router's Route cannot be used to pass props to child components.
45 | // See https://github.com/reactjs/react-router/issues/1857.
46 | render(
47 |
48 |
49 | } />
50 | } />
51 | } />
52 | } />
53 |
54 |
61 | } />
62 |
63 |
64 | ,
65 | document.getElementById('app')
66 | );
67 |
--------------------------------------------------------------------------------
/requeuer.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "time"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | )
9 |
10 | type requeuer struct {
11 | namespace string
12 | pool *redis.Pool
13 |
14 | redisRequeueScript *redis.Script
15 | redisRequeueArgs []interface{}
16 |
17 | stopChan chan struct{}
18 | doneStoppingChan chan struct{}
19 |
20 | drainChan chan struct{}
21 | doneDrainingChan chan struct{}
22 | }
23 |
24 | func newRequeuer(namespace string, pool *redis.Pool, requeueKey string, jobNames []string) *requeuer {
25 | args := make([]interface{}, 0, len(jobNames)+2+2)
26 | args = append(args, requeueKey) // KEY[1]
27 | args = append(args, redisKeyDead(namespace)) // KEY[2]
28 | for _, jobName := range jobNames {
29 | args = append(args, redisKeyJobs(namespace, jobName)) // KEY[3, 4, ...]
30 | }
31 | args = append(args, redisKeyJobsPrefix(namespace)) // ARGV[1]
32 | args = append(args, 0) // ARGV[2] -- NOTE: We're going to change this one on every call
33 |
34 | return &requeuer{
35 | namespace: namespace,
36 | pool: pool,
37 |
38 | redisRequeueScript: redis.NewScript(len(jobNames)+2, redisLuaZremLpushCmd),
39 | redisRequeueArgs: args,
40 |
41 | stopChan: make(chan struct{}),
42 | doneStoppingChan: make(chan struct{}),
43 |
44 | drainChan: make(chan struct{}),
45 | doneDrainingChan: make(chan struct{}),
46 | }
47 | }
48 |
49 | func (r *requeuer) start() {
50 | go r.loop()
51 | }
52 |
53 | func (r *requeuer) stop() {
54 | r.stopChan <- struct{}{}
55 | <-r.doneStoppingChan
56 | }
57 |
58 | func (r *requeuer) drain() {
59 | r.drainChan <- struct{}{}
60 | <-r.doneDrainingChan
61 | }
62 |
63 | func (r *requeuer) loop() {
64 | // Just do this simple thing for now.
65 | // If we have 100 processes all running requeuers,
66 | // there's probably too much hitting redis.
67 | // So later on we'll have to implement exponential backoff.
68 | ticker := time.Tick(1000 * time.Millisecond)
69 |
70 | for {
71 | select {
72 | case <-r.stopChan:
73 | r.doneStoppingChan <- struct{}{}
74 | return
75 | case <-r.drainChan:
76 | for r.process() {
77 | }
78 | r.doneDrainingChan <- struct{}{}
79 | case <-ticker:
80 | for r.process() {
81 | }
82 | }
83 | }
84 | }
85 |
86 | func (r *requeuer) process() bool {
87 | conn := r.pool.Get()
88 | defer conn.Close()
89 |
90 | r.redisRequeueArgs[len(r.redisRequeueArgs)-1] = nowEpochSeconds()
91 |
92 | res, err := redis.String(r.redisRequeueScript.Do(conn, r.redisRequeueArgs...))
93 | if err == redis.ErrNil {
94 | return false
95 | } else if err != nil {
96 | logError("requeuer.process", err)
97 | return false
98 | }
99 |
100 | if res == "" {
101 | return false
102 | } else if res == "dead" {
103 | logError("requeuer.process.dead", fmt.Errorf("no job name"))
104 | return true
105 | } else if res == "ok" {
106 | return true
107 | }
108 |
109 | return false
110 | }
111 |
--------------------------------------------------------------------------------
/benches/bench_goworker/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync/atomic"
7 | "time"
8 |
9 | "github.com/benmanns/goworker"
10 | "github.com/gocraft/health"
11 | "github.com/gomodule/redigo/redis"
12 | )
13 |
14 | func myJob(queue string, args ...interface{}) error {
15 | atomic.AddInt64(&totcount, 1)
16 | //fmt.Println("job! ", queue)
17 | return nil
18 | }
19 |
20 | var namespace = "bench_test"
21 | var pool = newPool(":6379")
22 |
23 | // go run *.go -queues="myqueue,myqueue2,myqueue3,myqueue4,myqueue5" -namespace="bench_test:" -concurrency=50 -use-number
24 | func main() {
25 |
26 | stream := health.NewStream().AddSink(&health.WriterSink{os.Stdout})
27 | stream.Event("wat")
28 | cleanKeyspace()
29 |
30 | queues := []string{"myqueue", "myqueue2", "myqueue3", "myqueue4", "myqueue5"}
31 | numJobs := 100000 / len(queues)
32 |
33 | job := stream.NewJob("enqueue_all")
34 | for _, q := range queues {
35 | enqueueJobs(q, numJobs)
36 | }
37 | job.Complete(health.Success)
38 |
39 | goworker.Register("MyClass", myJob)
40 |
41 | go monitor()
42 |
43 | // Blocks until process is told to exit via unix signal
44 | goworker.Work()
45 | }
46 |
47 | var totcount int64
48 |
49 | func monitor() {
50 | t := time.Tick(1 * time.Second)
51 |
52 | curT := 0
53 | c1 := int64(0)
54 | c2 := int64(0)
55 | prev := int64(0)
56 |
57 | DALOOP:
58 | for {
59 | select {
60 | case <-t:
61 | curT++
62 | v := atomic.AddInt64(&totcount, 0)
63 | fmt.Printf("after %d seconds, count is %d\n", curT, v)
64 | if curT == 1 {
65 | c1 = v
66 | } else if curT == 3 {
67 | c2 = v
68 | }
69 | if v == prev {
70 | break DALOOP
71 | }
72 | prev = v
73 | }
74 | }
75 | fmt.Println("Jobs/sec: ", float64(c2-c1)/2.0)
76 | os.Exit(0)
77 | }
78 |
79 | func enqueueJobs(queue string, count int) {
80 | conn := pool.Get()
81 | defer conn.Close()
82 |
83 | for i := 0; i < count; i++ {
84 | //workers.Enqueue(queue, "Foo", []int{i})
85 | conn.Do("RPUSH", "bench_test:queue:"+queue, `{"class":"MyClass","args":[]}`)
86 | }
87 | }
88 |
89 | func cleanKeyspace() {
90 | conn := pool.Get()
91 | defer conn.Close()
92 |
93 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
94 | if err != nil {
95 | panic("could not get keys: " + err.Error())
96 | }
97 | for _, k := range keys {
98 | //fmt.Println("deleting ", k)
99 | if _, err := conn.Do("DEL", k); err != nil {
100 | panic("could not del: " + err.Error())
101 | }
102 | }
103 | }
104 |
105 | func newPool(addr string) *redis.Pool {
106 | return &redis.Pool{
107 | MaxActive: 3,
108 | MaxIdle: 3,
109 | IdleTimeout: 240 * time.Second,
110 | Dial: func() (redis.Conn, error) {
111 | c, err := redis.Dial("tcp", addr)
112 | if err != nil {
113 | return nil, err
114 | }
115 | return c, nil
116 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
117 | },
118 | Wait: true,
119 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
120 | // _, err := c.Do("PING")
121 | // return err
122 | //},
123 | }
124 | }
125 |
--------------------------------------------------------------------------------
/benches/bench_work/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync/atomic"
7 | "time"
8 |
9 | "github.com/gocraft/health"
10 | "github.com/gocraft/work"
11 | "github.com/gomodule/redigo/redis"
12 | )
13 |
14 | var namespace = "bench_test"
15 | var pool = newPool(":6379")
16 |
17 | type context struct{}
18 |
19 | func epsilonHandler(job *work.Job) error {
20 | //fmt.Println("hi")
21 | //a := job.Args[0]
22 | //fmt.Printf("job: %s arg: %v\n", job.Name, a)
23 | atomic.AddInt64(&totcount, 1)
24 | return nil
25 | }
26 |
27 | func main() {
28 | stream := health.NewStream().AddSink(&health.WriterSink{os.Stdout})
29 | cleanKeyspace()
30 |
31 | numJobs := 10
32 | jobNames := []string{}
33 |
34 | for i := 0; i < numJobs; i++ {
35 | jobNames = append(jobNames, fmt.Sprintf("job%d", i))
36 | }
37 |
38 | job := stream.NewJob("enqueue_all")
39 | enqueueJobs(jobNames, 10000)
40 | job.Complete(health.Success)
41 |
42 | workerPool := work.NewWorkerPool(context{}, 20, namespace, pool)
43 | for _, jobName := range jobNames {
44 | workerPool.Job(jobName, epsilonHandler)
45 | }
46 | go monitor()
47 |
48 | job = stream.NewJob("run_all")
49 | workerPool.Start()
50 | workerPool.Drain()
51 | job.Complete(health.Success)
52 | select {}
53 | }
54 |
55 | var totcount int64
56 |
57 | func monitor() {
58 | t := time.Tick(1 * time.Second)
59 |
60 | curT := 0
61 | c1 := int64(0)
62 | c2 := int64(0)
63 | prev := int64(0)
64 |
65 | DALOOP:
66 | for {
67 | select {
68 | case <-t:
69 | curT++
70 | v := atomic.AddInt64(&totcount, 0)
71 | fmt.Printf("after %d seconds, count is %d\n", curT, v)
72 | if curT == 1 {
73 | c1 = v
74 | } else if curT == 3 {
75 | c2 = v
76 | }
77 | if v == prev {
78 | break DALOOP
79 | }
80 | prev = v
81 | }
82 | }
83 | fmt.Println("Jobs/sec: ", float64(c2-c1)/2.0)
84 | os.Exit(0)
85 | }
86 |
87 | func enqueueJobs(jobs []string, count int) {
88 | enq := work.NewEnqueuer(namespace, pool)
89 | for _, jobName := range jobs {
90 | for i := 0; i < count; i++ {
91 | enq.Enqueue(jobName, work.Q{"i": i})
92 | }
93 | }
94 | }
95 |
96 | func cleanKeyspace() {
97 | conn := pool.Get()
98 | defer conn.Close()
99 |
100 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
101 | if err != nil {
102 | panic("could not get keys: " + err.Error())
103 | }
104 | for _, k := range keys {
105 | //fmt.Println("deleting ", k)
106 | if _, err := conn.Do("DEL", k); err != nil {
107 | panic("could not del: " + err.Error())
108 | }
109 | }
110 | }
111 |
112 | func newPool(addr string) *redis.Pool {
113 | return &redis.Pool{
114 | MaxActive: 20,
115 | MaxIdle: 20,
116 | IdleTimeout: 240 * time.Second,
117 | Dial: func() (redis.Conn, error) {
118 | c, err := redis.Dial("tcp", addr)
119 | if err != nil {
120 | return nil, err
121 | }
122 | return c, nil
123 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
124 | },
125 | Wait: true,
126 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
127 | // _, err := c.Do("PING")
128 | // return err
129 | //},
130 | }
131 | }
132 |
--------------------------------------------------------------------------------
/benches/bench_jobs/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync/atomic"
7 | "time"
8 |
9 | "github.com/albrow/jobs"
10 | "github.com/gocraft/health"
11 | "github.com/gomodule/redigo/redis"
12 | )
13 |
14 | var namespace = "jobs"
15 | var pool = newPool(":6379")
16 |
17 | func epsilonHandler(i int) error {
18 | atomic.AddInt64(&totcount, 1)
19 | return nil
20 | }
21 |
22 | func main() {
23 | stream := health.NewStream().AddSink(&health.WriterSink{os.Stdout})
24 | cleanKeyspace()
25 |
26 | queueNames := []string{"myqueue", "myqueue2", "myqueue3", "myqueue4", "myqueue5"}
27 | queues := []*jobs.Type{}
28 |
29 | for _, qn := range queueNames {
30 | q, err := jobs.RegisterType(qn, 3, epsilonHandler)
31 | if err != nil {
32 | panic(err)
33 | }
34 | queues = append(queues, q)
35 | }
36 |
37 | job := stream.NewJob("enqueue_all")
38 |
39 | numJobs := 40000 / len(queues)
40 | for _, q := range queues {
41 | for i := 0; i < numJobs; i++ {
42 | _, err := q.Schedule(100, time.Now(), i)
43 | if err != nil {
44 | panic(err)
45 | }
46 | }
47 | }
48 |
49 | job.Complete(health.Success)
50 |
51 | go monitor()
52 |
53 | job = stream.NewJob("run_all")
54 | pool, err := jobs.NewPool(&jobs.PoolConfig{
55 | // NumWorkers: 1000,
56 | // BatchSize: 3000,
57 | })
58 | if err != nil {
59 | panic(err)
60 | }
61 | defer func() {
62 | pool.Close()
63 | if err := pool.Wait(); err != nil {
64 | panic(err)
65 | }
66 | }()
67 | if err := pool.Start(); err != nil {
68 | panic(err)
69 | }
70 | job.Complete(health.Success)
71 | select {}
72 | }
73 |
74 | var totcount int64
75 |
76 | func monitor() {
77 | t := time.Tick(1 * time.Second)
78 |
79 | curT := 0
80 | c1 := int64(0)
81 | c2 := int64(0)
82 | prev := int64(0)
83 |
84 | DALOOP:
85 | for {
86 | select {
87 | case <-t:
88 | curT++
89 | v := atomic.AddInt64(&totcount, 0)
90 | fmt.Printf("after %d seconds, count is %d\n", curT, v)
91 | if curT == 1 {
92 | c1 = v
93 | } else if curT == 3 {
94 | c2 = v
95 | }
96 | if v == prev {
97 | break DALOOP
98 | }
99 | prev = v
100 | }
101 | }
102 | fmt.Println("Jobs/sec: ", float64(c2-c1)/2.0)
103 | os.Exit(0)
104 | }
105 |
106 | func cleanKeyspace() {
107 | conn := pool.Get()
108 | defer conn.Close()
109 |
110 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
111 | if err != nil {
112 | panic("could not get keys: " + err.Error())
113 | }
114 | for _, k := range keys {
115 | //fmt.Println("deleting ", k)
116 | if _, err := conn.Do("DEL", k); err != nil {
117 | panic("could not del: " + err.Error())
118 | }
119 | }
120 | }
121 |
122 | func newPool(addr string) *redis.Pool {
123 | return &redis.Pool{
124 | MaxActive: 20,
125 | MaxIdle: 20,
126 | IdleTimeout: 240 * time.Second,
127 | Dial: func() (redis.Conn, error) {
128 | c, err := redis.Dial("tcp", addr)
129 | if err != nil {
130 | return nil, err
131 | }
132 | return c, nil
133 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
134 | },
135 | Wait: true,
136 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
137 | // _, err := c.Do("PING")
138 | // return err
139 | //},
140 | }
141 | }
142 |
--------------------------------------------------------------------------------
/benches/bench_goworkers/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | "sync/atomic"
7 | "time"
8 |
9 | "github.com/gocraft/health"
10 | "github.com/gomodule/redigo/redis"
11 | "github.com/jrallison/go-workers"
12 | )
13 |
14 | func myJob(m *workers.Msg) {
15 | atomic.AddInt64(&totcount, 1)
16 | }
17 |
18 | var namespace = "bench_test"
19 | var pool = newPool(":6379")
20 |
21 | func main() {
22 |
23 | stream := health.NewStream().AddSink(&health.WriterSink{os.Stdout})
24 | stream.Event("wat")
25 | cleanKeyspace()
26 |
27 | workers.Configure(map[string]string{
28 | // location of redis instance
29 | "server": "localhost:6379",
30 | // instance of the database
31 | "database": "0",
32 | // number of connections to keep open with redis
33 | "pool": "10",
34 | // unique process id for this instance of workers (for proper recovery of inprogress jobs on crash)
35 | "process": "1",
36 | "namespace": namespace,
37 | })
38 | workers.Middleware = &workers.Middlewares{}
39 |
40 | queues := []string{"myqueue", "myqueue2", "myqueue3", "myqueue4", "myqueue5"}
41 | numJobs := 100000 / len(queues)
42 |
43 | job := stream.NewJob("enqueue_all")
44 | for _, q := range queues {
45 | enqueueJobs(q, numJobs)
46 | }
47 | job.Complete(health.Success)
48 |
49 | for _, q := range queues {
50 | workers.Process(q, myJob, 10)
51 | }
52 |
53 | go monitor()
54 |
55 | // Blocks until process is told to exit via unix signal
56 | workers.Run()
57 | }
58 |
59 | var totcount int64
60 |
61 | func monitor() {
62 | t := time.Tick(1 * time.Second)
63 |
64 | curT := 0
65 | c1 := int64(0)
66 | c2 := int64(0)
67 | prev := int64(0)
68 |
69 | DALOOP:
70 | for {
71 | select {
72 | case <-t:
73 | curT++
74 | v := atomic.AddInt64(&totcount, 0)
75 | fmt.Printf("after %d seconds, count is %d\n", curT, v)
76 | if curT == 1 {
77 | c1 = v
78 | } else if curT == 3 {
79 | c2 = v
80 | }
81 | if v == prev {
82 | break DALOOP
83 | }
84 | prev = v
85 | }
86 | }
87 | fmt.Println("Jobs/sec: ", float64(c2-c1)/2.0)
88 | os.Exit(0)
89 | }
90 |
91 | func enqueueJobs(queue string, count int) {
92 | for i := 0; i < count; i++ {
93 | workers.Enqueue(queue, "Foo", []int{i})
94 | }
95 | }
96 |
97 | func cleanKeyspace() {
98 | conn := pool.Get()
99 | defer conn.Close()
100 |
101 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
102 | if err != nil {
103 | panic("could not get keys: " + err.Error())
104 | }
105 | for _, k := range keys {
106 | //fmt.Println("deleting ", k)
107 | if _, err := conn.Do("DEL", k); err != nil {
108 | panic("could not del: " + err.Error())
109 | }
110 | }
111 | }
112 |
113 | func newPool(addr string) *redis.Pool {
114 | return &redis.Pool{
115 | MaxActive: 3,
116 | MaxIdle: 3,
117 | IdleTimeout: 240 * time.Second,
118 | Dial: func() (redis.Conn, error) {
119 | c, err := redis.Dial("tcp", addr)
120 | if err != nil {
121 | return nil, err
122 | }
123 | return c, nil
124 | //return redis.NewLoggingConn(c, log.New(os.Stdout, "", 0), "redis"), err
125 | },
126 | Wait: true,
127 | //TestOnBorrow: func(c redis.Conn, t time.Time) error {
128 | // _, err := c.Do("PING")
129 | // return err
130 | //},
131 | }
132 | }
133 |
--------------------------------------------------------------------------------
/heartbeater.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "os"
5 | "sort"
6 | "strings"
7 | "time"
8 |
9 | "github.com/gomodule/redigo/redis"
10 | )
11 |
12 | const (
13 | beatPeriod = 5 * time.Second
14 | )
15 |
16 | type workerPoolHeartbeater struct {
17 | workerPoolID string
18 | namespace string // eg, "myapp-work"
19 | pool *redis.Pool
20 | beatPeriod time.Duration
21 | concurrency uint
22 | jobNames string
23 | startedAt int64
24 | pid int
25 | hostname string
26 | workerIDs string
27 |
28 | stopChan chan struct{}
29 | doneStoppingChan chan struct{}
30 | }
31 |
32 | func newWorkerPoolHeartbeater(namespace string, pool *redis.Pool, workerPoolID string, jobTypes map[string]*jobType, concurrency uint, workerIDs []string) *workerPoolHeartbeater {
33 | h := &workerPoolHeartbeater{
34 | workerPoolID: workerPoolID,
35 | namespace: namespace,
36 | pool: pool,
37 | beatPeriod: beatPeriod,
38 | concurrency: concurrency,
39 | stopChan: make(chan struct{}),
40 | doneStoppingChan: make(chan struct{}),
41 | }
42 |
43 | jobNames := make([]string, 0, len(jobTypes))
44 | for k := range jobTypes {
45 | jobNames = append(jobNames, k)
46 | }
47 | sort.Strings(jobNames)
48 | h.jobNames = strings.Join(jobNames, ",")
49 |
50 | sort.Strings(workerIDs)
51 | h.workerIDs = strings.Join(workerIDs, ",")
52 |
53 | h.pid = os.Getpid()
54 | host, err := os.Hostname()
55 | if err != nil {
56 | logError("heartbeat.hostname", err)
57 | host = "hostname_errored"
58 | }
59 | h.hostname = host
60 |
61 | return h
62 | }
63 |
64 | func (h *workerPoolHeartbeater) start() {
65 | go h.loop()
66 | }
67 |
68 | func (h *workerPoolHeartbeater) stop() {
69 | h.stopChan <- struct{}{}
70 | <-h.doneStoppingChan
71 | }
72 |
73 | func (h *workerPoolHeartbeater) loop() {
74 | h.startedAt = nowEpochSeconds()
75 | h.heartbeat() // do it right away
76 | ticker := time.Tick(h.beatPeriod)
77 | for {
78 | select {
79 | case <-h.stopChan:
80 | h.removeHeartbeat()
81 | h.doneStoppingChan <- struct{}{}
82 | return
83 | case <-ticker:
84 | h.heartbeat()
85 | }
86 | }
87 | }
88 |
89 | func (h *workerPoolHeartbeater) heartbeat() {
90 | conn := h.pool.Get()
91 | defer conn.Close()
92 |
93 | workerPoolsKey := redisKeyWorkerPools(h.namespace)
94 | heartbeatKey := redisKeyHeartbeat(h.namespace, h.workerPoolID)
95 |
96 | conn.Send("SADD", workerPoolsKey, h.workerPoolID)
97 | conn.Send("HMSET", heartbeatKey,
98 | "heartbeat_at", nowEpochSeconds(),
99 | "started_at", h.startedAt,
100 | "job_names", h.jobNames,
101 | "concurrency", h.concurrency,
102 | "worker_ids", h.workerIDs,
103 | "host", h.hostname,
104 | "pid", h.pid,
105 | )
106 |
107 | if err := conn.Flush(); err != nil {
108 | logError("heartbeat", err)
109 | }
110 | }
111 |
112 | func (h *workerPoolHeartbeater) removeHeartbeat() {
113 | conn := h.pool.Get()
114 | defer conn.Close()
115 |
116 | workerPoolsKey := redisKeyWorkerPools(h.namespace)
117 | heartbeatKey := redisKeyHeartbeat(h.namespace, h.workerPoolID)
118 |
119 | conn.Send("SREM", workerPoolsKey, h.workerPoolID)
120 | conn.Send("DEL", heartbeatKey)
121 |
122 | if err := conn.Flush(); err != nil {
123 | logError("remove_heartbeat", err)
124 | }
125 | }
126 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/DeadJobs.test.js:
--------------------------------------------------------------------------------
1 | import './TestSetup';
2 | import expect from 'expect';
3 | import DeadJobs from './DeadJobs';
4 | import React from 'react';
5 | import { mount } from 'enzyme';
6 |
7 | describe('DeadJobs', () => {
8 | it('shows dead jobs', () => {
9 | let deadJobs = mount(<DeadJobs />);
10 |
11 | expect(deadJobs.state().selected.length).toEqual(0);
12 | expect(deadJobs.state().jobs.length).toEqual(0);
13 |
14 | deadJobs.setState({
15 | count: 2,
16 | jobs: [
17 | {id: 1, name: 'test', args: {}, t: 1467760821, err: 'err1'},
18 | {id: 2, name: 'test2', args: {}, t: 1467760822, err: 'err2'}
19 | ]
20 | });
21 |
22 | expect(deadJobs.state().selected.length).toEqual(0);
23 | expect(deadJobs.state().jobs.length).toEqual(2);
24 |
25 | let checkbox = deadJobs.find('input');
26 | expect(checkbox.length).toEqual(3);
27 | expect(checkbox.at(0).props().checked).toEqual(false);
28 | expect(checkbox.at(1).props().checked).toEqual(false);
29 | expect(checkbox.at(2).props().checked).toEqual(false);
30 |
31 | checkbox.at(0).simulate('change');
32 | checkbox = deadJobs.find('input');
33 | expect(checkbox.length).toEqual(3);
34 | expect(checkbox.at(0).props().checked).toEqual(true);
35 | expect(checkbox.at(1).props().checked).toEqual(true);
36 | expect(checkbox.at(2).props().checked).toEqual(true);
37 |
38 | checkbox.at(1).simulate('change');
39 | checkbox = deadJobs.find('input');
40 | expect(checkbox.length).toEqual(3);
41 | expect(checkbox.at(0).props().checked).toEqual(true);
42 | expect(checkbox.at(1).props().checked).toEqual(false);
43 | expect(checkbox.at(2).props().checked).toEqual(true);
44 |
45 | checkbox.at(1).simulate('change');
46 | checkbox = deadJobs.find('input');
47 | expect(checkbox.length).toEqual(3);
48 | expect(checkbox.at(0).props().checked).toEqual(true);
49 | expect(checkbox.at(1).props().checked).toEqual(true);
50 | expect(checkbox.at(2).props().checked).toEqual(true);
51 |
52 | let button = deadJobs.find('button');
53 | expect(button.length).toEqual(4);
54 | button.at(0).simulate('click');
55 | button.at(1).simulate('click');
56 | button.at(2).simulate('click');
57 | button.at(3).simulate('click');
58 |
59 | checkbox.at(0).simulate('change');
60 |
61 | checkbox = deadJobs.find('input');
62 | expect(checkbox.length).toEqual(3);
63 | expect(checkbox.at(0).props().checked).toEqual(false);
64 | expect(checkbox.at(1).props().checked).toEqual(false);
65 | expect(checkbox.at(2).props().checked).toEqual(false);
66 | });
67 |
68 | it('has pages', () => {
69 | let deadJobs = mount(<DeadJobs />);
70 |
71 | let genJob = (n) => {
72 | let job = [];
73 | for (let i = 1; i <= n; i++) {
74 | job.push({
75 | id: i,
76 | name: 'test',
77 | args: {},
78 | t: 1467760821,
79 | err: 'err',
80 | });
81 | }
82 | return job;
83 | };
84 | deadJobs.setState({
85 | count: 21,
86 | jobs: genJob(21)
87 | });
88 |
89 | expect(deadJobs.state().jobs.length).toEqual(21);
90 | expect(deadJobs.state().page).toEqual(1);
91 |
92 | let pageList = deadJobs.find('PageList');
93 | expect(pageList.length).toEqual(1);
94 |
95 | pageList.at(0).props().jumpTo(2)();
96 | expect(deadJobs.state().page).toEqual(2);
97 | });
98 | });
99 |
--------------------------------------------------------------------------------
/observer_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "testing"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestObserverStarted(t *testing.T) {
12 | pool := newTestPool(":6379")
13 | ns := "work"
14 |
15 | tMock := int64(1425263401)
16 | setNowEpochSecondsMock(tMock)
17 | defer resetNowEpochSecondsMock()
18 |
19 | observer := newObserver(ns, pool, "abcd")
20 | observer.start()
21 | observer.observeStarted("foo", "bar", Q{"a": 1, "b": "wat"})
22 | //observer.observeDone("foo", "bar", nil)
23 | observer.drain()
24 | observer.stop()
25 |
26 | h := readHash(pool, redisKeyWorkerObservation(ns, "abcd"))
27 | assert.Equal(t, "foo", h["job_name"])
28 | assert.Equal(t, "bar", h["job_id"])
29 | assert.Equal(t, fmt.Sprint(tMock), h["started_at"])
30 | assert.Equal(t, `{"a":1,"b":"wat"}`, h["args"])
31 | }
32 |
33 | func TestObserverStartedDone(t *testing.T) {
34 | pool := newTestPool(":6379")
35 | ns := "work"
36 |
37 | tMock := int64(1425263401)
38 | setNowEpochSecondsMock(tMock)
39 | defer resetNowEpochSecondsMock()
40 |
41 | observer := newObserver(ns, pool, "abcd")
42 | observer.start()
43 | observer.observeStarted("foo", "bar", Q{"a": 1, "b": "wat"})
44 | observer.observeDone("foo", "bar", nil)
45 | observer.drain()
46 | observer.stop()
47 |
48 | h := readHash(pool, redisKeyWorkerObservation(ns, "abcd"))
49 | assert.Equal(t, 0, len(h))
50 | }
51 |
52 | func TestObserverCheckin(t *testing.T) {
53 | pool := newTestPool(":6379")
54 | ns := "work"
55 |
56 | observer := newObserver(ns, pool, "abcd")
57 | observer.start()
58 |
59 | tMock := int64(1425263401)
60 | setNowEpochSecondsMock(tMock)
61 | defer resetNowEpochSecondsMock()
62 | observer.observeStarted("foo", "bar", Q{"a": 1, "b": "wat"})
63 |
64 | tMockCheckin := int64(1425263402)
65 | setNowEpochSecondsMock(tMockCheckin)
66 | observer.observeCheckin("foo", "bar", "doin it")
67 | observer.drain()
68 | observer.stop()
69 |
70 | h := readHash(pool, redisKeyWorkerObservation(ns, "abcd"))
71 | assert.Equal(t, "foo", h["job_name"])
72 | assert.Equal(t, "bar", h["job_id"])
73 | assert.Equal(t, fmt.Sprint(tMock), h["started_at"])
74 | assert.Equal(t, `{"a":1,"b":"wat"}`, h["args"])
75 | assert.Equal(t, "doin it", h["checkin"])
76 | assert.Equal(t, fmt.Sprint(tMockCheckin), h["checkin_at"])
77 | }
78 |
79 | func TestObserverCheckinFromJob(t *testing.T) {
80 | pool := newTestPool(":6379")
81 | ns := "work"
82 |
83 | observer := newObserver(ns, pool, "abcd")
84 | observer.start()
85 |
86 | tMock := int64(1425263401)
87 | setNowEpochSecondsMock(tMock)
88 | defer resetNowEpochSecondsMock()
89 | observer.observeStarted("foo", "barbar", Q{"a": 1, "b": "wat"})
90 |
91 | tMockCheckin := int64(1425263402)
92 | setNowEpochSecondsMock(tMockCheckin)
93 |
94 | j := &Job{Name: "foo", ID: "barbar", observer: observer}
95 | j.Checkin("sup")
96 |
97 | observer.drain()
98 | observer.stop()
99 |
100 | h := readHash(pool, redisKeyWorkerObservation(ns, "abcd"))
101 | assert.Equal(t, "foo", h["job_name"])
102 | assert.Equal(t, "barbar", h["job_id"])
103 | assert.Equal(t, fmt.Sprint(tMock), h["started_at"])
104 | assert.Equal(t, "sup", h["checkin"])
105 | assert.Equal(t, fmt.Sprint(tMockCheckin), h["checkin_at"])
106 | }
107 |
108 | func readHash(pool *redis.Pool, key string) map[string]string {
109 | m := make(map[string]string)
110 |
111 | conn := pool.Get()
112 | defer conn.Close()
113 |
114 | v, err := redis.Strings(conn.Do("HGETALL", key))
115 | if err != nil {
116 | panic("could not delete retry/dead queue: " + err.Error())
117 | }
118 |
119 | for i, l := 0, len(v); i < l; i += 2 {
120 | m[v[i]] = v[i+1]
121 | }
122 |
123 | return m
124 | }
125 |
--------------------------------------------------------------------------------
/periodic_enqueuer.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "time"
7 |
8 | "github.com/gomodule/redigo/redis"
9 | "github.com/robfig/cron/v3"
10 | )
11 |
12 | const (
13 | periodicEnqueuerSleep = 2 * time.Minute
14 | periodicEnqueuerHorizon = 4 * time.Minute
15 | )
16 |
17 | type periodicEnqueuer struct {
18 | namespace string
19 | pool *redis.Pool
20 | periodicJobs []*periodicJob
21 | scheduledPeriodicJobs []*scheduledPeriodicJob
22 | stopChan chan struct{}
23 | doneStoppingChan chan struct{}
24 | }
25 |
26 | type periodicJob struct {
27 | jobName string
28 | spec string
29 | schedule cron.Schedule
30 | }
31 |
32 | type scheduledPeriodicJob struct {
33 | scheduledAt time.Time
34 | scheduledAtEpoch int64
35 | *periodicJob
36 | }
37 |
38 | func newPeriodicEnqueuer(namespace string, pool *redis.Pool, periodicJobs []*periodicJob) *periodicEnqueuer {
39 | return &periodicEnqueuer{
40 | namespace: namespace,
41 | pool: pool,
42 | periodicJobs: periodicJobs,
43 | stopChan: make(chan struct{}),
44 | doneStoppingChan: make(chan struct{}),
45 | }
46 | }
47 |
48 | func (pe *periodicEnqueuer) start() {
49 | go pe.loop()
50 | }
51 |
52 | func (pe *periodicEnqueuer) stop() {
53 | pe.stopChan <- struct{}{}
54 | <-pe.doneStoppingChan
55 | }
56 |
57 | func (pe *periodicEnqueuer) loop() {
58 | // Enqueue periodically, adding jitter so concurrent worker pools don't all enqueue at once
59 | timer := time.NewTimer(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
60 | defer timer.Stop()
61 |
62 | if pe.shouldEnqueue() {
63 | err := pe.enqueue()
64 | if err != nil {
65 | logError("periodic_enqueuer.loop.enqueue", err)
66 | }
67 | }
68 |
69 | for {
70 | select {
71 | case <-pe.stopChan:
72 | pe.doneStoppingChan <- struct{}{}
73 | return
74 | case <-timer.C:
75 | timer.Reset(periodicEnqueuerSleep + time.Duration(rand.Intn(30))*time.Second)
76 | if pe.shouldEnqueue() {
77 | err := pe.enqueue()
78 | if err != nil {
79 | logError("periodic_enqueuer.loop.enqueue", err)
80 | }
81 | }
82 | }
83 | }
84 | }
85 |
86 | func (pe *periodicEnqueuer) enqueue() error {
87 | now := nowEpochSeconds()
88 | nowTime := time.Unix(now, 0)
89 | horizon := nowTime.Add(periodicEnqueuerHorizon)
90 |
91 | conn := pe.pool.Get()
92 | defer conn.Close()
93 |
94 | for _, pj := range pe.periodicJobs {
95 | for t := pj.schedule.Next(nowTime); t.Before(horizon); t = pj.schedule.Next(t) {
96 | epoch := t.Unix()
97 | id := makeUniquePeriodicID(pj.jobName, pj.spec, epoch)
98 |
99 | job := &Job{
100 | Name: pj.jobName,
101 | ID: id,
102 |
103 | // This is technically wrong, but this lets the bytes be identical for the same periodic job instance. If we don't do this, we'd need to use a different approach -- probably giving each periodic job its own history of the past 100 periodic jobs, and only scheduling a job if it's not in the history.
104 | EnqueuedAt: epoch,
105 | Args: nil,
106 | }
107 |
108 | rawJSON, err := job.serialize()
109 | if err != nil {
110 | return err
111 | }
112 |
113 | _, err = conn.Do("ZADD", redisKeyScheduled(pe.namespace), epoch, rawJSON)
114 | if err != nil {
115 | return err
116 | }
117 | }
118 | }
119 |
120 | _, err := conn.Do("SET", redisKeyLastPeriodicEnqueue(pe.namespace), now)
121 |
122 | return err
123 | }
124 |
125 | func (pe *periodicEnqueuer) shouldEnqueue() bool {
126 | conn := pe.pool.Get()
127 | defer conn.Close()
128 |
129 | lastEnqueue, err := redis.Int64(conn.Do("GET", redisKeyLastPeriodicEnqueue(pe.namespace)))
130 | if err == redis.ErrNil {
131 | return true
132 | } else if err != nil {
133 | logError("periodic_enqueuer.should_enqueue", err)
134 | return true
135 | }
136 |
137 | return lastEnqueue < (nowEpochSeconds() - int64(periodicEnqueuerSleep/time.Minute))
138 | }
139 |
140 | func makeUniquePeriodicID(name, spec string, epoch int64) string {
141 | return fmt.Sprintf("periodic:%s:%s:%d", name, spec, epoch)
142 | }
143 |
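For each registered periodic job the enqueuer walks its cron schedule from now out to a four-minute horizon and ZADDs a job whose ID encodes the name, spec, and target epoch into the scheduled set, so repeated runs (including runs from other worker pools) are idempotent; shouldEnqueue gates the work on a last-enqueue timestamp. From the application side, periodic jobs are registered on the worker pool. A minimal sketch, assuming the exported PeriodicallyEnqueue method and a local Redis on :6379 (job name, namespace, and spec are illustrative):

package main

import (
	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

type Context struct{}

func cleanupHandler(job *work.Job) error {
	// ... the actual periodic work would go here ...
	return nil
}

func main() {
	pool := &redis.Pool{
		MaxActive: 5,
		MaxIdle:   5,
		Dial:      func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}

	wp := work.NewWorkerPool(Context{}, 10, "myapp-work", pool)
	wp.Job("cleanup", cleanupHandler)

	// Six-field cron spec (with a seconds field), matching the specs used in
	// periodic_enqueuer_test.go: second 0 of every 5th minute.
	wp.PeriodicallyEnqueue("0 */5 * * * *", "cleanup")

	wp.Start()
	select {} // block; the enqueuer keeps writing upcoming runs to the scheduled set
}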
--------------------------------------------------------------------------------
/run_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "reflect"
6 | "testing"
7 |
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestRunBasicMiddleware(t *testing.T) {
12 | mw1 := func(j *Job, next NextMiddlewareFunc) error {
13 | j.setArg("mw1", "mw1")
14 | return next()
15 | }
16 |
17 | mw2 := func(c *tstCtx, j *Job, next NextMiddlewareFunc) error {
18 | c.record(j.Args["mw1"].(string))
19 | c.record("mw2")
20 | return next()
21 | }
22 |
23 | mw3 := func(c *tstCtx, j *Job, next NextMiddlewareFunc) error {
24 | c.record("mw3")
25 | return next()
26 | }
27 |
28 | h1 := func(c *tstCtx, j *Job) error {
29 | c.record("h1")
30 | c.record(j.Args["a"].(string))
31 | return nil
32 | }
33 |
34 | middleware := []*middlewareHandler{
35 | {IsGeneric: true, GenericMiddlewareHandler: mw1},
36 | {IsGeneric: false, DynamicMiddleware: reflect.ValueOf(mw2)},
37 | {IsGeneric: false, DynamicMiddleware: reflect.ValueOf(mw3)},
38 | }
39 |
40 | jt := &jobType{
41 | Name: "foo",
42 | IsGeneric: false,
43 | DynamicHandler: reflect.ValueOf(h1),
44 | }
45 |
46 | job := &Job{
47 | Name: "foo",
48 | Args: map[string]interface{}{"a": "foo"},
49 | }
50 |
51 | v, err := runJob(job, tstCtxType, middleware, jt)
52 | assert.NoError(t, err)
53 | c := v.Interface().(*tstCtx)
54 | assert.Equal(t, "mw1mw2mw3h1foo", c.String())
55 | }
56 |
57 | func TestRunHandlerError(t *testing.T) {
58 | mw1 := func(j *Job, next NextMiddlewareFunc) error {
59 | return next()
60 | }
61 | h1 := func(c *tstCtx, j *Job) error {
62 | c.record("h1")
63 | return fmt.Errorf("h1_err")
64 | }
65 |
66 | middleware := []*middlewareHandler{
67 | {IsGeneric: true, GenericMiddlewareHandler: mw1},
68 | }
69 |
70 | jt := &jobType{
71 | Name: "foo",
72 | IsGeneric: false,
73 | DynamicHandler: reflect.ValueOf(h1),
74 | }
75 |
76 | job := &Job{
77 | Name: "foo",
78 | }
79 |
80 | v, err := runJob(job, tstCtxType, middleware, jt)
81 | assert.Error(t, err)
82 | assert.Equal(t, "h1_err", err.Error())
83 |
84 | c := v.Interface().(*tstCtx)
85 | assert.Equal(t, "h1", c.String())
86 | }
87 |
88 | func TestRunMwError(t *testing.T) {
89 | mw1 := func(j *Job, next NextMiddlewareFunc) error {
90 | return fmt.Errorf("mw1_err")
91 | }
92 | h1 := func(c *tstCtx, j *Job) error {
93 | c.record("h1")
94 | return fmt.Errorf("h1_err")
95 | }
96 |
97 | middleware := []*middlewareHandler{
98 | {IsGeneric: true, GenericMiddlewareHandler: mw1},
99 | }
100 |
101 | jt := &jobType{
102 | Name: "foo",
103 | IsGeneric: false,
104 | DynamicHandler: reflect.ValueOf(h1),
105 | }
106 |
107 | job := &Job{
108 | Name: "foo",
109 | }
110 |
111 | _, err := runJob(job, tstCtxType, middleware, jt)
112 | assert.Error(t, err)
113 | assert.Equal(t, "mw1_err", err.Error())
114 | }
115 |
116 | func TestRunHandlerPanic(t *testing.T) {
117 | mw1 := func(j *Job, next NextMiddlewareFunc) error {
118 | return next()
119 | }
120 | h1 := func(c *tstCtx, j *Job) error {
121 | c.record("h1")
122 |
123 | panic("dayam")
124 | }
125 |
126 | middleware := []*middlewareHandler{
127 | {IsGeneric: true, GenericMiddlewareHandler: mw1},
128 | }
129 |
130 | jt := &jobType{
131 | Name: "foo",
132 | IsGeneric: false,
133 | DynamicHandler: reflect.ValueOf(h1),
134 | }
135 |
136 | job := &Job{
137 | Name: "foo",
138 | }
139 |
140 | _, err := runJob(job, tstCtxType, middleware, jt)
141 | assert.Error(t, err)
142 | assert.Equal(t, "dayam", err.Error())
143 | }
144 |
145 | func TestRunMiddlewarePanic(t *testing.T) {
146 | mw1 := func(j *Job, next NextMiddlewareFunc) error {
147 | panic("dayam")
148 | }
149 | h1 := func(c *tstCtx, j *Job) error {
150 | c.record("h1")
151 | return nil
152 | }
153 |
154 | middleware := []*middlewareHandler{
155 | {IsGeneric: true, GenericMiddlewareHandler: mw1},
156 | }
157 |
158 | jt := &jobType{
159 | Name: "foo",
160 | IsGeneric: false,
161 | DynamicHandler: reflect.ValueOf(h1),
162 | }
163 |
164 | job := &Job{
165 | Name: "foo",
166 | }
167 |
168 | _, err := runJob(job, tstCtxType, middleware, jt)
169 | assert.Error(t, err)
170 | assert.Equal(t, "dayam", err.Error())
171 | }
172 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/Processes.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import UnixTime from './UnixTime';
4 | import ShortList from './ShortList';
5 | import styles from './bootstrap.min.css';
6 | import cx from './cx';
7 |
8 | class BusyWorkers extends React.Component {
9 | static propTypes = {
10 | worker: PropTypes.arrayOf(PropTypes.object).isRequired,
11 | }
12 |
13 | render() {
14 | return (
15 |
16 |
17 |
18 |
19 | | Name |
20 | Arguments |
21 | Started At |
22 | Check-in At |
23 | Check-in |
24 |
25 | {
26 | this.props.worker.map((worker) => {
27 | return (
28 |
29 | | {worker.job_name} |
30 | {worker.args_json} |
31 | |
32 | |
33 | {worker.checkin} |
34 |
35 | );
36 | })
37 | }
38 |
39 |
40 |
41 | );
42 | }
43 | }
44 |
45 | export default class Processes extends React.Component {
46 | static propTypes = {
47 | busyWorkerURL: PropTypes.string,
48 | workerPoolURL: PropTypes.string,
49 | }
50 |
51 | state = {
52 | busyWorker: [],
53 | workerPool: []
54 | }
55 |
56 | componentWillMount() {
57 | if (this.props.busyWorkerURL) {
58 | fetch(this.props.busyWorkerURL).
59 | then((resp) => resp.json()).
60 | then((data) => {
61 | if (data) {
62 | this.setState({
63 | busyWorker: data
64 | });
65 | }
66 | });
67 | }
68 | if (this.props.workerPoolURL) {
69 | fetch(this.props.workerPoolURL).
70 | then((resp) => resp.json()).
71 | then((data) => {
72 | let workers = [];
73 | data.map((worker) => {
74 | if (worker.host != '') {
75 | workers.push(worker);
76 | }
77 | });
78 | this.setState({
79 | workerPool: workers
80 | });
81 | });
82 | }
83 | }
84 |
85 | get workerCount() {
86 | let count = 0;
87 | this.state.workerPool.map((pool) => {
88 | count += pool.worker_ids.length;
89 | });
90 | return count;
91 | }
92 |
93 | getBusyPoolWorker(pool) {
94 | let workers = [];
95 | this.state.busyWorker.map((worker) => {
96 | if (pool.worker_ids.includes(worker.worker_id)) {
97 | workers.push(worker);
98 | }
99 | });
100 | return workers;
101 | }
102 |
103 | render() {
104 | return (
105 |
106 |
107 | {this.state.workerPool.length} Worker process(es). {this.state.busyWorker.length} active worker(s) out of {this.workerCount}.
108 | {
109 | this.state.workerPool.map((pool) => {
110 | let busyWorker = this.getBusyPoolWorker(pool);
111 | return (
112 |
113 |
114 |
115 |
116 |
117 | | {pool.host}: {pool.pid} |
118 | Started |
119 | Last Heartbeat |
120 | Concurrency {pool.concurrency} |
121 |
122 |
123 | | Servicing . |
124 |
125 |
126 | | {busyWorker.length} active worker(s) and {pool.worker_ids.length - busyWorker.length} idle. |
127 |
128 |
129 | |
130 |
131 |
132 |
133 | |
134 |
135 |
136 |
137 |
138 |
139 | );
140 | })
141 | }
142 |
143 | );
144 | }
145 | }
146 |
--------------------------------------------------------------------------------
/periodic_enqueuer_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | "github.com/robfig/cron/v3"
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | func TestPeriodicEnqueuer(t *testing.T) {
13 | pool := newTestPool(":6379")
14 | ns := "work"
15 | cleanKeyspace(ns, pool)
16 |
17 | var pjs []*periodicJob
18 | pjs = appendPeriodicJob(pjs, "0/29 * * * * *", "foo") // At seconds 0, 29, and 58 of every minute
19 | pjs = appendPeriodicJob(pjs, "3/49 * * * * *", "bar") // At seconds 3 and 52 of every minute
20 | pjs = appendPeriodicJob(pjs, "* * * 2 * *", "baz") // Every second on the 2nd of the month
21 |
22 | setNowEpochSecondsMock(1468359453)
23 | defer resetNowEpochSecondsMock()
24 |
25 | pe := newPeriodicEnqueuer(ns, pool, pjs)
26 | err := pe.enqueue()
27 | assert.NoError(t, err)
28 |
29 | c := NewClient(ns, pool)
30 | scheduledJobs, count, err := c.ScheduledJobs(1)
31 | assert.NoError(t, err)
32 | assert.EqualValues(t, 20, count)
33 |
34 | expected := []struct {
35 | name string
36 | id string
37 | scheduledFor int64
38 | }{
39 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359472", scheduledFor: 1468359472},
40 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359478", scheduledFor: 1468359478},
41 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359480", scheduledFor: 1468359480},
42 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359483", scheduledFor: 1468359483},
43 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359509", scheduledFor: 1468359509},
44 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359532", scheduledFor: 1468359532},
45 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359538", scheduledFor: 1468359538},
46 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359540", scheduledFor: 1468359540},
47 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359543", scheduledFor: 1468359543},
48 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359569", scheduledFor: 1468359569},
49 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359592", scheduledFor: 1468359592},
50 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359598", scheduledFor: 1468359598},
51 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359600", scheduledFor: 1468359600},
52 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359603", scheduledFor: 1468359603},
53 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359629", scheduledFor: 1468359629},
54 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359652", scheduledFor: 1468359652},
55 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359658", scheduledFor: 1468359658},
56 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359660", scheduledFor: 1468359660},
57 | {name: "bar", id: "periodic:bar:3/49 * * * * *:1468359663", scheduledFor: 1468359663},
58 | {name: "foo", id: "periodic:foo:0/29 * * * * *:1468359689", scheduledFor: 1468359689},
59 | }
60 |
61 | for i, e := range expected {
62 | assert.EqualValues(t, scheduledJobs[i].RunAt, scheduledJobs[i].EnqueuedAt)
63 | assert.Nil(t, scheduledJobs[i].Args)
64 |
65 | assert.Equal(t, e.name, scheduledJobs[i].Name)
66 | assert.Equal(t, e.id, scheduledJobs[i].ID)
67 | assert.Equal(t, e.scheduledFor, scheduledJobs[i].RunAt)
68 | }
69 |
70 | conn := pool.Get()
71 | defer conn.Close()
72 |
73 | // Make sure the last periodic enqueued was set
74 | lastEnqueue, err := redis.Int64(conn.Do("GET", redisKeyLastPeriodicEnqueue(ns)))
75 | assert.NoError(t, err)
76 | assert.EqualValues(t, 1468359453, lastEnqueue)
77 |
78 | setNowEpochSecondsMock(1468359454)
79 |
80 | // Now do it again, and make sure nothing happens!
81 | err = pe.enqueue()
82 | assert.NoError(t, err)
83 |
84 | _, count, err = c.ScheduledJobs(1)
85 | assert.NoError(t, err)
86 | assert.EqualValues(t, 20, count)
87 |
88 | // Make sure the last periodic enqueued was set
89 | lastEnqueue, err = redis.Int64(conn.Do("GET", redisKeyLastPeriodicEnqueue(ns)))
90 | assert.NoError(t, err)
91 | assert.EqualValues(t, 1468359454, lastEnqueue)
92 |
93 | assert.False(t, pe.shouldEnqueue())
94 |
95 | setNowEpochSecondsMock(1468359454 + int64(periodicEnqueuerSleep/time.Minute) + 10)
96 |
97 | assert.True(t, pe.shouldEnqueue())
98 | }
99 |
100 | func TestPeriodicEnqueuerSpawn(t *testing.T) {
101 | pool := newTestPool(":6379")
102 | ns := "work"
103 | cleanKeyspace(ns, pool)
104 |
105 | pe := newPeriodicEnqueuer(ns, pool, nil)
106 | pe.start()
107 | pe.stop()
108 | }
109 |
110 | func appendPeriodicJob(pjs []*periodicJob, spec, jobName string) []*periodicJob {
111 | p := cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)
112 |
113 | sched, err := p.Parse(spec)
114 | if err != nil {
115 | panic(err)
116 | }
117 |
118 | pj := &periodicJob{jobName: jobName, spec: spec, schedule: sched}
119 | return append(pjs, pj)
120 | }
121 |
--------------------------------------------------------------------------------
/webui/internal/assets/src/DeadJobs.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import PropTypes from 'prop-types';
3 | import PageList from './PageList';
4 | import UnixTime from './UnixTime';
5 | import styles from './bootstrap.min.css';
6 | import cx from './cx';
7 |
8 | export default class DeadJobs extends React.Component {
9 | static propTypes = {
10 | fetchURL: PropTypes.string,
11 | deleteURL: PropTypes.string,
12 | deleteAllURL: PropTypes.string,
13 | retryURL: PropTypes.string,
14 | retryAllURL: PropTypes.string,
15 | }
16 |
17 | state = {
18 | selected: [],
19 | page: 1,
20 | count: 0,
21 | jobs: []
22 | }
23 |
24 | fetch() {
25 | if (!this.props.fetchURL) {
26 | return;
27 | }
28 | fetch(`${this.props.fetchURL}?page=${this.state.page}`).
29 | then((resp) => resp.json()).
30 | then((data) => {
31 | this.setState({
32 | selected: [],
33 | count: data.count,
34 | jobs: data.jobs
35 | });
36 | });
37 | }
38 |
39 | componentWillMount() {
40 | this.fetch();
41 | }
42 |
43 | updatePage(page) {
44 | this.setState({page: page}, this.fetch);
45 | }
46 |
47 | checked(job) {
48 | return this.state.selected.includes(job);
49 | }
50 |
51 | check(job) {
52 | var index = this.state.selected.indexOf(job);
53 | if (index >= 0) {
54 | this.state.selected.splice(index, 1);
55 | } else {
56 | this.state.selected.push(job);
57 | }
58 | this.setState({
59 | selected: this.state.selected
60 | });
61 | }
62 |
63 | checkAll() {
64 | if (this.state.selected.length > 0) {
65 | this.setState({selected: []});
66 | } else {
67 | this.state.jobs.map((job) => {
68 | this.state.selected.push(job);
69 | });
70 | this.setState({
71 | selected: this.state.selected
72 | });
73 | }
74 | }
75 |
76 | deleteAll() {
77 | if (!this.props.deleteAllURL) {
78 | return;
79 | }
80 | fetch(this.props.deleteAllURL, {method: 'post'}).then(() => {
81 | this.updatePage(1);
82 | });
83 | }
84 |
85 | deleteSelected() {
86 | let p = [];
87 | this.state.selected.map((job) => {
88 | if (!this.props.deleteURL) {
89 | return;
90 | }
91 | p.push(fetch(`${this.props.deleteURL}/${job.died_at}/${job.id}`, {method: 'post'}));
92 | });
93 |
94 | Promise.all(p).then(() => {
95 | this.fetch();
96 | });
97 | }
98 |
99 | retryAll() {
100 | if (!this.props.retryAllURL) {
101 | return;
102 | }
103 | fetch(this.props.retryAllURL, {method: 'post'}).then(() => {
104 | this.updatePage(1);
105 | });
106 | }
107 |
108 | retrySelected() {
109 | let p = [];
110 | this.state.selected.map((job) => {
111 | if (!this.props.retryURL) {
112 | return;
113 | }
114 | p.push(fetch(`${this.props.retryURL}/${job.died_at}/${job.id}`, {method: 'post'}));
115 | });
116 |
117 | Promise.all(p).then(() => {
118 | this.fetch();
119 | });
120 | }
121 |
122 | render() {
123 | return (
124 |
125 |
126 |
Dead Jobs
127 |
128 |
{this.state.count} job(s) are dead.
129 |
() => this.updatePage(page)}/>
130 |
131 |
157 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 | );
166 | }
167 | }
168 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | github.com/albrow/jobs v0.4.2 h1:AhhNNgtnOz3h+Grt6uuRJP+uj/AVq+ZhIBY8Mzkf4TM=
2 | github.com/albrow/jobs v0.4.2/go.mod h1:e4sWh7D1DxPbpxrzJhNo/cMARAljpTYF/osgh2j3+r8=
3 | github.com/benmanns/goworker v0.1.3 h1:ekwn7WiKsn8oUOKfbHDqsA6g5bXz/uEZ9AdnKgtAECY=
4 | github.com/benmanns/goworker v0.1.3/go.mod h1:Gj3m7lTyCswE3+Kta7c79CMOmm5rHJmj2qh/GAmojJ4=
5 | github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
6 | github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
7 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
8 | github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
9 | github.com/braintree/manners v0.0.0-20160418043613-82a8879fc5fd h1:ePesaBzdTmoMQjwqRCLP2jY+jjWMBpwws/LEQdt1fMM=
10 | github.com/braintree/manners v0.0.0-20160418043613-82a8879fc5fd/go.mod h1:TNehV1AhBwtT7Bd+rh8G6MoGDbBLNs/sKdk3nvr4Yzg=
11 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
12 | github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
13 | github.com/customerio/gospec v0.0.0-20130710230057-a5cc0e48aa39 h1:O0YTztXI3XeJXlFhSo4wNb0VBVqSgT+hi/CjNWKvMnY=
14 | github.com/customerio/gospec v0.0.0-20130710230057-a5cc0e48aa39/go.mod h1:OzYUFhPuL2JbjwFwrv6CZs23uBawekc6OZs+g19F0mY=
15 | github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
16 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
17 | github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5 h1:RAV05c0xOkJ3dZGS0JFybxFKZ2WMLabgx3uXnd7rpGs=
18 | github.com/dchest/uniuri v0.0.0-20200228104902-7aecb25e1fe5/go.mod h1:GgB8SF9nRG+GqaDtLcwJZsQFhcogVCJ79j4EdT0c2V4=
19 | github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
20 | github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
21 | github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc=
22 | github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
23 | github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0 h1:pKjeDsx7HGGbjr7VGI1HksxDJqSjaGED3cSw9GeSI98=
24 | github.com/gocraft/health v0.0.0-20170925182251-8675af27fef0/go.mod h1:rWibcVfwbUxi/QXW84U7vNTcIcZFd6miwbt8ritxh/Y=
25 | github.com/gocraft/web v0.0.0-20190207150652-9707327fb69b h1:g2Qcs0B+vOQE1L3a7WQ/JUUSzJnHbTz14qkJSqEWcF4=
26 | github.com/gocraft/web v0.0.0-20190207150652-9707327fb69b/go.mod h1:Ag7UMbZNGrnHwaXPJOUKJIVgx4QOWMOWZngrvsN6qak=
27 | github.com/gocraft/work v0.5.1 h1:3bRjMiOo6N4zcRgZWV3Y7uX7R22SF+A9bPTk4xRXr34=
28 | github.com/gocraft/work v0.5.1/go.mod h1:pc3n9Pb5FAESPPGfM0nL+7Q1xtgtRnF8rr/azzhQVlM=
29 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
30 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
31 | github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0=
32 | github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
33 | github.com/jrallison/go-workers v0.0.0-20180112190529-dbf81d0b75bb h1:y9LFhCM3gwK94Xz9/h7GcSVLteky9pFHEkP04AqQupA=
34 | github.com/jrallison/go-workers v0.0.0-20180112190529-dbf81d0b75bb/go.mod h1:ziQRRNHCWZe0wVNzF8y8kCWpso0VMpqHJjB19DSenbE=
35 | github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
36 | github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
37 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
38 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
39 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
40 | github.com/orfjackal/nanospec.go v0.0.0-20120727230329-de4694c1d701 h1:yOXfzNV7qkZ3nf2NPqy4BMzlCmnQzIEbI1vuqKb2FkQ=
41 | github.com/orfjackal/nanospec.go v0.0.0-20120727230329-de4694c1d701/go.mod h1:VtBIF1XX0c1nKkeAPk8i4aXkYopqQgfDqolHUIHPwNI=
42 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
43 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
44 | github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
45 | github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
46 | github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
47 | github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
48 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
49 | github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
50 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
51 | github.com/youtube/vitess v2.1.1+incompatible h1:SE+P7DNX/jw5RHFs5CHRhZQjq402EJFCD33JhzQMdDw=
52 | github.com/youtube/vitess v2.1.1+incompatible/go.mod h1:hpMim5/30F1r+0P8GGtB29d0gWHr0IZ5unS+CG0zMx8=
53 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
54 | golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0 h1:Jcxah/M+oLZ/R4/z5RzfPzGbPXnVDPkEDtf2JnuxN+U=
55 | golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
56 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
57 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
58 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
59 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
60 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
61 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
62 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
63 |
--------------------------------------------------------------------------------
/dead_pool_reaper.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "strings"
7 | "time"
8 |
9 | "github.com/gomodule/redigo/redis"
10 | )
11 |
12 | const (
13 | deadTime = 10 * time.Second // 2 x heartbeat
14 | reapPeriod = 10 * time.Minute
15 | reapJitterSecs = 30
16 | requeueKeysPerJob = 4
17 | )
18 |
19 | type deadPoolReaper struct {
20 | namespace string
21 | pool *redis.Pool
22 | deadTime time.Duration
23 | reapPeriod time.Duration
24 | curJobTypes []string
25 |
26 | stopChan chan struct{}
27 | doneStoppingChan chan struct{}
28 | }
29 |
30 | func newDeadPoolReaper(namespace string, pool *redis.Pool, curJobTypes []string) *deadPoolReaper {
31 | return &deadPoolReaper{
32 | namespace: namespace,
33 | pool: pool,
34 | deadTime: deadTime,
35 | reapPeriod: reapPeriod,
36 | curJobTypes: curJobTypes,
37 | stopChan: make(chan struct{}),
38 | doneStoppingChan: make(chan struct{}),
39 | }
40 | }
41 |
42 | func (r *deadPoolReaper) start() {
43 | go r.loop()
44 | }
45 |
46 | func (r *deadPoolReaper) stop() {
47 | r.stopChan <- struct{}{}
48 | <-r.doneStoppingChan
49 | }
50 |
51 | func (r *deadPoolReaper) loop() {
52 | // Do the first reap after deadTime, giving newly started pools a chance to heartbeat
53 | timer := time.NewTimer(r.deadTime)
54 | defer timer.Stop()
55 |
56 | for {
57 | select {
58 | case <-r.stopChan:
59 | r.doneStoppingChan <- struct{}{}
60 | return
61 | case <-timer.C:
62 | // Schedule next occurrence periodically with jitter
63 | timer.Reset(r.reapPeriod + time.Duration(rand.Intn(reapJitterSecs))*time.Second)
64 |
65 | // Reap
66 | if err := r.reap(); err != nil {
67 | logError("dead_pool_reaper.reap", err)
68 | }
69 | }
70 | }
71 | }
72 |
73 | func (r *deadPoolReaper) reap() error {
74 | // Get dead pools
75 | deadPoolIDs, err := r.findDeadPools()
76 | if err != nil {
77 | return err
78 | }
79 |
80 | conn := r.pool.Get()
81 | defer conn.Close()
82 |
83 | workerPoolsKey := redisKeyWorkerPools(r.namespace)
84 |
85 | // Cleanup all dead pools
86 | for deadPoolID, jobTypes := range deadPoolIDs {
87 | lockJobTypes := jobTypes
88 | // if we found jobs from the heartbeat, requeue them and remove the heartbeat
89 | if len(jobTypes) > 0 {
90 | if err = r.requeueInProgressJobs(deadPoolID, jobTypes); err != nil { return err }
91 | if _, err = conn.Do("DEL", redisKeyHeartbeat(r.namespace, deadPoolID)); err != nil {
92 | return err
93 | }
94 | } else {
95 | // try to clean up locks for the current set of jobs if heartbeat was not found
96 | lockJobTypes = r.curJobTypes
97 | }
98 | // Remove dead pool from worker pools set
99 | if _, err = conn.Do("SREM", workerPoolsKey, deadPoolID); err != nil {
100 | return err
101 | }
102 | // Cleanup any stale lock info
103 | if err = r.cleanStaleLockInfo(deadPoolID, lockJobTypes); err != nil {
104 | return err
105 | }
106 | }
107 |
108 | return nil
109 | }
110 |
111 | func (r *deadPoolReaper) cleanStaleLockInfo(poolID string, jobTypes []string) error {
112 | numKeys := len(jobTypes) * 2
113 | redisReapLocksScript := redis.NewScript(numKeys, redisLuaReapStaleLocks)
114 | var scriptArgs = make([]interface{}, 0, numKeys+1) // +1 for argv[1]
115 |
116 | for _, jobType := range jobTypes {
117 | scriptArgs = append(scriptArgs, redisKeyJobsLock(r.namespace, jobType), redisKeyJobsLockInfo(r.namespace, jobType))
118 | }
119 | scriptArgs = append(scriptArgs, poolID) // ARGV[1]
120 |
121 | conn := r.pool.Get()
122 | defer conn.Close()
123 | if _, err := redisReapLocksScript.Do(conn, scriptArgs...); err != nil {
124 | return err
125 | }
126 |
127 | return nil
128 | }
129 |
130 | func (r *deadPoolReaper) requeueInProgressJobs(poolID string, jobTypes []string) error {
131 | numKeys := len(jobTypes) * requeueKeysPerJob
132 | redisRequeueScript := redis.NewScript(numKeys, redisLuaReenqueueJob)
133 | var scriptArgs = make([]interface{}, 0, numKeys+1)
134 |
135 | for _, jobType := range jobTypes {
136 | // pops from in progress, push into job queue and decrement the queue lock
137 | scriptArgs = append(scriptArgs, redisKeyJobsInProgress(r.namespace, poolID, jobType), redisKeyJobs(r.namespace, jobType), redisKeyJobsLock(r.namespace, jobType), redisKeyJobsLockInfo(r.namespace, jobType)) // KEYS[1-4 * N]
138 | }
139 | scriptArgs = append(scriptArgs, poolID) // ARGV[1]
140 |
141 | conn := r.pool.Get()
142 | defer conn.Close()
143 |
144 | // Keep moving jobs until all queues are empty
145 | for {
146 | values, err := redis.Values(redisRequeueScript.Do(conn, scriptArgs...))
147 | if err == redis.ErrNil {
148 | return nil
149 | } else if err != nil {
150 | return err
151 | }
152 |
153 | if len(values) != 3 {
154 | return fmt.Errorf("need 3 elements back")
155 | }
156 | }
157 | }
158 |
159 | func (r *deadPoolReaper) findDeadPools() (map[string][]string, error) {
160 | conn := r.pool.Get()
161 | defer conn.Close()
162 |
163 | workerPoolsKey := redisKeyWorkerPools(r.namespace)
164 |
165 | workerPoolIDs, err := redis.Strings(conn.Do("SMEMBERS", workerPoolsKey))
166 | if err != nil {
167 | return nil, err
168 | }
169 |
170 | deadPools := map[string][]string{}
171 | for _, workerPoolID := range workerPoolIDs {
172 | heartbeatKey := redisKeyHeartbeat(r.namespace, workerPoolID)
173 | heartbeatAt, err := redis.Int64(conn.Do("HGET", heartbeatKey, "heartbeat_at"))
174 | if err == redis.ErrNil {
175 | // heartbeat expired, save dead pool and use cur set of jobs from reaper
176 | deadPools[workerPoolID] = []string{}
177 | continue
178 | }
179 | if err != nil {
180 | return nil, err
181 | }
182 |
183 | // Check that last heartbeat was long enough ago to consider the pool dead
184 | if time.Unix(heartbeatAt, 0).Add(r.deadTime).After(time.Now()) {
185 | continue
186 | }
187 |
188 | jobTypesList, err := redis.String(conn.Do("HGET", heartbeatKey, "job_names"))
189 | if err == redis.ErrNil {
190 | continue
191 | }
192 | if err != nil {
193 | return nil, err
194 | }
195 |
196 | deadPools[workerPoolID] = strings.Split(jobTypesList, ",")
197 | }
198 |
199 | return deadPools, nil
200 | }
201 |
--------------------------------------------------------------------------------
/job.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "math"
7 | "reflect"
8 | )
9 |
10 | // Job represents a job.
11 | type Job struct {
12 | // Inputs when making a new job
13 | Name string `json:"name,omitempty"`
14 | ID string `json:"id"`
15 | EnqueuedAt int64 `json:"t"`
16 | Args map[string]interface{} `json:"args"`
17 | Unique bool `json:"unique,omitempty"`
18 | UniqueKey string `json:"unique_key,omitempty"`
19 |
20 | // Inputs when retrying
21 | Fails int64 `json:"fails,omitempty"` // number of times this job has failed
22 | LastErr string `json:"err,omitempty"`
23 | FailedAt int64 `json:"failed_at,omitempty"`
24 |
25 | rawJSON []byte
26 | dequeuedFrom []byte
27 | inProgQueue []byte
28 | argError error
29 | observer *observer
30 | }
31 |
32 | // Q is a shortcut to easily specify arguments for jobs when enqueueing them.
33 | // Example: e.Enqueue("send_email", work.Q{"addr": "test@example.com", "track": true})
34 | type Q map[string]interface{}
35 |
36 | func newJob(rawJSON, dequeuedFrom, inProgQueue []byte) (*Job, error) {
37 | var job Job
38 | err := json.Unmarshal(rawJSON, &job)
39 | if err != nil {
40 | return nil, err
41 | }
42 | job.rawJSON = rawJSON
43 | job.dequeuedFrom = dequeuedFrom
44 | job.inProgQueue = inProgQueue
45 | return &job, nil
46 | }
47 |
48 | func (j *Job) serialize() ([]byte, error) {
49 | return json.Marshal(j)
50 | }
51 |
52 | // setArg sets a single named argument on the job.
53 | func (j *Job) setArg(key string, val interface{}) {
54 | if j.Args == nil {
55 | j.Args = make(map[string]interface{})
56 | }
57 | j.Args[key] = val
58 | }
59 |
60 | func (j *Job) failed(err error) {
61 | j.Fails++
62 | j.LastErr = err.Error()
63 | j.FailedAt = nowEpochSeconds()
64 | }
65 |
66 | // Checkin updates the status of the executing job to the specified message. The message is visible within the web UI. This is useful for indicating progress on long-running jobs. For instance, a job that has to process a million records over the course of an hour could call Checkin with the current record number every 10k records.
67 | func (j *Job) Checkin(msg string) {
68 | if j.observer != nil {
69 | j.observer.observeCheckin(j.Name, j.ID, msg)
70 | }
71 | }
72 |
73 | // ArgString returns j.Args[key] typed to a string. If the key is missing or of the wrong type, it sets an argument error
74 | // on the job. This function is meant to be used in the body of a job handling function while extracting arguments,
75 | // followed by a single call to j.ArgError().
76 | func (j *Job) ArgString(key string) string {
77 | v, ok := j.Args[key]
78 | if ok {
79 | typedV, ok := v.(string)
80 | if ok {
81 | return typedV
82 | }
83 | j.argError = typecastError("string", key, v)
84 | } else {
85 | j.argError = missingKeyError("string", key)
86 | }
87 | return ""
88 | }
89 |
90 | // ArgInt64 returns j.Args[key] typed to an int64. If the key is missing or of the wrong type, it sets an argument error
91 | // on the job. This function is meant to be used in the body of a job handling function while extracting arguments,
92 | // followed by a single call to j.ArgError().
93 | func (j *Job) ArgInt64(key string) int64 {
94 | v, ok := j.Args[key]
95 | if ok {
96 | rVal := reflect.ValueOf(v)
97 | if isIntKind(rVal) {
98 | return rVal.Int()
99 | } else if isUintKind(rVal) {
100 | vUint := rVal.Uint()
101 | if vUint <= math.MaxInt64 {
102 | return int64(vUint)
103 | }
104 | } else if isFloatKind(rVal) {
105 | vFloat64 := rVal.Float()
106 | vInt64 := int64(vFloat64)
107 | if vFloat64 == math.Trunc(vFloat64) && vInt64 <= 9007199254740892 && vInt64 >= -9007199254740892 {
108 | return vInt64
109 | }
110 | }
111 | j.argError = typecastError("int64", key, v)
112 | } else {
113 | j.argError = missingKeyError("int64", key)
114 | }
115 | return 0
116 | }
117 |
118 | // ArgFloat64 returns j.Args[key] typed to a float64. If the key is missing or of the wrong type, it sets an argument error
119 | // on the job. This function is meant to be used in the body of a job handling function while extracting arguments,
120 | // followed by a single call to j.ArgError().
121 | func (j *Job) ArgFloat64(key string) float64 {
122 | v, ok := j.Args[key]
123 | if ok {
124 | rVal := reflect.ValueOf(v)
125 | if isIntKind(rVal) {
126 | return float64(rVal.Int())
127 | } else if isUintKind(rVal) {
128 | return float64(rVal.Uint())
129 | } else if isFloatKind(rVal) {
130 | return rVal.Float()
131 | }
132 | j.argError = typecastError("float64", key, v)
133 | } else {
134 | j.argError = missingKeyError("float64", key)
135 | }
136 | return 0.0
137 | }
138 |
139 | // ArgBool returns j.Args[key] typed to a bool. If the key is missing or of the wrong type, it sets an argument error
140 | // on the job. This function is meant to be used in the body of a job handling function while extracting arguments,
141 | // followed by a single call to j.ArgError().
142 | func (j *Job) ArgBool(key string) bool {
143 | v, ok := j.Args[key]
144 | if ok {
145 | typedV, ok := v.(bool)
146 | if ok {
147 | return typedV
148 | }
149 | j.argError = typecastError("bool", key, v)
150 | } else {
151 | j.argError = missingKeyError("bool", key)
152 | }
153 | return false
154 | }
155 |
156 | // ArgError returns the last error generated when extracting typed params. Returns nil if extracting the args went fine.
157 | func (j *Job) ArgError() error {
158 | return j.argError
159 | }
160 |
161 | func isIntKind(v reflect.Value) bool {
162 | k := v.Kind()
163 | return k == reflect.Int || k == reflect.Int8 || k == reflect.Int16 || k == reflect.Int32 || k == reflect.Int64
164 | }
165 |
166 | func isUintKind(v reflect.Value) bool {
167 | k := v.Kind()
168 | return k == reflect.Uint || k == reflect.Uint8 || k == reflect.Uint16 || k == reflect.Uint32 || k == reflect.Uint64
169 | }
170 |
171 | func isFloatKind(v reflect.Value) bool {
172 | k := v.Kind()
173 | return k == reflect.Float32 || k == reflect.Float64
174 | }
175 |
176 | func missingKeyError(jsonType, key string) error {
177 | return fmt.Errorf("looking for a %s in job.Args[%s] but the key wasn't found", jsonType, key)
178 | }
179 |
180 | func typecastError(jsonType, key string, v interface{}) error {
181 | actualType := reflect.TypeOf(v)
182 | return fmt.Errorf("looking for a %s in job.Args[%s] but the value wasn't the right type: %v(%v)", jsonType, key, actualType, v)
183 | }
184 |
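The Arg* helpers are meant to be called back-to-back inside a handler with a single ArgError() check at the end; the first failure is kept and later successful lookups do not clear it (job_test.go below verifies exactly that). A small self-contained sketch of the pattern, with a hypothetical handler and argument names:

package main

import (
	"fmt"

	"github.com/gocraft/work"
)

// exportReport shows the intended extraction pattern: pull every argument
// first, then check ArgError() once.
func exportReport(job *work.Job) error {
	addr := job.ArgString("addr")     // "" if missing or not a string
	userID := job.ArgInt64("user_id") // 0 on failure
	dryRun := job.ArgBool("dry_run")  // false on failure

	if err := job.ArgError(); err != nil {
		return err // the first extraction error wins
	}

	fmt.Printf("exporting report for user %d to %s (dry_run=%v)\n", userID, addr, dryRun)
	return nil
}

func main() {
	// Jobs normally arrive via an Enqueuer; a Job literal is enough to exercise the helpers.
	job := &work.Job{
		Name: "export_report",
		Args: map[string]interface{}{"addr": "ops@example.com", "user_id": 7, "dry_run": true},
	}
	if err := exportReport(job); err != nil {
		fmt.Println("argument error:", err)
	}
}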
--------------------------------------------------------------------------------
/job_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "math"
5 | "testing"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | func TestJobArgumentExtraction(t *testing.T) {
11 | j := Job{}
12 | j.setArg("str1", "bar")
13 |
14 | j.setArg("int1", int64(77))
15 | j.setArg("int2", 77)
16 | j.setArg("int3", uint64(77))
17 | j.setArg("int4", float64(77.0))
18 |
19 | j.setArg("bool1", true)
20 |
21 | j.setArg("float1", 3.14)
22 |
23 | //
24 | // Success cases:
25 | //
26 | vString := j.ArgString("str1")
27 | assert.Equal(t, vString, "bar")
28 | assert.NoError(t, j.ArgError())
29 |
30 | vInt64 := j.ArgInt64("int1")
31 | assert.EqualValues(t, vInt64, 77)
32 | assert.NoError(t, j.ArgError())
33 |
34 | vInt64 = j.ArgInt64("int2")
35 | assert.EqualValues(t, vInt64, 77)
36 | assert.NoError(t, j.ArgError())
37 |
38 | vInt64 = j.ArgInt64("int3")
39 | assert.EqualValues(t, vInt64, 77)
40 | assert.NoError(t, j.ArgError())
41 |
42 | vInt64 = j.ArgInt64("int4")
43 | assert.EqualValues(t, vInt64, 77)
44 | assert.NoError(t, j.ArgError())
45 |
46 | vBool := j.ArgBool("bool1")
47 | assert.Equal(t, vBool, true)
48 | assert.NoError(t, j.ArgError())
49 |
50 | vFloat := j.ArgFloat64("float1")
51 | assert.Equal(t, vFloat, 3.14)
52 | assert.NoError(t, j.ArgError())
53 |
54 | // Missing key results in error:
55 | vString = j.ArgString("str_missing")
56 | assert.Equal(t, vString, "")
57 | assert.Error(t, j.ArgError())
58 | j.argError = nil
59 | assert.NoError(t, j.ArgError())
60 |
61 | vInt64 = j.ArgInt64("int_missing")
62 | assert.EqualValues(t, vInt64, 0)
63 | assert.Error(t, j.ArgError())
64 | j.argError = nil
65 | assert.NoError(t, j.ArgError())
66 |
67 | vBool = j.ArgBool("bool_missing")
68 | assert.Equal(t, vBool, false)
69 | assert.Error(t, j.ArgError())
70 | j.argError = nil
71 | assert.NoError(t, j.ArgError())
72 |
73 | vFloat = j.ArgFloat64("float_missing")
74 | assert.Equal(t, vFloat, 0.0)
75 | assert.Error(t, j.ArgError())
76 | j.argError = nil
77 | assert.NoError(t, j.ArgError())
78 |
79 | 	// Missing string; make sure subsequent successful extractions don't reset the error
80 | vString = j.ArgString("str_missing")
81 | assert.Equal(t, vString, "")
82 | assert.Error(t, j.ArgError())
83 | _ = j.ArgString("str1")
84 | _ = j.ArgInt64("int1")
85 | _ = j.ArgBool("bool1")
86 | _ = j.ArgFloat64("float1")
87 | assert.Error(t, j.ArgError())
88 | }
89 |
90 | func TestJobArgumentExtractionBadString(t *testing.T) {
91 | var testCases = []struct {
92 | key string
93 | val interface{}
94 | good bool
95 | }{
96 | {"a", 1, false},
97 | {"b", false, false},
98 | {"c", "yay", true},
99 | }
100 |
101 | j := Job{}
102 |
103 | for _, tc := range testCases {
104 | j.setArg(tc.key, tc.val)
105 | }
106 |
107 | for _, tc := range testCases {
108 | r := j.ArgString(tc.key)
109 | err := j.ArgError()
110 | if tc.good {
111 | if err != nil {
112 | t.Errorf("Failed test case: %v; err = %v\n", tc, err)
113 | }
114 | if r != tc.val.(string) {
115 | t.Errorf("Failed test case: %v; r = %v\n", tc, r)
116 | }
117 | } else {
118 | if err == nil {
119 | t.Errorf("Failed test case: %v; but err was nil\n", tc)
120 | }
121 | if r != "" {
122 | t.Errorf("Failed test case: %v; but r was %v\n", tc, r)
123 | }
124 | }
125 | j.argError = nil
126 | }
127 | }
128 |
129 | func TestJobArgumentExtractionBadBool(t *testing.T) {
130 | var testCases = []struct {
131 | key string
132 | val interface{}
133 | good bool
134 | }{
135 | {"a", 1, false},
136 | {"b", "boo", false},
137 | {"c", true, true},
138 | {"d", false, true},
139 | }
140 |
141 | j := Job{}
142 |
143 | for _, tc := range testCases {
144 | j.setArg(tc.key, tc.val)
145 | }
146 |
147 | for _, tc := range testCases {
148 | r := j.ArgBool(tc.key)
149 | err := j.ArgError()
150 | if tc.good {
151 | if err != nil {
152 | t.Errorf("Failed test case: %v; err = %v\n", tc, err)
153 | }
154 | if r != tc.val.(bool) {
155 | t.Errorf("Failed test case: %v; r = %v\n", tc, r)
156 | }
157 | } else {
158 | if err == nil {
159 | t.Errorf("Failed test case: %v; but err was nil\n", tc)
160 | }
161 | if r != false {
162 | t.Errorf("Failed test case: %v; but r was %v\n", tc, r)
163 | }
164 | }
165 | j.argError = nil
166 | }
167 | }
168 |
169 | func TestJobArgumentExtractionBadInt(t *testing.T) {
170 | var testCases = []struct {
171 | key string
172 | val interface{}
173 | good bool
174 | }{
175 | {"a", "boo", false},
176 | {"b", true, false},
177 | {"c", 1.1, false},
178 | {"d", 19007199254740892.0, false},
179 | {"e", -19007199254740892.0, false},
180 | {"f", uint64(math.MaxInt64) + 1, false},
181 |
182 | {"z", 0, true},
183 | {"y", 9007199254740892, true},
184 | {"x", 9007199254740892.0, true},
185 | {"w", 573839921, true},
186 | {"v", -573839921, true},
187 | {"u", uint64(math.MaxInt64), true},
188 | }
189 |
190 | j := Job{}
191 |
192 | for _, tc := range testCases {
193 | j.setArg(tc.key, tc.val)
194 | }
195 |
196 | for _, tc := range testCases {
197 | r := j.ArgInt64(tc.key)
198 | err := j.ArgError()
199 | if tc.good {
200 | if err != nil {
201 | t.Errorf("Failed test case: %v; err = %v\n", tc, err)
202 | }
203 | } else {
204 | if err == nil {
205 | t.Errorf("Failed test case: %v; but err was nil\n", tc)
206 | }
207 | if r != 0 {
208 | t.Errorf("Failed test case: %v; but r was %v\n", tc, r)
209 | }
210 | }
211 | j.argError = nil
212 | }
213 | }
214 |
215 | func TestJobArgumentExtractionBadFloat(t *testing.T) {
216 | var testCases = []struct {
217 | key string
218 | val interface{}
219 | good bool
220 | }{
221 | {"a", "boo", false},
222 | {"b", true, false},
223 |
224 | {"z", 0, true},
225 | {"y", 9007199254740892, true},
226 | {"x", 9007199254740892.0, true},
227 | {"w", 573839921, true},
228 | {"v", -573839921, true},
229 | {"u", math.MaxFloat64, true},
230 | {"t", math.SmallestNonzeroFloat64, true},
231 | }
232 |
233 | j := Job{}
234 |
235 | for _, tc := range testCases {
236 | j.setArg(tc.key, tc.val)
237 | }
238 |
239 | for _, tc := range testCases {
240 | r := j.ArgFloat64(tc.key)
241 | err := j.ArgError()
242 | if tc.good {
243 | if err != nil {
244 | t.Errorf("Failed test case: %v; err = %v\n", tc, err)
245 | }
246 | } else {
247 | if err == nil {
248 | t.Errorf("Failed test case: %v; but err was nil\n", tc)
249 | }
250 | if r != 0 {
251 | t.Errorf("Failed test case: %v; but r was %v\n", tc, r)
252 | }
253 | }
254 | j.argError = nil
255 | }
256 | }
257 |
--------------------------------------------------------------------------------
/observer.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "time"
7 |
8 | "github.com/gomodule/redigo/redis"
9 | )
10 |
11 | // An observer observes a single worker. Each worker has its own observer.
12 | type observer struct {
13 | namespace string
14 | workerID string
15 | pool *redis.Pool
16 |
17 | // nil: worker isn't doing anything that we know of
18 | // not nil: the last started observation that we received on the channel.
19 | 	// if we get a checkin, we'll just update the existing observation
20 | currentStartedObservation *observation
21 |
22 | // version of the data that we wrote to redis.
23 | 	// For each observation we get, we'll increment version. When we flush it to redis, we'll update lastWrittenVersion.
24 | // This will keep us from writing to redis unless necessary
25 | version, lastWrittenVersion int64
26 |
27 | observationsChan chan *observation
28 |
29 | stopChan chan struct{}
30 | doneStoppingChan chan struct{}
31 |
32 | drainChan chan struct{}
33 | doneDrainingChan chan struct{}
34 | }
35 |
36 | type observationKind int
37 |
38 | const (
39 | observationKindStarted observationKind = iota
40 | observationKindDone
41 | observationKindCheckin
42 | )
43 |
44 | type observation struct {
45 | kind observationKind
46 |
47 | // These fields always need to be set
48 | jobName string
49 | jobID string
50 |
51 | // These need to be set when starting a job
52 | startedAt int64
53 | arguments map[string]interface{}
54 |
55 | // If we're done w/ the job, err will indicate the success/failure of it
56 | err error // nil: success. not nil: the error we got when running the job
57 |
58 | // If this is a checkin, set these.
59 | checkin string
60 | checkinAt int64
61 | }
62 |
63 | const observerBufferSize = 1024
64 |
65 | func newObserver(namespace string, pool *redis.Pool, workerID string) *observer {
66 | return &observer{
67 | namespace: namespace,
68 | workerID: workerID,
69 | pool: pool,
70 | observationsChan: make(chan *observation, observerBufferSize),
71 |
72 | stopChan: make(chan struct{}),
73 | doneStoppingChan: make(chan struct{}),
74 |
75 | drainChan: make(chan struct{}),
76 | doneDrainingChan: make(chan struct{}),
77 | }
78 | }
79 |
80 | func (o *observer) start() {
81 | go o.loop()
82 | }
83 |
84 | func (o *observer) stop() {
85 | o.stopChan <- struct{}{}
86 | <-o.doneStoppingChan
87 | }
88 |
89 | func (o *observer) drain() {
90 | o.drainChan <- struct{}{}
91 | <-o.doneDrainingChan
92 | }
93 |
94 | func (o *observer) observeStarted(jobName, jobID string, arguments map[string]interface{}) {
95 | o.observationsChan <- &observation{
96 | kind: observationKindStarted,
97 | jobName: jobName,
98 | jobID: jobID,
99 | startedAt: nowEpochSeconds(),
100 | arguments: arguments,
101 | }
102 | }
103 |
104 | func (o *observer) observeDone(jobName, jobID string, err error) {
105 | o.observationsChan <- &observation{
106 | kind: observationKindDone,
107 | jobName: jobName,
108 | jobID: jobID,
109 | err: err,
110 | }
111 | }
112 |
113 | func (o *observer) observeCheckin(jobName, jobID, checkin string) {
114 | o.observationsChan <- &observation{
115 | kind: observationKindCheckin,
116 | jobName: jobName,
117 | jobID: jobID,
118 | checkin: checkin,
119 | checkinAt: nowEpochSeconds(),
120 | }
121 | }
122 |
123 | func (o *observer) loop() {
124 | // Every tick we'll update redis if necessary
125 | // We don't update it on every job because the only purpose of this data is for humans to inspect the system,
126 | 	// and a fast worker could move on to new jobs every few ms.
127 | ticker := time.Tick(1000 * time.Millisecond)
128 |
129 | for {
130 | select {
131 | case <-o.stopChan:
132 | o.doneStoppingChan <- struct{}{}
133 | return
134 | case <-o.drainChan:
135 | DRAIN_LOOP:
136 | for {
137 | select {
138 | case obv := <-o.observationsChan:
139 | o.process(obv)
140 | default:
141 | if err := o.writeStatus(o.currentStartedObservation); err != nil {
142 | logError("observer.write", err)
143 | }
144 | o.doneDrainingChan <- struct{}{}
145 | break DRAIN_LOOP
146 | }
147 | }
148 | case <-ticker:
149 | if o.lastWrittenVersion != o.version {
150 | if err := o.writeStatus(o.currentStartedObservation); err != nil {
151 | logError("observer.write", err)
152 | }
153 | o.lastWrittenVersion = o.version
154 | }
155 | case obv := <-o.observationsChan:
156 | o.process(obv)
157 | }
158 | }
159 | }
160 |
161 | func (o *observer) process(obv *observation) {
162 | if obv.kind == observationKindStarted {
163 | o.currentStartedObservation = obv
164 | } else if obv.kind == observationKindDone {
165 | o.currentStartedObservation = nil
166 | } else if obv.kind == observationKindCheckin {
167 | if (o.currentStartedObservation != nil) && (obv.jobID == o.currentStartedObservation.jobID) {
168 | o.currentStartedObservation.checkin = obv.checkin
169 | o.currentStartedObservation.checkinAt = obv.checkinAt
170 | } else {
171 | logError("observer.checkin_mismatch", fmt.Errorf("got checkin but mismatch on job ID or no job"))
172 | }
173 | }
174 | o.version++
175 |
176 | 	// If this is the first observation we got, just go ahead and write it.
177 | if o.version == 1 {
178 | if err := o.writeStatus(o.currentStartedObservation); err != nil {
179 | logError("observer.first_write", err)
180 | }
181 | o.lastWrittenVersion = o.version
182 | }
183 | }
184 |
185 | func (o *observer) writeStatus(obv *observation) error {
186 | conn := o.pool.Get()
187 | defer conn.Close()
188 |
189 | key := redisKeyWorkerObservation(o.namespace, o.workerID)
190 |
191 | if obv == nil {
192 | if _, err := conn.Do("DEL", key); err != nil {
193 | return err
194 | }
195 | } else {
196 | // hash:
197 | 		// job_name -> obv.jobName
198 | // job_id -> obv.jobID
199 | // started_at -> obv.startedAt
200 | // args -> json.Encode(obv.arguments)
201 | // checkin -> obv.checkin
202 | // checkin_at -> obv.checkinAt
203 |
204 | var argsJSON []byte
205 | if len(obv.arguments) == 0 {
206 | argsJSON = []byte("")
207 | } else {
208 | var err error
209 | argsJSON, err = json.Marshal(obv.arguments)
210 | if err != nil {
211 | return err
212 | }
213 | }
214 |
215 | args := make([]interface{}, 0, 13)
216 | args = append(args,
217 | key,
218 | "job_name", obv.jobName,
219 | "job_id", obv.jobID,
220 | "started_at", obv.startedAt,
221 | "args", argsJSON,
222 | )
223 |
224 | if (obv.checkin != "") && (obv.checkinAt > 0) {
225 | args = append(args,
226 | "checkin", obv.checkin,
227 | "checkin_at", obv.checkinAt,
228 | )
229 | }
230 |
231 | conn.Send("HMSET", args...)
232 | conn.Send("EXPIRE", key, 60*60*24)
233 | if err := conn.Flush(); err != nil {
234 | return err
235 | }
236 |
237 | }
238 |
239 | return nil
240 | }
241 |
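A short sketch, inside package work, of how a worker drives this observer over one job's lifetime; the namespace, job name, job ID, and checkin message are placeholders.

package work

import "github.com/gomodule/redigo/redis"

// observeOneJob mirrors what worker.go does with its observer: start the
// loop, record a started/checkin/done sequence for a job, then drain and stop
// so the final state is flushed to redis before shutdown.
func observeOneJob(pool *redis.Pool) {
	ob := newObserver("myapp-work", pool, makeIdentifier())
	ob.start()

	args := map[string]interface{}{"address": "test@example.com"}
	ob.observeStarted("send_email", "job-id-123", args)
	ob.observeCheckin("send_email", "job-id-123", "sent 50 of 100")
	ob.observeDone("send_email", "job-id-123", nil) // nil err marks success

	ob.drain() // process buffered observations and write the status to redis
	ob.stop()
}
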
--------------------------------------------------------------------------------
/webui/webui.go:
--------------------------------------------------------------------------------
1 | package webui
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "net/http"
7 | "strconv"
8 | "sync"
9 |
10 | "github.com/braintree/manners"
11 | "github.com/gocraft/web"
12 | "github.com/gocraft/work"
13 | "github.com/gocraft/work/webui/internal/assets"
14 | "github.com/gomodule/redigo/redis"
15 | )
16 |
17 | // Server implements an HTTP server which exposes a JSON API to view and manage gocraft/work items.
18 | type Server struct {
19 | namespace string
20 | pool *redis.Pool
21 | client *work.Client
22 | hostPort string
23 | server *manners.GracefulServer
24 | wg sync.WaitGroup
25 | router *web.Router
26 | }
27 |
28 | type context struct {
29 | *Server
30 | }
31 |
32 | // NewServer creates and returns a new server. The namespace param is the redis namespace to use. The hostPort param is the address to bind to in order to expose the API.
33 | func NewServer(namespace string, pool *redis.Pool, hostPort string) *Server {
34 | router := web.New(context{})
35 | server := &Server{
36 | namespace: namespace,
37 | pool: pool,
38 | client: work.NewClient(namespace, pool),
39 | hostPort: hostPort,
40 | server: manners.NewWithServer(&http.Server{Addr: hostPort, Handler: router}),
41 | router: router,
42 | }
43 |
44 | router.Middleware(func(c *context, rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {
45 | c.Server = server
46 | next(rw, r)
47 | })
48 | router.Middleware(func(rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {
49 | rw.Header().Set("Content-Type", "application/json; charset=utf-8")
50 | next(rw, r)
51 | })
52 | router.Get("/queues", (*context).queues)
53 | router.Get("/worker_pools", (*context).workerPools)
54 | router.Get("/busy_workers", (*context).busyWorkers)
55 | router.Get("/retry_jobs", (*context).retryJobs)
56 | router.Get("/scheduled_jobs", (*context).scheduledJobs)
57 | router.Get("/dead_jobs", (*context).deadJobs)
58 | router.Post("/delete_dead_job/:died_at:\\d.*/:job_id", (*context).deleteDeadJob)
59 | router.Post("/retry_dead_job/:died_at:\\d.*/:job_id", (*context).retryDeadJob)
60 | router.Post("/delete_all_dead_jobs", (*context).deleteAllDeadJobs)
61 | router.Post("/retry_all_dead_jobs", (*context).retryAllDeadJobs)
62 |
63 | //
64 | // Build the HTML page:
65 | //
66 | assetRouter := router.Subrouter(context{}, "")
67 | assetRouter.Get("/", func(c *context, rw web.ResponseWriter, req *web.Request) {
68 | rw.Header().Set("Content-Type", "text/html; charset=utf-8")
69 | rw.Write(assets.MustAsset("index.html"))
70 | })
71 | assetRouter.Get("/work.js", func(c *context, rw web.ResponseWriter, req *web.Request) {
72 | rw.Header().Set("Content-Type", "application/javascript; charset=utf-8")
73 | rw.Write(assets.MustAsset("work.js"))
74 | })
75 |
76 | return server
77 | }
78 |
79 | // Start starts the server listening for requests on the hostPort specified in NewServer.
80 | func (w *Server) Start() {
81 | w.wg.Add(1)
82 | go func(w *Server) {
83 | w.server.ListenAndServe()
84 | w.wg.Done()
85 | }(w)
86 | }
87 |
88 | // Stop stops the server and blocks until it has finished.
89 | func (w *Server) Stop() {
90 | w.server.Close()
91 | w.wg.Wait()
92 | }
93 |
94 | func (c *context) queues(rw web.ResponseWriter, r *web.Request) {
95 | response, err := c.client.Queues()
96 | render(rw, response, err)
97 | }
98 |
99 | func (c *context) workerPools(rw web.ResponseWriter, r *web.Request) {
100 | response, err := c.client.WorkerPoolHeartbeats()
101 | render(rw, response, err)
102 | }
103 |
104 | func (c *context) busyWorkers(rw web.ResponseWriter, r *web.Request) {
105 | observations, err := c.client.WorkerObservations()
106 | if err != nil {
107 | renderError(rw, err)
108 | return
109 | }
110 |
111 | var busyObservations []*work.WorkerObservation
112 | for _, ob := range observations {
113 | if ob.IsBusy {
114 | busyObservations = append(busyObservations, ob)
115 | }
116 | }
117 |
118 | render(rw, busyObservations, err)
119 | }
120 |
121 | func (c *context) retryJobs(rw web.ResponseWriter, r *web.Request) {
122 | page, err := parsePage(r)
123 | if err != nil {
124 | renderError(rw, err)
125 | return
126 | }
127 |
128 | jobs, count, err := c.client.RetryJobs(page)
129 | if err != nil {
130 | renderError(rw, err)
131 | return
132 | }
133 |
134 | response := struct {
135 | Count int64 `json:"count"`
136 | Jobs []*work.RetryJob `json:"jobs"`
137 | }{Count: count, Jobs: jobs}
138 |
139 | render(rw, response, err)
140 | }
141 |
142 | func (c *context) scheduledJobs(rw web.ResponseWriter, r *web.Request) {
143 | page, err := parsePage(r)
144 | if err != nil {
145 | renderError(rw, err)
146 | return
147 | }
148 |
149 | jobs, count, err := c.client.ScheduledJobs(page)
150 | if err != nil {
151 | renderError(rw, err)
152 | return
153 | }
154 |
155 | response := struct {
156 | Count int64 `json:"count"`
157 | Jobs []*work.ScheduledJob `json:"jobs"`
158 | }{Count: count, Jobs: jobs}
159 |
160 | render(rw, response, err)
161 | }
162 |
163 | func (c *context) deadJobs(rw web.ResponseWriter, r *web.Request) {
164 | page, err := parsePage(r)
165 | if err != nil {
166 | renderError(rw, err)
167 | return
168 | }
169 |
170 | jobs, count, err := c.client.DeadJobs(page)
171 | if err != nil {
172 | renderError(rw, err)
173 | return
174 | }
175 |
176 | response := struct {
177 | Count int64 `json:"count"`
178 | Jobs []*work.DeadJob `json:"jobs"`
179 | }{Count: count, Jobs: jobs}
180 |
181 | render(rw, response, err)
182 | }
183 |
184 | func (c *context) deleteDeadJob(rw web.ResponseWriter, r *web.Request) {
185 | diedAt, err := strconv.ParseInt(r.PathParams["died_at"], 10, 64)
186 | if err != nil {
187 | renderError(rw, err)
188 | return
189 | }
190 |
191 | err = c.client.DeleteDeadJob(diedAt, r.PathParams["job_id"])
192 |
193 | render(rw, map[string]string{"status": "ok"}, err)
194 | }
195 |
196 | func (c *context) retryDeadJob(rw web.ResponseWriter, r *web.Request) {
197 | diedAt, err := strconv.ParseInt(r.PathParams["died_at"], 10, 64)
198 | if err != nil {
199 | renderError(rw, err)
200 | return
201 | }
202 |
203 | err = c.client.RetryDeadJob(diedAt, r.PathParams["job_id"])
204 |
205 | render(rw, map[string]string{"status": "ok"}, err)
206 | }
207 |
208 | func (c *context) deleteAllDeadJobs(rw web.ResponseWriter, r *web.Request) {
209 | err := c.client.DeleteAllDeadJobs()
210 | render(rw, map[string]string{"status": "ok"}, err)
211 | }
212 |
213 | func (c *context) retryAllDeadJobs(rw web.ResponseWriter, r *web.Request) {
214 | err := c.client.RetryAllDeadJobs()
215 | render(rw, map[string]string{"status": "ok"}, err)
216 | }
217 |
218 | func render(rw web.ResponseWriter, jsonable interface{}, err error) {
219 | if err != nil {
220 | renderError(rw, err)
221 | return
222 | }
223 |
224 | jsonData, err := json.MarshalIndent(jsonable, "", "\t")
225 | if err != nil {
226 | renderError(rw, err)
227 | return
228 | }
229 | rw.Write(jsonData)
230 | }
231 |
232 | func renderError(rw http.ResponseWriter, err error) {
233 | rw.WriteHeader(500)
234 | fmt.Fprintf(rw, `{"error": "%s"}`, err.Error())
235 | }
236 |
237 | func parsePage(r *web.Request) (uint, error) {
238 | err := r.ParseForm()
239 | if err != nil {
240 | return 0, err
241 | }
242 |
243 | pageStr := r.Form.Get("page")
244 | if pageStr == "" {
245 | pageStr = "1"
246 | }
247 |
248 | page, err := strconv.ParseUint(pageStr, 10, 0)
249 | return uint(page), err
250 | }
251 |
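A sketch of standing this server up from an application; the namespace, redis address, and listen address are placeholders for whatever your deployment uses.

package main

import (
	"os"
	"os/signal"

	"github.com/gocraft/work/webui"
	"github.com/gomodule/redigo/redis"
)

func main() {
	// A small redigo pool pointed at a local redis; tune for your deployment.
	pool := &redis.Pool{
		MaxActive: 3,
		MaxIdle:   3,
		Wait:      true,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", ":6379")
		},
	}

	// Serve the JSON API and the bundled HTML page.
	server := webui.NewServer("myapp-work", pool, ":5040")
	server.Start()

	// Block until interrupted, then stop the server and wait for it to finish.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	<-interrupt
	server.Stop()
}
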
--------------------------------------------------------------------------------
/enqueue.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "sync"
5 | "time"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | )
9 |
10 | // Enqueuer can enqueue jobs.
11 | type Enqueuer struct {
12 | Namespace string // eg, "myapp-work"
13 | Pool *redis.Pool
14 |
15 | queuePrefix string // eg, "myapp-work:jobs:"
16 | knownJobs map[string]int64
17 | enqueueUniqueScript *redis.Script
18 | enqueueUniqueInScript *redis.Script
19 | mtx sync.RWMutex
20 | }
21 |
22 | // NewEnqueuer creates a new enqueuer with the specified Redis namespace and Redis pool.
23 | func NewEnqueuer(namespace string, pool *redis.Pool) *Enqueuer {
24 | if pool == nil {
25 | panic("NewEnqueuer needs a non-nil *redis.Pool")
26 | }
27 |
28 | return &Enqueuer{
29 | Namespace: namespace,
30 | Pool: pool,
31 | queuePrefix: redisKeyJobsPrefix(namespace),
32 | knownJobs: make(map[string]int64),
33 | enqueueUniqueScript: redis.NewScript(2, redisLuaEnqueueUnique),
34 | enqueueUniqueInScript: redis.NewScript(2, redisLuaEnqueueUniqueIn),
35 | }
36 | }
37 |
38 | // Enqueue will enqueue the specified job name and arguments. The args param can be nil if no args are needed.
39 | // Example: e.Enqueue("send_email", work.Q{"addr": "test@example.com"})
40 | func (e *Enqueuer) Enqueue(jobName string, args map[string]interface{}) (*Job, error) {
41 | job := &Job{
42 | Name: jobName,
43 | ID: makeIdentifier(),
44 | EnqueuedAt: nowEpochSeconds(),
45 | Args: args,
46 | }
47 |
48 | rawJSON, err := job.serialize()
49 | if err != nil {
50 | return nil, err
51 | }
52 |
53 | conn := e.Pool.Get()
54 | defer conn.Close()
55 |
56 | if _, err := conn.Do("LPUSH", e.queuePrefix+jobName, rawJSON); err != nil {
57 | return nil, err
58 | }
59 |
60 | if err := e.addToKnownJobs(conn, jobName); err != nil {
61 | return job, err
62 | }
63 |
64 | return job, nil
65 | }
66 |
67 | // EnqueueIn enqueues a job in the scheduled job queue for execution in secondsFromNow seconds.
68 | func (e *Enqueuer) EnqueueIn(jobName string, secondsFromNow int64, args map[string]interface{}) (*ScheduledJob, error) {
69 | job := &Job{
70 | Name: jobName,
71 | ID: makeIdentifier(),
72 | EnqueuedAt: nowEpochSeconds(),
73 | Args: args,
74 | }
75 |
76 | rawJSON, err := job.serialize()
77 | if err != nil {
78 | return nil, err
79 | }
80 |
81 | conn := e.Pool.Get()
82 | defer conn.Close()
83 |
84 | scheduledJob := &ScheduledJob{
85 | RunAt: nowEpochSeconds() + secondsFromNow,
86 | Job: job,
87 | }
88 |
89 | _, err = conn.Do("ZADD", redisKeyScheduled(e.Namespace), scheduledJob.RunAt, rawJSON)
90 | if err != nil {
91 | return nil, err
92 | }
93 |
94 | if err := e.addToKnownJobs(conn, jobName); err != nil {
95 | return scheduledJob, err
96 | }
97 |
98 | return scheduledJob, nil
99 | }
100 |
101 | // EnqueueUnique enqueues a job unless a job is already enqueued with the same name and arguments.
102 | // The already-enqueued job can be in the normal work queue or in the scheduled job queue.
103 | // Once a worker begins processing a job, another job with the same name and arguments can be enqueued again.
104 | // Any failed jobs in the retry queue or dead queue don't count against the uniqueness -- so if a job fails and is retried, two unique jobs with the same name and arguments can be enqueued at once.
105 | // In order to add robustness to the system, jobs are only unique for 24 hours after they're enqueued. This is mostly relevant for scheduled jobs.
106 | // EnqueueUnique returns the job if it was enqueued and nil if it wasn't
107 | func (e *Enqueuer) EnqueueUnique(jobName string, args map[string]interface{}) (*Job, error) {
108 | return e.EnqueueUniqueByKey(jobName, args, nil)
109 | }
110 |
111 | // EnqueueUniqueIn enqueues a unique job in the scheduled job queue for execution in secondsFromNow seconds. See EnqueueUnique for the semantics of unique jobs.
112 | func (e *Enqueuer) EnqueueUniqueIn(jobName string, secondsFromNow int64, args map[string]interface{}) (*ScheduledJob, error) {
113 | return e.EnqueueUniqueInByKey(jobName, secondsFromNow, args, nil)
114 | }
115 |
116 | // EnqueueUniqueByKey enqueues a job unless a job is already enqueued with the same name and key; if one is, the stored arguments are updated instead of enqueueing a new job.
117 | // The already-enqueued job can be in the normal work queue or in the scheduled job queue.
118 | // Once a worker begins processing a job, another job with the same name and key can be enqueued again.
119 | // Any failed jobs in the retry queue or dead queue don't count against the uniqueness -- so if a job fails and is retried, two unique jobs with the same name and arguments can be enqueued at once.
120 | // In order to add robustness to the system, jobs are only unique for 24 hours after they're enqueued. This is mostly relevant for scheduled jobs.
121 | // EnqueueUniqueByKey returns the job if it was enqueued and nil if it wasn't
122 | func (e *Enqueuer) EnqueueUniqueByKey(jobName string, args map[string]interface{}, keyMap map[string]interface{}) (*Job, error) {
123 | enqueue, job, err := e.uniqueJobHelper(jobName, args, keyMap)
124 | if err != nil {
125 | return nil, err
126 | }
127 |
128 | res, err := enqueue(nil)
129 |
130 | if res == "ok" && err == nil {
131 | return job, nil
132 | }
133 | return nil, err
134 | }
135 |
136 | // EnqueueUniqueInByKey enqueues a job in the scheduled job queue, unique on the specified key, for execution in secondsFromNow seconds. See EnqueueUnique for the semantics of unique jobs.
137 | // Subsequent calls with the same key will update the stored arguments.
138 | func (e *Enqueuer) EnqueueUniqueInByKey(jobName string, secondsFromNow int64, args map[string]interface{}, keyMap map[string]interface{}) (*ScheduledJob, error) {
139 | enqueue, job, err := e.uniqueJobHelper(jobName, args, keyMap)
140 | if err != nil {
141 | return nil, err
142 | }
143 |
144 | scheduledJob := &ScheduledJob{
145 | RunAt: nowEpochSeconds() + secondsFromNow,
146 | Job: job,
147 | }
148 |
149 | res, err := enqueue(&scheduledJob.RunAt)
150 | if res == "ok" && err == nil {
151 | return scheduledJob, nil
152 | }
153 | return nil, err
154 | }
155 |
156 | func (e *Enqueuer) addToKnownJobs(conn redis.Conn, jobName string) error {
157 | needSadd := true
158 | now := time.Now().Unix()
159 |
160 | e.mtx.RLock()
161 | t, ok := e.knownJobs[jobName]
162 | e.mtx.RUnlock()
163 |
164 | if ok {
165 | if now < t {
166 | needSadd = false
167 | }
168 | }
169 | if needSadd {
170 | if _, err := conn.Do("SADD", redisKeyKnownJobs(e.Namespace), jobName); err != nil {
171 | return err
172 | }
173 |
174 | e.mtx.Lock()
175 | e.knownJobs[jobName] = now + 300
176 | e.mtx.Unlock()
177 | }
178 |
179 | return nil
180 | }
181 |
182 | type enqueueFnType func(*int64) (string, error)
183 |
184 | func (e *Enqueuer) uniqueJobHelper(jobName string, args map[string]interface{}, keyMap map[string]interface{}) (enqueueFnType, *Job, error) {
185 | useDefaultKeys := false
186 | if keyMap == nil {
187 | useDefaultKeys = true
188 | keyMap = args
189 | }
190 |
191 | uniqueKey, err := redisKeyUniqueJob(e.Namespace, jobName, keyMap)
192 | if err != nil {
193 | return nil, nil, err
194 | }
195 |
196 | job := &Job{
197 | Name: jobName,
198 | ID: makeIdentifier(),
199 | EnqueuedAt: nowEpochSeconds(),
200 | Args: args,
201 | Unique: true,
202 | UniqueKey: uniqueKey,
203 | }
204 |
205 | rawJSON, err := job.serialize()
206 | if err != nil {
207 | return nil, nil, err
208 | }
209 |
210 | enqueueFn := func(runAt *int64) (string, error) {
211 | conn := e.Pool.Get()
212 | defer conn.Close()
213 |
214 | if err := e.addToKnownJobs(conn, jobName); err != nil {
215 | return "", err
216 | }
217 |
218 | scriptArgs := []interface{}{}
219 | script := e.enqueueUniqueScript
220 |
221 | scriptArgs = append(scriptArgs, e.queuePrefix+jobName) // KEY[1]
222 | scriptArgs = append(scriptArgs, uniqueKey) // KEY[2]
223 | scriptArgs = append(scriptArgs, rawJSON) // ARGV[1]
224 | if useDefaultKeys {
225 | 			// keying on arguments, so arguments can't be updated;
226 | 			// the worker will just get them off the original job, so to save space make this "1"
227 | scriptArgs = append(scriptArgs, "1") // ARGV[2]
228 | } else {
229 | // we'll use this for updated arguments since the job on the queue
230 | // doesn't get updated
231 | scriptArgs = append(scriptArgs, rawJSON) // ARGV[2]
232 | }
233 |
234 | if runAt != nil { // Scheduled job so different job queue with additional arg
235 | scriptArgs[0] = redisKeyScheduled(e.Namespace) // KEY[1]
236 | scriptArgs = append(scriptArgs, *runAt) // ARGV[3]
237 |
238 | script = e.enqueueUniqueInScript
239 | }
240 |
241 | return redis.String(script.Do(conn, scriptArgs...))
242 | }
243 |
244 | return enqueueFn, job, nil
245 | }
246 |
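A sketch of a few of the enqueueing styles above from an application; the namespace, job names, and arguments are placeholders.

package main

import (
	"log"

	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redis.Pool{
		MaxActive: 5,
		MaxIdle:   5,
		Wait:      true,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", ":6379")
		},
	}
	enqueuer := work.NewEnqueuer("myapp-work", pool)

	// Plain enqueue: picked up as soon as a worker is free.
	if _, err := enqueuer.Enqueue("send_email", work.Q{"address": "test@example.com"}); err != nil {
		log.Fatal(err)
	}

	// Scheduled enqueue: runs roughly 300 seconds from now.
	if _, err := enqueuer.EnqueueIn("send_reminder", 300, work.Q{"address": "test@example.com"}); err != nil {
		log.Fatal(err)
	}

	// Unique by key: at most one queued "export_account" job per account_id;
	// a later call with the same key refreshes the stored arguments.
	job, err := enqueuer.EnqueueUniqueByKey("export_account",
		work.Q{"account_id": 4, "format": "csv"},
		work.Q{"account_id": 4})
	if err != nil {
		log.Fatal(err)
	}
	if job == nil {
		log.Println("an export_account job for this account is already queued")
	}
}
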
--------------------------------------------------------------------------------
/worker_pool_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "reflect"
7 | "testing"
8 | "time"
9 |
10 | "github.com/gomodule/redigo/redis"
11 | "github.com/stretchr/testify/assert"
12 | )
13 |
14 | type tstCtx struct {
15 | a int
16 | bytes.Buffer
17 | }
18 |
19 | func (c *tstCtx) record(s string) {
20 | _, _ = c.WriteString(s)
21 | }
22 |
23 | var tstCtxType = reflect.TypeOf(tstCtx{})
24 |
25 | func TestWorkerPoolHandlerValidations(t *testing.T) {
26 | var cases = []struct {
27 | fn interface{}
28 | good bool
29 | }{
30 | {func(j *Job) error { return nil }, true},
31 | {func(c *tstCtx, j *Job) error { return nil }, true},
32 | {func(c *tstCtx, j *Job) {}, false},
33 | {func(c *tstCtx, j *Job) string { return "" }, false},
34 | {func(c *tstCtx, j *Job) (error, string) { return nil, "" }, false},
35 | {func(c *tstCtx) error { return nil }, false},
36 | {func(c tstCtx, j *Job) error { return nil }, false},
37 | {func() error { return nil }, false},
38 | {func(c *tstCtx, j *Job, wat string) error { return nil }, false},
39 | }
40 |
41 | for i, testCase := range cases {
42 | r := isValidHandlerType(tstCtxType, reflect.ValueOf(testCase.fn))
43 | if testCase.good != r {
44 | t.Errorf("idx %d: should return %v but returned %v", i, testCase.good, r)
45 | }
46 | }
47 | }
48 |
49 | func TestWorkerPoolMiddlewareValidations(t *testing.T) {
50 | var cases = []struct {
51 | fn interface{}
52 | good bool
53 | }{
54 | {func(j *Job, n NextMiddlewareFunc) error { return nil }, true},
55 | {func(c *tstCtx, j *Job, n NextMiddlewareFunc) error { return nil }, true},
56 | {func(c *tstCtx, j *Job) error { return nil }, false},
57 | {func(c *tstCtx, j *Job, n NextMiddlewareFunc) {}, false},
58 | {func(c *tstCtx, j *Job, n NextMiddlewareFunc) string { return "" }, false},
59 | {func(c *tstCtx, j *Job, n NextMiddlewareFunc) (error, string) { return nil, "" }, false},
60 | {func(c *tstCtx, n NextMiddlewareFunc) error { return nil }, false},
61 | {func(c tstCtx, j *Job, n NextMiddlewareFunc) error { return nil }, false},
62 | {func() error { return nil }, false},
63 | {func(c *tstCtx, j *Job, wat string) error { return nil }, false},
64 | {func(c *tstCtx, j *Job, n NextMiddlewareFunc, wat string) error { return nil }, false},
65 | }
66 |
67 | for i, testCase := range cases {
68 | r := isValidMiddlewareType(tstCtxType, reflect.ValueOf(testCase.fn))
69 | if testCase.good != r {
70 | t.Errorf("idx %d: should return %v but returned %v", i, testCase.good, r)
71 | }
72 | }
73 | }
74 |
75 | func TestWorkerPoolStartStop(t *testing.T) {
76 | pool := newTestPool(":6379")
77 | ns := "work"
78 | wp := NewWorkerPool(TestContext{}, 10, ns, pool)
79 | wp.Start()
80 | wp.Start()
81 | wp.Stop()
82 | wp.Stop()
83 | wp.Start()
84 | wp.Stop()
85 | }
86 |
87 | func TestWorkerPoolValidations(t *testing.T) {
88 | pool := newTestPool(":6379")
89 | ns := "work"
90 | wp := NewWorkerPool(TestContext{}, 10, ns, pool)
91 |
92 | func() {
93 | defer func() {
94 | if panicErr := recover(); panicErr != nil {
95 | assert.Regexp(t, "Your middleware function can have one of these signatures", fmt.Sprintf("%v", panicErr))
96 | } else {
97 | t.Errorf("expected a panic when using bad middleware")
98 | }
99 | }()
100 |
101 | wp.Middleware(TestWorkerPoolValidations)
102 | }()
103 |
104 | func() {
105 | defer func() {
106 | if panicErr := recover(); panicErr != nil {
107 | assert.Regexp(t, "Your handler function can have one of these signatures", fmt.Sprintf("%v", panicErr))
108 | } else {
109 | t.Errorf("expected a panic when using a bad handler")
110 | }
111 | }()
112 |
113 | wp.Job("wat", TestWorkerPoolValidations)
114 | }()
115 | }
116 |
117 | func TestWorkersPoolRunSingleThreaded(t *testing.T) {
118 | pool := newTestPool(":6379")
119 | ns := "work"
120 | job1 := "job1"
121 | numJobs, concurrency, sleepTime := 5, 5, 2
122 | wp := setupTestWorkerPool(pool, ns, job1, concurrency, JobOptions{Priority: 1, MaxConcurrency: 1})
123 | wp.Start()
124 | // enqueue some jobs
125 | enqueuer := NewEnqueuer(ns, pool)
126 | for i := 0; i < numJobs; i++ {
127 | _, err := enqueuer.Enqueue(job1, Q{"sleep": sleepTime})
128 | assert.Nil(t, err)
129 | }
130 |
131 | 	// make sure we have enough jobs queued up to make an interesting test
132 | jobsQueued := listSize(pool, redisKeyJobs(ns, job1))
133 | assert.True(t, jobsQueued >= 3, "should be at least 3 jobs queued up, but only found %v", jobsQueued)
134 |
135 | 	// now make sure that during the duration of job execution there is never > 1 job in flight
136 | start := time.Now()
137 | totalRuntime := time.Duration(sleepTime*numJobs) * time.Millisecond
138 | time.Sleep(10 * time.Millisecond)
139 | for time.Since(start) < totalRuntime {
140 | // jobs in progress, lock count for the job and lock info for the pool should never exceed 1
141 | jobsInProgress := listSize(pool, redisKeyJobsInProgress(ns, wp.workerPoolID, job1))
142 | assert.True(t, jobsInProgress <= 1, "jobsInProgress should never exceed 1: actual=%d", jobsInProgress)
143 |
144 | jobLockCount := getInt64(pool, redisKeyJobsLock(ns, job1))
145 | assert.True(t, jobLockCount <= 1, "global lock count for job should never exceed 1, got: %v", jobLockCount)
146 | wpLockCount := hgetInt64(pool, redisKeyJobsLockInfo(ns, job1), wp.workerPoolID)
147 | assert.True(t, wpLockCount <= 1, "lock count for the worker pool should never exceed 1: actual=%v", wpLockCount)
148 | time.Sleep(time.Duration(sleepTime) * time.Millisecond)
149 | }
150 | wp.Drain()
151 | wp.Stop()
152 |
153 | // At this point it should all be empty.
154 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, job1)))
155 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobsInProgress(ns, wp.workerPoolID, job1)))
156 | assert.EqualValues(t, 0, getInt64(pool, redisKeyJobsLock(ns, job1)))
157 | assert.EqualValues(t, 0, hgetInt64(pool, redisKeyJobsLockInfo(ns, job1), wp.workerPoolID))
158 | }
159 |
160 | func TestWorkerPoolPauseSingleThreadedJobs(t *testing.T) {
161 | pool := newTestPool(":6379")
162 | ns, job1 := "work", "job1"
163 | numJobs, concurrency, sleepTime := 5, 5, 2
164 | wp := setupTestWorkerPool(pool, ns, job1, concurrency, JobOptions{Priority: 1, MaxConcurrency: 1})
165 | wp.Start()
166 | // enqueue some jobs
167 | enqueuer := NewEnqueuer(ns, pool)
168 | for i := 0; i < numJobs; i++ {
169 | _, err := enqueuer.Enqueue(job1, Q{"sleep": sleepTime})
170 | assert.Nil(t, err)
171 | }
172 | // provide time for jobs to process
173 | time.Sleep(10 * time.Millisecond)
174 |
175 | // pause work, provide time for outstanding jobs to finish and queue up another job
176 | pauseJobs(ns, job1, pool)
177 | time.Sleep(2 * time.Millisecond)
178 | _, err := enqueuer.Enqueue(job1, Q{"sleep": sleepTime})
179 | assert.Nil(t, err)
180 |
181 | // check that we still have some jobs to process
182 | assert.True(t, listSize(pool, redisKeyJobs(ns, job1)) >= 1)
183 |
184 | // now make sure no jobs get started until we unpause
185 | start := time.Now()
186 | totalRuntime := time.Duration(sleepTime*numJobs) * time.Millisecond
187 | for time.Since(start) < totalRuntime {
188 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobsInProgress(ns, wp.workerPoolID, job1)))
189 | 		// while paused, the lock count for the job and the lock info for the pool should stay at 0
190 | assert.EqualValues(t, 0, getInt64(pool, redisKeyJobsLock(ns, job1)))
191 | assert.EqualValues(t, 0, hgetInt64(pool, redisKeyJobsLockInfo(ns, job1), wp.workerPoolID))
192 | time.Sleep(time.Duration(sleepTime) * time.Millisecond)
193 | }
194 |
195 | // unpause work and get past the backoff time
196 | unpauseJobs(ns, job1, pool)
197 | time.Sleep(10 * time.Millisecond)
198 |
199 | wp.Drain()
200 | wp.Stop()
201 |
202 | // At this point it should all be empty.
203 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, job1)))
204 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobsInProgress(ns, wp.workerPoolID, job1)))
205 | assert.EqualValues(t, 0, getInt64(pool, redisKeyJobsLock(ns, job1)))
206 | assert.EqualValues(t, 0, hgetInt64(pool, redisKeyJobsLockInfo(ns, job1), wp.workerPoolID))
207 | }
208 |
209 | // Test Helpers
210 | func (t *TestContext) SleepyJob(job *Job) error {
211 | sleepTime := time.Duration(job.ArgInt64("sleep"))
212 | time.Sleep(sleepTime * time.Millisecond)
213 | return nil
214 | }
215 |
216 | func setupTestWorkerPool(pool *redis.Pool, namespace, jobName string, concurrency int, jobOpts JobOptions) *WorkerPool {
217 | deleteQueue(pool, namespace, jobName)
218 | deleteRetryAndDead(pool, namespace)
219 | deletePausedAndLockedKeys(namespace, jobName, pool)
220 |
221 | wp := NewWorkerPool(TestContext{}, uint(concurrency), namespace, pool)
222 | wp.JobWithOptions(jobName, jobOpts, (*TestContext).SleepyJob)
223 | // reset the backoff times to help with testing
224 | sleepBackoffsInMilliseconds = []int64{10, 10, 10, 10, 10}
225 | return wp
226 | }
227 |
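A sketch of the worker-pool wiring these tests exercise: register a handler with JobOptions and cap it at one in-flight execution via MaxConcurrency. The context type, job name, and addresses are placeholders.

package main

import (
	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

// Context is a hypothetical per-pool context; handlers are methods on it.
type Context struct{}

func (c *Context) Export(job *work.Job) error {
	// ... do the actual work here ...
	return nil
}

func main() {
	pool := &redis.Pool{
		MaxActive: 5,
		MaxIdle:   5,
		Dial: func() (redis.Conn, error) {
			return redis.Dial("tcp", ":6379")
		},
	}

	// 5 worker goroutines in this process; "export" is additionally limited to
	// one concurrent execution across all worker pools, as the single-threaded
	// tests above verify via the lock counts.
	wp := work.NewWorkerPool(Context{}, 5, "myapp-work", pool)
	wp.JobWithOptions("export", work.JobOptions{Priority: 1, MaxConcurrency: 1}, (*Context).Export)
	wp.Start()

	// ... and on shutdown:
	wp.Stop()
}
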
--------------------------------------------------------------------------------
/worker.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "math/rand"
6 | "reflect"
7 | "time"
8 |
9 | "github.com/gomodule/redigo/redis"
10 | )
11 |
12 | const fetchKeysPerJobType = 6
13 |
14 | type worker struct {
15 | workerID string
16 | poolID string
17 | namespace string
18 | pool *redis.Pool
19 | jobTypes map[string]*jobType
20 | sleepBackoffs []int64
21 | middleware []*middlewareHandler
22 | contextType reflect.Type
23 |
24 | redisFetchScript *redis.Script
25 | sampler prioritySampler
26 | *observer
27 |
28 | stopChan chan struct{}
29 | doneStoppingChan chan struct{}
30 |
31 | drainChan chan struct{}
32 | doneDrainingChan chan struct{}
33 | }
34 |
35 | func newWorker(namespace string, poolID string, pool *redis.Pool, contextType reflect.Type, middleware []*middlewareHandler, jobTypes map[string]*jobType, sleepBackoffs []int64) *worker {
36 | workerID := makeIdentifier()
37 | ob := newObserver(namespace, pool, workerID)
38 |
39 | if len(sleepBackoffs) == 0 {
40 | sleepBackoffs = sleepBackoffsInMilliseconds
41 | }
42 |
43 | w := &worker{
44 | workerID: workerID,
45 | poolID: poolID,
46 | namespace: namespace,
47 | pool: pool,
48 | contextType: contextType,
49 | sleepBackoffs: sleepBackoffs,
50 |
51 | observer: ob,
52 |
53 | stopChan: make(chan struct{}),
54 | doneStoppingChan: make(chan struct{}),
55 |
56 | drainChan: make(chan struct{}),
57 | doneDrainingChan: make(chan struct{}),
58 | }
59 |
60 | w.updateMiddlewareAndJobTypes(middleware, jobTypes)
61 |
62 | return w
63 | }
64 |
65 | // note: must not be called while the worker is started
66 | func (w *worker) updateMiddlewareAndJobTypes(middleware []*middlewareHandler, jobTypes map[string]*jobType) {
67 | w.middleware = middleware
68 | sampler := prioritySampler{}
69 | for _, jt := range jobTypes {
70 | sampler.add(jt.Priority,
71 | redisKeyJobs(w.namespace, jt.Name),
72 | redisKeyJobsInProgress(w.namespace, w.poolID, jt.Name),
73 | redisKeyJobsPaused(w.namespace, jt.Name),
74 | redisKeyJobsLock(w.namespace, jt.Name),
75 | redisKeyJobsLockInfo(w.namespace, jt.Name),
76 | redisKeyJobsConcurrency(w.namespace, jt.Name))
77 | }
78 | w.sampler = sampler
79 | w.jobTypes = jobTypes
80 | w.redisFetchScript = redis.NewScript(len(jobTypes)*fetchKeysPerJobType, redisLuaFetchJob)
81 | }
82 |
83 | func (w *worker) start() {
84 | go w.loop()
85 | go w.observer.start()
86 | }
87 |
88 | func (w *worker) stop() {
89 | w.stopChan <- struct{}{}
90 | <-w.doneStoppingChan
91 | w.observer.drain()
92 | w.observer.stop()
93 | }
94 |
95 | func (w *worker) drain() {
96 | w.drainChan <- struct{}{}
97 | <-w.doneDrainingChan
98 | w.observer.drain()
99 | }
100 |
101 | var sleepBackoffsInMilliseconds = []int64{0, 10, 100, 1000, 5000}
102 |
103 | func (w *worker) loop() {
104 | var drained bool
105 | 	var consecutiveNoJobs int64
106 |
107 | // Begin immediately. We'll change the duration on each tick with a timer.Reset()
108 | timer := time.NewTimer(0)
109 | defer timer.Stop()
110 |
111 | for {
112 | select {
113 | case <-w.stopChan:
114 | w.doneStoppingChan <- struct{}{}
115 | return
116 | case <-w.drainChan:
117 | drained = true
118 | timer.Reset(0)
119 | case <-timer.C:
120 | job, err := w.fetchJob()
121 | if err != nil {
122 | logError("worker.fetch", err)
123 | timer.Reset(10 * time.Millisecond)
124 | } else if job != nil {
125 | w.processJob(job)
126 | 				consecutiveNoJobs = 0
127 | timer.Reset(0)
128 | } else {
129 | if drained {
130 | w.doneDrainingChan <- struct{}{}
131 | drained = false
132 | }
133 | 				consecutiveNoJobs++
134 | 				idx := consecutiveNoJobs
135 | if idx >= int64(len(w.sleepBackoffs)) {
136 | idx = int64(len(w.sleepBackoffs)) - 1
137 | }
138 | timer.Reset(time.Duration(w.sleepBackoffs[idx]) * time.Millisecond)
139 | }
140 | }
141 | }
142 | }
143 |
144 | func (w *worker) fetchJob() (*Job, error) {
145 | 	// re-sort queues
146 | 	// NOTE: we could optimize this to only re-sort every second, or something.
147 | w.sampler.sample()
148 | numKeys := len(w.sampler.samples) * fetchKeysPerJobType
149 | var scriptArgs = make([]interface{}, 0, numKeys+1)
150 |
151 | for _, s := range w.sampler.samples {
152 | scriptArgs = append(scriptArgs, s.redisJobs, s.redisJobsInProg, s.redisJobsPaused, s.redisJobsLock, s.redisJobsLockInfo, s.redisJobsMaxConcurrency) // KEYS[1-6 * N]
153 | }
154 | scriptArgs = append(scriptArgs, w.poolID) // ARGV[1]
155 | conn := w.pool.Get()
156 | defer conn.Close()
157 |
158 | values, err := redis.Values(w.redisFetchScript.Do(conn, scriptArgs...))
159 | if err == redis.ErrNil {
160 | return nil, nil
161 | } else if err != nil {
162 | return nil, err
163 | }
164 |
165 | if len(values) != 3 {
166 | return nil, fmt.Errorf("need 3 elements back")
167 | }
168 |
169 | rawJSON, ok := values[0].([]byte)
170 | if !ok {
171 | return nil, fmt.Errorf("response msg not bytes")
172 | }
173 |
174 | dequeuedFrom, ok := values[1].([]byte)
175 | if !ok {
176 | return nil, fmt.Errorf("response queue not bytes")
177 | }
178 |
179 | inProgQueue, ok := values[2].([]byte)
180 | if !ok {
181 | return nil, fmt.Errorf("response in prog not bytes")
182 | }
183 |
184 | job, err := newJob(rawJSON, dequeuedFrom, inProgQueue)
185 | if err != nil {
186 | return nil, err
187 | }
188 |
189 | return job, nil
190 | }
191 |
192 | func (w *worker) processJob(job *Job) {
193 | if job.Unique {
194 | updatedJob := w.getAndDeleteUniqueJob(job)
195 | // This is to support the old way of doing it, where we used the job off the queue and just deleted the unique key
196 | // Going forward the job on the queue will always be just a placeholder, and we will be replacing it with the
197 | // updated job extracted here
198 | if updatedJob != nil {
199 | job = updatedJob
200 | }
201 | }
202 | var runErr error
203 | jt := w.jobTypes[job.Name]
204 | if jt == nil {
205 | runErr = fmt.Errorf("stray job: no handler")
206 | logError("process_job.stray", runErr)
207 | } else {
208 | w.observeStarted(job.Name, job.ID, job.Args)
209 | job.observer = w.observer // for Checkin
210 | _, runErr = runJob(job, w.contextType, w.middleware, jt)
211 | w.observeDone(job.Name, job.ID, runErr)
212 | }
213 |
214 | fate := terminateOnly
215 | if runErr != nil {
216 | job.failed(runErr)
217 | fate = w.jobFate(jt, job)
218 | }
219 | w.removeJobFromInProgress(job, fate)
220 | }
221 |
222 | func (w *worker) getAndDeleteUniqueJob(job *Job) *Job {
223 | var uniqueKey string
224 | var err error
225 |
226 | if job.UniqueKey != "" {
227 | uniqueKey = job.UniqueKey
228 | } else { // For jobs put in queue prior to this change. In the future this can be deleted as there will always be a UniqueKey
229 | uniqueKey, err = redisKeyUniqueJob(w.namespace, job.Name, job.Args)
230 | if err != nil {
231 | logError("worker.delete_unique_job.key", err)
232 | return nil
233 | }
234 | }
235 |
236 | conn := w.pool.Get()
237 | defer conn.Close()
238 |
239 | rawJSON, err := redis.Bytes(conn.Do("GET", uniqueKey))
240 | if err != nil {
241 | logError("worker.delete_unique_job.get", err)
242 | return nil
243 | }
244 |
245 | _, err = conn.Do("DEL", uniqueKey)
246 | if err != nil {
247 | logError("worker.delete_unique_job.del", err)
248 | return nil
249 | }
250 |
251 | // Previous versions did not support updated arguments and just set key to 1, so in these cases we should do nothing.
252 | // In the future this can be deleted, as we will always be getting arguments from here
253 | if string(rawJSON) == "1" {
254 | return nil
255 | }
256 |
257 | // The job pulled off the queue was just a placeholder with no args, so replace it
258 | jobWithArgs, err := newJob(rawJSON, job.dequeuedFrom, job.inProgQueue)
259 | if err != nil {
260 | logError("worker.delete_unique_job.updated_job", err)
261 | return nil
262 | }
263 |
264 | return jobWithArgs
265 | }
266 |
267 | func (w *worker) removeJobFromInProgress(job *Job, fate terminateOp) {
268 | conn := w.pool.Get()
269 | defer conn.Close()
270 |
271 | conn.Send("MULTI")
272 | conn.Send("LREM", job.inProgQueue, 1, job.rawJSON)
273 | conn.Send("DECR", redisKeyJobsLock(w.namespace, job.Name))
274 | conn.Send("HINCRBY", redisKeyJobsLockInfo(w.namespace, job.Name), w.poolID, -1)
275 | fate(conn)
276 | if _, err := conn.Do("EXEC"); err != nil {
277 | logError("worker.remove_job_from_in_progress.lrem", err)
278 | }
279 | }
280 |
281 | type terminateOp func(conn redis.Conn)
282 |
283 | func terminateOnly(_ redis.Conn) { return }
284 | func terminateAndRetry(w *worker, jt *jobType, job *Job) terminateOp {
285 | rawJSON, err := job.serialize()
286 | if err != nil {
287 | logError("worker.terminate_and_retry.serialize", err)
288 | return terminateOnly
289 | }
290 | return func(conn redis.Conn) {
291 | conn.Send("ZADD", redisKeyRetry(w.namespace), nowEpochSeconds()+jt.calcBackoff(job), rawJSON)
292 | }
293 | }
294 | func terminateAndDead(w *worker, job *Job) terminateOp {
295 | rawJSON, err := job.serialize()
296 | if err != nil {
297 | logError("worker.terminate_and_dead.serialize", err)
298 | return terminateOnly
299 | }
300 | return func(conn redis.Conn) {
301 | // NOTE: sidekiq limits the # of jobs: only keep jobs for 6 months, and only keep a max # of jobs
302 | // The max # of jobs seems really horrible. Seems like operations should be on top of it.
303 | // conn.Send("ZREMRANGEBYSCORE", redisKeyDead(w.namespace), "-inf", now - keepInterval)
304 | // conn.Send("ZREMRANGEBYRANK", redisKeyDead(w.namespace), 0, -maxJobs)
305 |
306 | conn.Send("ZADD", redisKeyDead(w.namespace), nowEpochSeconds(), rawJSON)
307 | }
308 | }
309 |
310 | func (w *worker) jobFate(jt *jobType, job *Job) terminateOp {
311 | if jt != nil {
312 | failsRemaining := int64(jt.MaxFails) - job.Fails
313 | if failsRemaining > 0 {
314 | return terminateAndRetry(w, jt, job)
315 | }
316 | if jt.SkipDead {
317 | return terminateOnly
318 | }
319 | }
320 | return terminateAndDead(w, job)
321 | }
322 |
323 | // Default algorithm returns a rapidly increasing backoff counter that grows in an unbounded fashion
324 | func defaultBackoffCalculator(job *Job) int64 {
325 | fails := job.Fails
326 | return (fails * fails * fails * fails) + 15 + (rand.Int63n(30) * (fails + 1))
327 | }
328 |
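To make the default backoff above concrete, a tiny sketch of its deterministic floor and jitter range per failure count; values are in seconds, mirroring the formula fails^4 + 15 + rand(0..29)*(fails+1).

package main

import "fmt"

func main() {
	// Reproduce the bounds of defaultBackoffCalculator: the floor is
	// fails^4 + 15 seconds, and rand.Int63n(30)*(fails+1) adds up to
	// 29*(fails+1) seconds of jitter on top of it.
	for fails := int64(1); fails <= 5; fails++ {
		floor := fails*fails*fails*fails + 15
		maxJitter := 29 * (fails + 1)
		fmt.Printf("fails=%d: retry delay in [%d, %d] seconds\n", fails, floor, floor+maxJitter)
	}
}
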
--------------------------------------------------------------------------------
/dead_pool_reaper_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/gomodule/redigo/redis"
8 | "github.com/stretchr/testify/assert"
9 | )
10 |
11 | func TestDeadPoolReaper(t *testing.T) {
12 | pool := newTestPool(":6379")
13 | ns := "work"
14 | cleanKeyspace(ns, pool)
15 |
16 | conn := pool.Get()
17 | defer conn.Close()
18 |
19 | workerPoolsKey := redisKeyWorkerPools(ns)
20 |
21 | // Create redis data
22 | var err error
23 | err = conn.Send("SADD", workerPoolsKey, "1")
24 | assert.NoError(t, err)
25 | err = conn.Send("SADD", workerPoolsKey, "2")
26 | assert.NoError(t, err)
27 | err = conn.Send("SADD", workerPoolsKey, "3")
28 | assert.NoError(t, err)
29 |
30 | err = conn.Send("HMSET", redisKeyHeartbeat(ns, "1"),
31 | "heartbeat_at", time.Now().Unix(),
32 | "job_names", "type1,type2",
33 | )
34 | assert.NoError(t, err)
35 |
36 | err = conn.Send("HMSET", redisKeyHeartbeat(ns, "2"),
37 | "heartbeat_at", time.Now().Add(-1*time.Hour).Unix(),
38 | "job_names", "type1,type2",
39 | )
40 | assert.NoError(t, err)
41 |
42 | err = conn.Send("HMSET", redisKeyHeartbeat(ns, "3"),
43 | "heartbeat_at", time.Now().Add(-1*time.Hour).Unix(),
44 | "job_names", "type1,type2",
45 | )
46 | assert.NoError(t, err)
47 | err = conn.Flush()
48 | assert.NoError(t, err)
49 |
50 | // Test getting dead pool
51 | reaper := newDeadPoolReaper(ns, pool, []string{})
52 | deadPools, err := reaper.findDeadPools()
53 | assert.NoError(t, err)
54 | assert.Equal(t, map[string][]string{"2": {"type1", "type2"}, "3": {"type1", "type2"}}, deadPools)
55 |
56 | // Test requeueing jobs
57 | _, err = conn.Do("lpush", redisKeyJobsInProgress(ns, "2", "type1"), "foo")
58 | assert.NoError(t, err)
59 | _, err = conn.Do("incr", redisKeyJobsLock(ns, "type1"))
60 | assert.NoError(t, err)
61 | _, err = conn.Do("hincrby", redisKeyJobsLockInfo(ns, "type1"), "2", 1) // worker pool 2 has lock
62 | assert.NoError(t, err)
63 |
64 | // Ensure 0 jobs in jobs queue
65 | jobsCount, err := redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
66 | assert.NoError(t, err)
67 | assert.Equal(t, 0, jobsCount)
68 |
69 | // Ensure 1 job in inprogress queue
70 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
71 | assert.NoError(t, err)
72 | assert.Equal(t, 1, jobsCount)
73 |
74 | // Reap
75 | err = reaper.reap()
76 | assert.NoError(t, err)
77 |
78 | 	// Ensure 1 job in jobs queue
79 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
80 | assert.NoError(t, err)
81 | assert.Equal(t, 1, jobsCount)
82 |
83 | 	// Ensure 0 jobs in inprogress queue
84 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
85 | assert.NoError(t, err)
86 | assert.Equal(t, 0, jobsCount)
87 |
88 | // Locks should get cleaned up
89 | assert.EqualValues(t, 0, getInt64(pool, redisKeyJobsLock(ns, "type1")))
90 | v, _ := conn.Do("HGET", redisKeyJobsLockInfo(ns, "type1"), "2")
91 | assert.Nil(t, v)
92 | }
93 |
94 | func TestDeadPoolReaperNoHeartbeat(t *testing.T) {
95 | pool := newTestPool(":6379")
96 | ns := "work"
97 |
98 | conn := pool.Get()
99 | defer conn.Close()
100 |
101 | workerPoolsKey := redisKeyWorkerPools(ns)
102 |
103 | // Create redis data
104 | var err error
105 | cleanKeyspace(ns, pool)
106 | err = conn.Send("SADD", workerPoolsKey, "1")
107 | assert.NoError(t, err)
108 | err = conn.Send("SADD", workerPoolsKey, "2")
109 | assert.NoError(t, err)
110 | err = conn.Send("SADD", workerPoolsKey, "3")
111 | assert.NoError(t, err)
112 | // stale lock info
113 | err = conn.Send("SET", redisKeyJobsLock(ns, "type1"), 3)
114 | assert.NoError(t, err)
115 | err = conn.Send("HSET", redisKeyJobsLockInfo(ns, "type1"), "1", 1)
116 | assert.NoError(t, err)
117 | err = conn.Send("HSET", redisKeyJobsLockInfo(ns, "type1"), "2", 1)
118 | assert.NoError(t, err)
119 | err = conn.Send("HSET", redisKeyJobsLockInfo(ns, "type1"), "3", 1)
120 | assert.NoError(t, err)
121 | err = conn.Flush()
122 | assert.NoError(t, err)
123 |
124 | // make sure test data was created
125 | numPools, err := redis.Int(conn.Do("scard", workerPoolsKey))
126 | assert.NoError(t, err)
127 | assert.EqualValues(t, 3, numPools)
128 |
129 | // Test getting dead pool ids
130 | reaper := newDeadPoolReaper(ns, pool, []string{"type1"})
131 | deadPools, err := reaper.findDeadPools()
132 | assert.NoError(t, err)
133 | assert.Equal(t, map[string][]string{"1": {}, "2": {}, "3": {}}, deadPools)
134 |
135 | // Test requeueing jobs
136 | _, err = conn.Do("lpush", redisKeyJobsInProgress(ns, "2", "type1"), "foo")
137 | assert.NoError(t, err)
138 |
139 | // Ensure 0 jobs in jobs queue
140 | jobsCount, err := redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
141 | assert.NoError(t, err)
142 | assert.Equal(t, 0, jobsCount)
143 |
144 | // Ensure 1 job in inprogress queue
145 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
146 | assert.NoError(t, err)
147 | assert.Equal(t, 1, jobsCount)
148 |
149 | // Ensure dead worker pools still in the set
150 | jobsCount, err = redis.Int(conn.Do("scard", redisKeyWorkerPools(ns)))
151 | assert.NoError(t, err)
152 | assert.Equal(t, 3, jobsCount)
153 |
154 | // Reap
155 | err = reaper.reap()
156 | assert.NoError(t, err)
157 |
158 | // Ensure jobs queue was not altered
159 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
160 | assert.NoError(t, err)
161 | assert.Equal(t, 0, jobsCount)
162 |
163 | // Ensure inprogress queue was not altered
164 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
165 | assert.NoError(t, err)
166 | assert.Equal(t, 1, jobsCount)
167 |
168 | // Ensure dead worker pools were removed from the set
169 | jobsCount, err = redis.Int(conn.Do("scard", redisKeyWorkerPools(ns)))
170 | assert.NoError(t, err)
171 | assert.Equal(t, 0, jobsCount)
172 |
173 | // Stale lock info was cleaned up using reap.curJobTypes
174 | assert.EqualValues(t, 0, getInt64(pool, redisKeyJobsLock(ns, "type1")))
175 | for _, poolID := range []string{"1", "2", "3"} {
176 | v, _ := conn.Do("HGET", redisKeyJobsLockInfo(ns, "type1"), poolID)
177 | assert.Nil(t, v)
178 | }
179 | }
180 |
181 | func TestDeadPoolReaperNoJobTypes(t *testing.T) {
182 | pool := newTestPool(":6379")
183 | ns := "work"
184 | cleanKeyspace(ns, pool)
185 |
186 | conn := pool.Get()
187 | defer conn.Close()
188 |
189 | workerPoolsKey := redisKeyWorkerPools(ns)
190 |
191 | // Create redis data
192 | var err error
193 | err = conn.Send("SADD", workerPoolsKey, "1")
194 | assert.NoError(t, err)
195 | err = conn.Send("SADD", workerPoolsKey, "2")
196 | assert.NoError(t, err)
197 |
198 | err = conn.Send("HMSET", redisKeyHeartbeat(ns, "1"),
199 | "heartbeat_at", time.Now().Add(-1*time.Hour).Unix(),
200 | )
201 | assert.NoError(t, err)
202 |
203 | err = conn.Send("HMSET", redisKeyHeartbeat(ns, "2"),
204 | "heartbeat_at", time.Now().Add(-1*time.Hour).Unix(),
205 | "job_names", "type1,type2",
206 | )
207 | assert.NoError(t, err)
208 |
209 | err = conn.Flush()
210 | assert.NoError(t, err)
211 |
212 | // Test getting dead pool
213 | reaper := newDeadPoolReaper(ns, pool, []string{})
214 | deadPools, err := reaper.findDeadPools()
215 | assert.NoError(t, err)
216 | assert.Equal(t, map[string][]string{"2": {"type1", "type2"}}, deadPools)
217 |
218 | // Test requeueing jobs
219 | _, err = conn.Do("lpush", redisKeyJobsInProgress(ns, "1", "type1"), "foo")
220 | assert.NoError(t, err)
221 | _, err = conn.Do("lpush", redisKeyJobsInProgress(ns, "2", "type1"), "foo")
222 | assert.NoError(t, err)
223 |
224 | // Ensure 0 jobs in jobs queue
225 | jobsCount, err := redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
226 | assert.NoError(t, err)
227 | assert.Equal(t, 0, jobsCount)
228 |
229 | 	// Ensure 1 job in inprogress queue for each worker pool
230 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "1", "type1")))
231 | assert.NoError(t, err)
232 | assert.Equal(t, 1, jobsCount)
233 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
234 | assert.NoError(t, err)
235 | assert.Equal(t, 1, jobsCount)
236 |
237 | // Reap. Ensure job 2 is requeued but not job 1
238 | err = reaper.reap()
239 | assert.NoError(t, err)
240 |
241 | 	// Ensure 1 job in jobs queue
242 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobs(ns, "type1")))
243 | assert.NoError(t, err)
244 | assert.Equal(t, 1, jobsCount)
245 |
246 | // Ensure 1 job in inprogress queue for 1
247 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "1", "type1")))
248 | assert.NoError(t, err)
249 | assert.Equal(t, 1, jobsCount)
250 |
251 | // Ensure 0 jobs in inprogress queue for 2
252 | jobsCount, err = redis.Int(conn.Do("llen", redisKeyJobsInProgress(ns, "2", "type1")))
253 | assert.NoError(t, err)
254 | assert.Equal(t, 0, jobsCount)
255 | }
256 |
257 | func TestDeadPoolReaperWithWorkerPools(t *testing.T) {
258 | pool := newTestPool(":6379")
259 | ns := "work"
260 | job1 := "job1"
261 | stalePoolID := "aaa"
262 | cleanKeyspace(ns, pool)
263 | // test vars
264 | expectedDeadTime := 5 * time.Millisecond
265 |
266 | // create a stale job with a heartbeat
267 | conn := pool.Get()
268 | defer conn.Close()
269 | _, err := conn.Do("SADD", redisKeyWorkerPools(ns), stalePoolID)
270 | assert.NoError(t, err)
271 | _, err = conn.Do("LPUSH", redisKeyJobsInProgress(ns, stalePoolID, job1), `{"sleep": 10}`)
272 | assert.NoError(t, err)
273 | jobTypes := map[string]*jobType{"job1": nil}
274 | staleHeart := newWorkerPoolHeartbeater(ns, pool, stalePoolID, jobTypes, 1, []string{"id1"})
275 | staleHeart.start()
276 |
277 | // should have 1 stale job and empty job queue
278 | assert.EqualValues(t, 1, listSize(pool, redisKeyJobsInProgress(ns, stalePoolID, job1)))
279 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobs(ns, job1)))
280 |
281 | // setup a worker pool and start the reaper, which should restart the stale job above
282 | wp := setupTestWorkerPool(pool, ns, job1, 1, JobOptions{Priority: 1})
283 | wp.deadPoolReaper = newDeadPoolReaper(wp.namespace, wp.pool, []string{"job1"})
284 | wp.deadPoolReaper.deadTime = expectedDeadTime
285 | wp.deadPoolReaper.start()
286 |
287 | // sleep long enough for staleJob to be considered dead
288 | time.Sleep(expectedDeadTime * 2)
289 |
290 | // now we should have 1 job in queue and no more stale jobs
291 | assert.EqualValues(t, 1, listSize(pool, redisKeyJobs(ns, job1)))
292 | assert.EqualValues(t, 0, listSize(pool, redisKeyJobsInProgress(ns, wp.workerPoolID, job1)))
293 | staleHeart.stop()
294 | wp.deadPoolReaper.stop()
295 | }
296 |
297 | func TestDeadPoolReaperCleanStaleLocks(t *testing.T) {
298 | pool := newTestPool(":6379")
299 | ns := "work"
300 | cleanKeyspace(ns, pool)
301 |
302 | conn := pool.Get()
303 | defer conn.Close()
304 | job1, job2 := "type1", "type2"
305 | jobNames := []string{job1, job2}
306 | workerPoolID1, workerPoolID2 := "1", "2"
307 | lock1 := redisKeyJobsLock(ns, job1)
308 | lock2 := redisKeyJobsLock(ns, job2)
309 | lockInfo1 := redisKeyJobsLockInfo(ns, job1)
310 | lockInfo2 := redisKeyJobsLockInfo(ns, job2)
311 |
312 | // Create redis data
313 | var err error
314 | err = conn.Send("SET", lock1, 3)
315 | assert.NoError(t, err)
316 | err = conn.Send("SET", lock2, 1)
317 | assert.NoError(t, err)
318 | err = conn.Send("HSET", lockInfo1, workerPoolID1, 1) // workerPoolID1 holds 1 lock on job1
319 | assert.NoError(t, err)
320 | err = conn.Send("HSET", lockInfo1, workerPoolID2, 2) // workerPoolID2 holds 2 locks on job1
321 | assert.NoError(t, err)
322 | err = conn.Send("HSET", lockInfo2, workerPoolID2, 2) // test that we don't go below 0 on job2 lock
323 | assert.NoError(t, err)
324 | err = conn.Flush()
325 | assert.NoError(t, err)
326 |
327 | reaper := newDeadPoolReaper(ns, pool, jobNames)
328 | // clean lock info for workerPoolID1
329 | 	err = reaper.cleanStaleLockInfo(workerPoolID1, jobNames)
330 | assert.NoError(t, err)
331 | assert.EqualValues(t, 2, getInt64(pool, lock1)) // job1 lock should be decr by 1
332 | assert.EqualValues(t, 1, getInt64(pool, lock2)) // job2 lock is unchanged
333 | v, _ := conn.Do("HGET", lockInfo1, workerPoolID1) // workerPoolID1 removed from job1's lock info
334 | assert.Nil(t, v)
335 |
336 | // now clean lock info for workerPoolID2
337 | 	err = reaper.cleanStaleLockInfo(workerPoolID2, jobNames)
338 | assert.NoError(t, err)
339 | // both locks should be at 0
340 | assert.EqualValues(t, 0, getInt64(pool, lock1))
341 | assert.EqualValues(t, 0, getInt64(pool, lock2))
342 | // worker pool ID 2 removed from both lock info hashes
343 | v, err = conn.Do("HGET", lockInfo1, workerPoolID2)
344 | assert.Nil(t, v)
345 | v, err = conn.Do("HGET", lockInfo2, workerPoolID2)
346 | assert.Nil(t, v)
347 | }
348 |
--------------------------------------------------------------------------------
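The reaper tests above seed a "dead" pool by hand before exercising findDeadPools() and reap(). As a hedged illustration only (the pool ID "stale-pool", the job name "email", the fake payload, and the default "work" namespace are assumptions of mine, not part of the package), the same fixture can be produced with plain redigo calls against the key layout defined in redis.go below:

```go
package main

import (
	"time"

	"github.com/gomodule/redigo/redis"
)

func main() {
	conn, err := redis.Dial("tcp", ":6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ns := "work:" // namespace prefix, as used throughout the tests (assumption)

	// Register the pool ID in the worker_pools set.
	if _, err := conn.Do("SADD", ns+"worker_pools", "stale-pool"); err != nil {
		panic(err)
	}

	// Write a heartbeat that is already an hour old, so the reaper's
	// findDeadPools() would consider this pool dead.
	if _, err := conn.Do("HMSET", ns+"worker_pools:stale-pool",
		"heartbeat_at", time.Now().Add(-time.Hour).Unix(),
		"job_names", "email",
	); err != nil {
		panic(err)
	}

	// Park a fake payload in the dead pool's in-progress list so reap()
	// has something to move back onto work:jobs:email.
	if _, err := conn.Do("LPUSH", ns+"jobs:email:stale-pool:inprogress", `{"name":"email"}`); err != nil {
		panic(err)
	}
}
```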
/redis.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "bytes"
5 | "encoding/json"
6 | "fmt"
7 | )
8 |
9 | func redisNamespacePrefix(namespace string) string {
10 | l := len(namespace)
11 | if (l > 0) && (namespace[l-1] != ':') {
12 | namespace = namespace + ":"
13 | }
14 | return namespace
15 | }
16 |
17 | func redisKeyKnownJobs(namespace string) string {
18 | return redisNamespacePrefix(namespace) + "known_jobs"
19 | }
20 |
21 | // returns ":jobs:"
22 | // so that we can just append the job name and be good to go
23 | func redisKeyJobsPrefix(namespace string) string {
24 | return redisNamespacePrefix(namespace) + "jobs:"
25 | }
26 |
27 | func redisKeyJobs(namespace, jobName string) string {
28 | return redisKeyJobsPrefix(namespace) + jobName
29 | }
30 |
31 | func redisKeyJobsInProgress(namespace, poolID, jobName string) string {
32 | return fmt.Sprintf("%s:%s:inprogress", redisKeyJobs(namespace, jobName), poolID)
33 | }
34 |
35 | func redisKeyRetry(namespace string) string {
36 | return redisNamespacePrefix(namespace) + "retry"
37 | }
38 |
39 | func redisKeyDead(namespace string) string {
40 | return redisNamespacePrefix(namespace) + "dead"
41 | }
42 |
43 | func redisKeyScheduled(namespace string) string {
44 | return redisNamespacePrefix(namespace) + "scheduled"
45 | }
46 |
47 | func redisKeyWorkerObservation(namespace, workerID string) string {
48 | return redisNamespacePrefix(namespace) + "worker:" + workerID
49 | }
50 |
51 | func redisKeyWorkerPools(namespace string) string {
52 | return redisNamespacePrefix(namespace) + "worker_pools"
53 | }
54 |
55 | func redisKeyHeartbeat(namespace, workerPoolID string) string {
56 | return redisNamespacePrefix(namespace) + "worker_pools:" + workerPoolID
57 | }
58 |
59 | func redisKeyJobsPaused(namespace, jobName string) string {
60 | return redisKeyJobs(namespace, jobName) + ":paused"
61 | }
62 |
63 | func redisKeyJobsLock(namespace, jobName string) string {
64 | return redisKeyJobs(namespace, jobName) + ":lock"
65 | }
66 |
67 | func redisKeyJobsLockInfo(namespace, jobName string) string {
68 | return redisKeyJobs(namespace, jobName) + ":lock_info"
69 | }
70 |
71 | func redisKeyJobsConcurrency(namespace, jobName string) string {
72 | return redisKeyJobs(namespace, jobName) + ":max_concurrency"
73 | }
74 |
75 | func redisKeyUniqueJob(namespace, jobName string, args map[string]interface{}) (string, error) {
76 | var buf bytes.Buffer
77 |
78 | buf.WriteString(redisNamespacePrefix(namespace))
79 | buf.WriteString("unique:")
80 | buf.WriteString(jobName)
81 | buf.WriteRune(':')
82 |
83 | if args != nil {
84 | err := json.NewEncoder(&buf).Encode(args)
85 | if err != nil {
86 | return "", err
87 | }
88 | }
89 |
90 | return buf.String(), nil
91 | }
92 |
93 | func redisKeyLastPeriodicEnqueue(namespace string) string {
94 | return redisNamespacePrefix(namespace) + "last_periodic_enqueue"
95 | }
96 |
97 | // Used to fetch the next job to run
98 | //
99 | // Keys are passed in groups of fetchKeysPerJobType (six) per job type, in priority order:
100 | // KEYS[1] = the 1st job queue we want to try, eg, "work:jobs:emails"
101 | // KEYS[2] = the 1st job queue's in prog queue, eg, "work:jobs:emails:97c84119d13cb54119a38743:inprogress"
102 | // KEYS[3] = the 1st job queue's paused key, eg, "work:jobs:emails:paused"
103 | // KEYS[4] = the 1st job queue's lock key, eg, "work:jobs:emails:lock"
104 | // KEYS[5] = the 1st job queue's lock info hash, eg, "work:jobs:emails:lock_info"
105 | // KEYS[6] = the 1st job queue's max concurrency key; the same six keys repeat for the 2nd, 3rd, ... job queue
106 | // ARGV[1] = job queue's workerPoolID
107 | var redisLuaFetchJob = fmt.Sprintf(`
108 | local function acquireLock(lockKey, lockInfoKey, workerPoolID)
109 | redis.call('incr', lockKey)
110 | redis.call('hincrby', lockInfoKey, workerPoolID, 1)
111 | end
112 |
113 | local function haveJobs(jobQueue)
114 | return redis.call('llen', jobQueue) > 0
115 | end
116 |
117 | local function isPaused(pauseKey)
118 | return redis.call('get', pauseKey)
119 | end
120 |
121 | local function canRun(lockKey, maxConcurrency)
122 | local activeJobs = tonumber(redis.call('get', lockKey))
123 | if (not maxConcurrency or maxConcurrency == 0) or (not activeJobs or activeJobs < maxConcurrency) then
124 | -- default case: maxConcurrency not defined or set to 0 means no cap on concurrent jobs OR
125 | -- maxConcurrency set, but lock does not yet exist OR
126 | -- maxConcurrency set, lock is set, but not yet at max concurrency
127 | return true
128 | else
129 | -- we are at max capacity for running jobs
130 | return false
131 | end
132 | end
133 |
134 | local res, jobQueue, inProgQueue, pauseKey, lockKey, maxConcurrency, workerPoolID, concurrencyKey, lockInfoKey
135 | local keylen = #KEYS
136 | workerPoolID = ARGV[1]
137 |
138 | for i=1,keylen,%d do
139 | jobQueue = KEYS[i]
140 | inProgQueue = KEYS[i+1]
141 | pauseKey = KEYS[i+2]
142 | lockKey = KEYS[i+3]
143 | lockInfoKey = KEYS[i+4]
144 | concurrencyKey = KEYS[i+5]
145 |
146 | maxConcurrency = tonumber(redis.call('get', concurrencyKey))
147 |
148 | if haveJobs(jobQueue) and not isPaused(pauseKey) and canRun(lockKey, maxConcurrency) then
149 | acquireLock(lockKey, lockInfoKey, workerPoolID)
150 | res = redis.call('rpoplpush', jobQueue, inProgQueue)
151 | return {res, jobQueue, inProgQueue}
152 | end
153 | end
154 | return nil`, fetchKeysPerJobType)
155 |
156 | // Used by the reaper to re-enqueue jobs that were in progress
157 | //
158 | // Keys are passed in groups of requeueKeysPerJob (four) per job type:
159 | // KEYS[1] = the 1st job's in progress queue
160 | // KEYS[2] = the 1st job's job queue
161 | // KEYS[3] = the 1st job's lock key
162 | // KEYS[4] = the 1st job's lock info hash
163 | // KEYS[5..8] = the same four keys for the 2nd job
164 | // ...
165 | // ARGV[1] = the workerPoolID of the dead pool whose locks are released
166 | var redisLuaReenqueueJob = fmt.Sprintf(`
167 | local function releaseLock(lockKey, lockInfoKey, workerPoolID)
168 | redis.call('decr', lockKey)
169 | redis.call('hincrby', lockInfoKey, workerPoolID, -1)
170 | end
171 |
172 | local keylen = #KEYS
173 | local res, jobQueue, inProgQueue, workerPoolID, lockKey, lockInfoKey
174 | workerPoolID = ARGV[1]
175 |
176 | for i=1,keylen,%d do
177 | inProgQueue = KEYS[i]
178 | jobQueue = KEYS[i+1]
179 | lockKey = KEYS[i+2]
180 | lockInfoKey = KEYS[i+3]
181 | res = redis.call('rpoplpush', inProgQueue, jobQueue)
182 | if res then
183 | releaseLock(lockKey, lockInfoKey, workerPoolID)
184 | return {res, inProgQueue, jobQueue}
185 | end
186 | end
187 | return nil`, requeueKeysPerJob)
188 |
189 | // Used by the reaper to clean up stale locks
190 | //
191 | // KEYS[1] = the 1st job's lock
192 | // KEYS[2] = the 1st job's lock info hash
193 | // KEYS[3] = the 2nd job's lock
194 | // KEYS[4] = the 2nd job's lock info hash
195 | // ...
196 | // KEYS[N] = the last job's lock
197 | // KEYS[N+1] = the last job's lock info hash
198 | // ARGV[1] = the dead worker pool id
199 | var redisLuaReapStaleLocks = `
200 | local keylen = #KEYS
201 | local lock, lockInfo, deadLockCount
202 | local deadPoolID = ARGV[1]
203 |
204 | for i=1,keylen,2 do
205 | lock = KEYS[i]
206 | lockInfo = KEYS[i+1]
207 | deadLockCount = tonumber(redis.call('hget', lockInfo, deadPoolID))
208 |
209 | if deadLockCount then
210 | redis.call('decrby', lock, deadLockCount)
211 | redis.call('hdel', lockInfo, deadPoolID)
212 |
213 | if tonumber(redis.call('get', lock)) < 0 then
214 | redis.call('set', lock, 0)
215 | end
216 | end
217 | end
218 | return nil
219 | `
220 |
221 | // KEYS[1] = zset of jobs (retry or scheduled), eg work:retry
222 | // KEYS[2] = zset of dead, eg work:dead. If we don't know the jobName of a job, we'll put it in dead.
223 | // KEYS[3...] = known job queues, eg ["work:jobs:create_watch", "work:jobs:send_email", ...]
224 | // ARGV[1] = jobs prefix, eg, "work:jobs:". We'll take that and append the job name from the JSON object in order to queue up a job
225 | // ARGV[2] = current time in epoch seconds
226 | var redisLuaZremLpushCmd = `
227 | local res, j, queue
228 | res = redis.call('zrangebyscore', KEYS[1], '-inf', ARGV[2], 'LIMIT', 0, 1)
229 | if #res > 0 then
230 | j = cjson.decode(res[1])
231 | redis.call('zrem', KEYS[1], res[1])
232 | queue = ARGV[1] .. j['name']
233 | for _,v in pairs(KEYS) do
234 | if v == queue then
235 | j['t'] = tonumber(ARGV[2])
236 | redis.call('lpush', queue, cjson.encode(j))
237 | return 'ok'
238 | end
239 | end
240 | j['err'] = 'unknown job when requeueing'
241 | j['failed_at'] = tonumber(ARGV[2])
242 | redis.call('zadd', KEYS[2], ARGV[2], cjson.encode(j))
243 | return 'dead' -- put on dead queue
244 | end
245 | return nil
246 | `
247 |
248 | // KEYS[1] = zset of (dead|scheduled|retry), eg, work:dead
249 | // ARGV[1] = died at. The z rank of the job.
250 | // ARGV[2] = job ID to requeue
251 | // Returns:
252 | // - number of jobs deleted (typically 1 or 0)
253 | // - job bytes (last job only)
254 | var redisLuaDeleteSingleCmd = `
255 | local jobs, i, j, deletedCount, jobBytes
256 | jobs = redis.call('zrangebyscore', KEYS[1], ARGV[1], ARGV[1])
257 | local jobCount = #jobs
258 | jobBytes = ''
259 | deletedCount = 0
260 | for i=1,jobCount do
261 | j = cjson.decode(jobs[i])
262 | if j['id'] == ARGV[2] then
263 | redis.call('zrem', KEYS[1], jobs[i])
264 | deletedCount = deletedCount + 1
265 | jobBytes = jobs[i]
266 | end
267 | end
268 | return {deletedCount, jobBytes}
269 | `
270 |
271 | // KEYS[1] = zset of dead jobs, eg, work:dead
272 | // KEYS[2...] = known job queues, eg ["work:jobs:create_watch", "work:jobs:send_email", ...]
273 | // ARGV[1] = jobs prefix, eg, "work:jobs:". We'll take that and append the job name from the JSON object in order to queue up a job
274 | // ARGV[2] = current time in epoch seconds
275 | // ARGV[3] = died at. The z rank of the job.
276 | // ARGV[4] = job ID to requeue
277 | // Returns: number of jobs requeued (typically 1 or 0)
278 | var redisLuaRequeueSingleDeadCmd = `
279 | local jobs, i, j, queue, found, requeuedCount
280 | jobs = redis.call('zrangebyscore', KEYS[1], ARGV[3], ARGV[3])
281 | local jobCount = #jobs
282 | requeuedCount = 0
283 | for i=1,jobCount do
284 | j = cjson.decode(jobs[i])
285 | if j['id'] == ARGV[4] then
286 | redis.call('zrem', KEYS[1], jobs[i])
287 | queue = ARGV[1] .. j['name']
288 | found = false
289 | for _,v in pairs(KEYS) do
290 | if v == queue then
291 | j['t'] = tonumber(ARGV[2])
292 | j['fails'] = nil
293 | j['failed_at'] = nil
294 | j['err'] = nil
295 | redis.call('lpush', queue, cjson.encode(j))
296 | requeuedCount = requeuedCount + 1
297 | found = true
298 | break
299 | end
300 | end
301 | if not found then
302 | j['err'] = 'unknown job when requeueing'
303 | j['failed_at'] = tonumber(ARGV[2])
304 | redis.call('zadd', KEYS[1], ARGV[2] + 5, cjson.encode(j))
305 | end
306 | end
307 | end
308 | return requeuedCount
309 | `
310 |
311 | // KEYS[1] = zset of dead jobs, eg work:dead
312 | // KEYS[2...] = known job queues, eg ["work:jobs:create_watch", "work:jobs:send_email", ...]
313 | // ARGV[1] = jobs prefix, eg, "work:jobs:". We'll take that and append the job name from the JSON object in order to queue up a job
314 | // ARGV[2] = current time in epoch seconds
315 | // ARGV[3] = max number of jobs to requeue
316 | // Returns: number of jobs requeued
317 | var redisLuaRequeueAllDeadCmd = `
318 | local jobs, i, j, queue, found, requeuedCount
319 | jobs = redis.call('zrangebyscore', KEYS[1], '-inf', ARGV[2], 'LIMIT', 0, ARGV[3])
320 | local jobCount = #jobs
321 | requeuedCount = 0
322 | for i=1,jobCount do
323 | j = cjson.decode(jobs[i])
324 | redis.call('zrem', KEYS[1], jobs[i])
325 | queue = ARGV[1] .. j['name']
326 | found = false
327 | for _,v in pairs(KEYS) do
328 | if v == queue then
329 | j['t'] = tonumber(ARGV[2])
330 | j['fails'] = nil
331 | j['failed_at'] = nil
332 | j['err'] = nil
333 | redis.call('lpush', queue, cjson.encode(j))
334 | requeuedCount = requeuedCount + 1
335 | found = true
336 | break
337 | end
338 | end
339 | if not found then
340 | j['err'] = 'unknown job when requeueing'
341 | j['failed_at'] = tonumber(ARGV[2])
342 | redis.call('zadd', KEYS[1], ARGV[2] + 5, cjson.encode(j))
343 | end
344 | end
345 | return requeuedCount
346 | `
347 |
348 | // KEYS[1] = job queue to push onto
349 | // KEYS[2] = Unique job's key. Test for existence and set if we push.
350 | // ARGV[1] = job
351 | // ARGV[2] = updated job or just a 1 if arguments don't update
352 | var redisLuaEnqueueUnique = `
353 | if redis.call('set', KEYS[2], ARGV[2], 'NX', 'EX', '86400') then
354 | redis.call('lpush', KEYS[1], ARGV[1])
355 | return 'ok'
356 | else
357 | redis.call('set', KEYS[2], ARGV[2], 'EX', '86400')
358 | end
359 | return 'dup'
360 | `
361 |
362 | // KEYS[1] = scheduled job queue
363 | // KEYS[2] = Unique job's key. Test for existence and set if we push.
364 | // ARGV[1] = job
365 | // ARGV[2] = updated job or just a 1 if arguments don't update
366 | // ARGV[3] = epoch seconds for job to be run at
367 | var redisLuaEnqueueUniqueIn = `
368 | if redis.call('set', KEYS[2], ARGV[2], 'NX', 'EX', '86400') then
369 | redis.call('zadd', KEYS[1], ARGV[3], ARGV[1])
370 | return 'ok'
371 | else
372 | redis.call('set', KEYS[2], ARGV[2], 'EX', '86400')
373 | end
374 | return 'dup'
375 | `
376 |
--------------------------------------------------------------------------------
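A side note on redisLuaFetchJob above: the script consumes KEYS in fixed groups, one group per job type. The following is a minimal sketch of that grouping, assuming it lives inside package work (the key helpers are unexported) and that fetchKeysPerJobType matches the six keys the script reads per iteration; the function name fetchKeysFor is mine, not the library's:

```go
// fetchKeysFor builds one KEYS group for redisLuaFetchJob, in the exact
// order the script indexes them (KEYS[i] .. KEYS[i+5]).
func fetchKeysFor(namespace, jobName, poolID string) []string {
	return []string{
		redisKeyJobs(namespace, jobName),                   // job queue
		redisKeyJobsInProgress(namespace, poolID, jobName), // this pool's in-progress list
		redisKeyJobsPaused(namespace, jobName),             // pause flag
		redisKeyJobsLock(namespace, jobName),               // concurrency lock counter
		redisKeyJobsLockInfo(namespace, jobName),           // per-pool lock counts
		redisKeyJobsConcurrency(namespace, jobName),        // max_concurrency setting
	}
}
```

For example, fetchKeysFor("work", "emails", "97c84119d13cb54119a38743") would yield "work:jobs:emails", "work:jobs:emails:97c84119d13cb54119a38743:inprogress", "work:jobs:emails:paused", and so on, matching the key helpers defined above.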
/enqueue_test.go:
--------------------------------------------------------------------------------
1 | package work
2 |
3 | import (
4 | "fmt"
5 | "sync"
6 | "testing"
7 | "time"
8 |
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | func TestEnqueue(t *testing.T) {
13 | pool := newTestPool(":6379")
14 | ns := "work"
15 | cleanKeyspace(ns, pool)
16 | enqueuer := NewEnqueuer(ns, pool)
17 | job, err := enqueuer.Enqueue("wat", Q{"a": 1, "b": "cool"})
18 | assert.Nil(t, err)
19 | assert.Equal(t, "wat", job.Name)
20 | assert.True(t, len(job.ID) > 10) // Something is in it
21 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
22 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
23 | assert.Equal(t, "cool", job.ArgString("b"))
24 | assert.EqualValues(t, 1, job.ArgInt64("a"))
25 | assert.NoError(t, job.ArgError())
26 |
27 | // Make sure "wat" is in the known jobs
28 | assert.EqualValues(t, []string{"wat"}, knownJobs(pool, redisKeyKnownJobs(ns)))
29 |
30 | // Make sure the cache is set
31 | expiresAt := enqueuer.knownJobs["wat"]
32 | assert.True(t, expiresAt > (time.Now().Unix()+290))
33 |
34 | // Make sure the length of the queue is 1
35 | assert.EqualValues(t, 1, listSize(pool, redisKeyJobs(ns, "wat")))
36 |
37 | // Get the job
38 | j := jobOnQueue(pool, redisKeyJobs(ns, "wat"))
39 | assert.Equal(t, "wat", j.Name)
40 | assert.True(t, len(j.ID) > 10) // Something is in it
41 | assert.True(t, j.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
42 | assert.True(t, j.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
43 | assert.Equal(t, "cool", j.ArgString("b"))
44 | assert.EqualValues(t, 1, j.ArgInt64("a"))
45 | assert.NoError(t, j.ArgError())
46 |
47 | // Now enqueue another job, make sure that we can enqueue multiple
48 | _, err = enqueuer.Enqueue("wat", Q{"a": 1, "b": "cool"})
49 | _, err = enqueuer.Enqueue("wat", Q{"a": 1, "b": "cool"})
50 | assert.Nil(t, err)
51 | assert.EqualValues(t, 2, listSize(pool, redisKeyJobs(ns, "wat")))
52 | }
53 |
54 | func TestEnqueueIn(t *testing.T) {
55 | pool := newTestPool(":6379")
56 | ns := "work"
57 | cleanKeyspace(ns, pool)
58 | enqueuer := NewEnqueuer(ns, pool)
59 |
60 | // Set to expired value to make sure we update the set of known jobs
61 | enqueuer.knownJobs["wat"] = 4
62 |
63 | job, err := enqueuer.EnqueueIn("wat", 300, Q{"a": 1, "b": "cool"})
64 | assert.Nil(t, err)
65 | if assert.NotNil(t, job) {
66 | assert.Equal(t, "wat", job.Name)
67 | assert.True(t, len(job.ID) > 10) // Something is in it
68 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
69 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
70 | assert.Equal(t, "cool", job.ArgString("b"))
71 | assert.EqualValues(t, 1, job.ArgInt64("a"))
72 | assert.NoError(t, job.ArgError())
73 | assert.EqualValues(t, job.EnqueuedAt+300, job.RunAt)
74 | }
75 |
76 | // Make sure "wat" is in the known jobs
77 | assert.EqualValues(t, []string{"wat"}, knownJobs(pool, redisKeyKnownJobs(ns)))
78 |
79 | // Make sure the cache is set
80 | expiresAt := enqueuer.knownJobs["wat"]
81 | assert.True(t, expiresAt > (time.Now().Unix()+290))
82 |
83 | // Make sure the length of the scheduled job queue is 1
84 | assert.EqualValues(t, 1, zsetSize(pool, redisKeyScheduled(ns)))
85 |
86 | // Get the job
87 | score, j := jobOnZset(pool, redisKeyScheduled(ns))
88 |
89 | assert.True(t, score > time.Now().Unix()+290)
90 | assert.True(t, score <= time.Now().Unix()+300)
91 |
92 | assert.Equal(t, "wat", j.Name)
93 | assert.True(t, len(j.ID) > 10) // Something is in it
94 | assert.True(t, j.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
95 | assert.True(t, j.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
96 | assert.Equal(t, "cool", j.ArgString("b"))
97 | assert.EqualValues(t, 1, j.ArgInt64("a"))
98 | assert.NoError(t, j.ArgError())
99 | }
100 |
101 | func TestEnqueueUnique(t *testing.T) {
102 | pool := newTestPool(":6379")
103 | ns := "work"
104 | cleanKeyspace(ns, pool)
105 | enqueuer := NewEnqueuer(ns, pool)
106 | var mutex = &sync.Mutex{}
107 | job, err := enqueuer.EnqueueUnique("wat", Q{"a": 1, "b": "cool"})
108 | assert.NoError(t, err)
109 | if assert.NotNil(t, job) {
110 | assert.Equal(t, "wat", job.Name)
111 | assert.True(t, len(job.ID) > 10) // Something is in it
112 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
113 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
114 | assert.Equal(t, "cool", job.ArgString("b"))
115 | assert.EqualValues(t, 1, job.ArgInt64("a"))
116 | assert.NoError(t, job.ArgError())
117 | }
118 |
119 | job, err = enqueuer.EnqueueUnique("wat", Q{"a": 1, "b": "cool"})
120 | assert.NoError(t, err)
121 | assert.Nil(t, job)
122 |
123 | job, err = enqueuer.EnqueueUnique("wat", Q{"a": 1, "b": "coolio"})
124 | assert.NoError(t, err)
125 | assert.NotNil(t, job)
126 |
127 | job, err = enqueuer.EnqueueUnique("wat", nil)
128 | assert.NoError(t, err)
129 | assert.NotNil(t, job)
130 |
131 | job, err = enqueuer.EnqueueUnique("wat", nil)
132 | assert.NoError(t, err)
133 | assert.Nil(t, job)
134 |
135 | job, err = enqueuer.EnqueueUnique("taw", nil)
136 | assert.NoError(t, err)
137 | assert.NotNil(t, job)
138 |
139 | // Process the queues. Ensure the right number of jobs were processed
140 | var wats, taws int64
141 | wp := NewWorkerPool(TestContext{}, 3, ns, pool)
142 | wp.JobWithOptions("wat", JobOptions{Priority: 1, MaxFails: 1}, func(job *Job) error {
143 | mutex.Lock()
144 | wats++
145 | mutex.Unlock()
146 | return nil
147 | })
148 | wp.JobWithOptions("taw", JobOptions{Priority: 1, MaxFails: 1}, func(job *Job) error {
149 | mutex.Lock()
150 | taws++
151 | mutex.Unlock()
152 | return fmt.Errorf("ohno")
153 | })
154 | wp.Start()
155 | wp.Drain()
156 | wp.Stop()
157 |
158 | assert.EqualValues(t, 3, wats)
159 | assert.EqualValues(t, 1, taws)
160 |
161 | // Enqueue again. Ensure we can.
162 | job, err = enqueuer.EnqueueUnique("wat", Q{"a": 1, "b": "cool"})
163 | assert.NoError(t, err)
164 | assert.NotNil(t, job)
165 |
166 | job, err = enqueuer.EnqueueUnique("wat", Q{"a": 1, "b": "coolio"})
167 | assert.NoError(t, err)
168 | assert.NotNil(t, job)
169 |
170 | // Even though taw resulted in an error, we should still be able to re-queue it.
171 | // This could result in multiple taws enqueued at the same time in a production system.
172 | job, err = enqueuer.EnqueueUnique("taw", nil)
173 | assert.NoError(t, err)
174 | assert.NotNil(t, job)
175 | }
176 |
177 | func TestEnqueueUniqueIn(t *testing.T) {
178 | pool := newTestPool(":6379")
179 | ns := "work"
180 | cleanKeyspace(ns, pool)
181 | enqueuer := NewEnqueuer(ns, pool)
182 |
183 | // Enqueue two unique jobs -- ensure one job sticks.
184 | job, err := enqueuer.EnqueueUniqueIn("wat", 300, Q{"a": 1, "b": "cool"})
185 | assert.NoError(t, err)
186 | if assert.NotNil(t, job) {
187 | assert.Equal(t, "wat", job.Name)
188 | assert.True(t, len(job.ID) > 10) // Something is in it
189 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
190 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
191 | assert.Equal(t, "cool", job.ArgString("b"))
192 | assert.EqualValues(t, 1, job.ArgInt64("a"))
193 | assert.NoError(t, job.ArgError())
194 | assert.EqualValues(t, job.EnqueuedAt+300, job.RunAt)
195 | }
196 |
197 | job, err = enqueuer.EnqueueUniqueIn("wat", 10, Q{"a": 1, "b": "cool"})
198 | assert.NoError(t, err)
199 | assert.Nil(t, job)
200 |
201 | // Get the job
202 | score, j := jobOnZset(pool, redisKeyScheduled(ns))
203 |
204 | assert.True(t, score > time.Now().Unix()+290) // We don't want to overwrite the time
205 | assert.True(t, score <= time.Now().Unix()+300)
206 |
207 | assert.Equal(t, "wat", j.Name)
208 | assert.True(t, len(j.ID) > 10) // Something is in it
209 | assert.True(t, j.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
210 | assert.True(t, j.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
211 | assert.Equal(t, "cool", j.ArgString("b"))
212 | assert.EqualValues(t, 1, j.ArgInt64("a"))
213 | assert.NoError(t, j.ArgError())
214 | assert.True(t, j.Unique)
215 |
216 | 	// Now enqueue more jobs and ensure uniqueness is keyed on job name plus arguments
217 | job, err = enqueuer.EnqueueUniqueIn("wat", 300, Q{"a": 1, "b": "coolio"})
218 | assert.NoError(t, err)
219 | assert.NotNil(t, job)
220 |
221 | job, err = enqueuer.EnqueueUniqueIn("wat", 300, nil)
222 | assert.NoError(t, err)
223 | assert.NotNil(t, job)
224 |
225 | job, err = enqueuer.EnqueueUniqueIn("wat", 300, nil)
226 | assert.NoError(t, err)
227 | assert.Nil(t, job)
228 |
229 | job, err = enqueuer.EnqueueUniqueIn("taw", 300, nil)
230 | assert.NoError(t, err)
231 | assert.NotNil(t, job)
232 | }
233 |
234 | func TestEnqueueUniqueByKey(t *testing.T) {
235 | var arg3 string
236 | var arg4 string
237 |
238 | pool := newTestPool(":6379")
239 | ns := "work"
240 | cleanKeyspace(ns, pool)
241 | enqueuer := NewEnqueuer(ns, pool)
242 | var mutex = &sync.Mutex{}
243 | job, err := enqueuer.EnqueueUniqueByKey("wat", Q{"a": 3, "b": "foo"}, Q{"key": "123"})
244 | assert.NoError(t, err)
245 | if assert.NotNil(t, job) {
246 | assert.Equal(t, "wat", job.Name)
247 | assert.True(t, len(job.ID) > 10) // Something is in it
248 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
249 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
250 | assert.Equal(t, "foo", job.ArgString("b"))
251 | assert.EqualValues(t, 3, job.ArgInt64("a"))
252 | assert.NoError(t, job.ArgError())
253 | }
254 |
255 | job, err = enqueuer.EnqueueUniqueByKey("wat", Q{"a": 3, "b": "bar"}, Q{"key": "123"})
256 | assert.NoError(t, err)
257 | assert.Nil(t, job)
258 |
259 | job, err = enqueuer.EnqueueUniqueByKey("wat", Q{"a": 4, "b": "baz"}, Q{"key": "124"})
260 | assert.NoError(t, err)
261 | assert.NotNil(t, job)
262 |
263 | job, err = enqueuer.EnqueueUniqueByKey("taw", nil, Q{"key": "125"})
264 | assert.NoError(t, err)
265 | assert.NotNil(t, job)
266 |
267 | // Process the queues. Ensure the right number of jobs were processed
268 | var wats, taws int64
269 | wp := NewWorkerPool(TestContext{}, 3, ns, pool)
270 | wp.JobWithOptions("wat", JobOptions{Priority: 1, MaxFails: 1}, func(job *Job) error {
271 | mutex.Lock()
272 | argA := job.Args["a"].(float64)
273 | argB := job.Args["b"].(string)
274 | if argA == 3 {
275 | arg3 = argB
276 | }
277 | if argA == 4 {
278 | arg4 = argB
279 | }
280 |
281 | wats++
282 | mutex.Unlock()
283 | return nil
284 | })
285 | wp.JobWithOptions("taw", JobOptions{Priority: 1, MaxFails: 1}, func(job *Job) error {
286 | mutex.Lock()
287 | taws++
288 | mutex.Unlock()
289 | return fmt.Errorf("ohno")
290 | })
291 | wp.Start()
292 | wp.Drain()
293 | wp.Stop()
294 |
295 | assert.EqualValues(t, 2, wats)
296 | assert.EqualValues(t, 1, taws)
297 |
298 | // Check that arguments got updated to new value
299 | assert.EqualValues(t, "bar", arg3)
300 | assert.EqualValues(t, "baz", arg4)
301 |
302 | // Enqueue again. Ensure we can.
303 | job, err = enqueuer.EnqueueUniqueByKey("wat", Q{"a": 1, "b": "cool"}, Q{"key": "123"})
304 | assert.NoError(t, err)
305 | assert.NotNil(t, job)
306 |
307 | job, err = enqueuer.EnqueueUniqueByKey("wat", Q{"a": 1, "b": "coolio"}, Q{"key": "124"})
308 | assert.NoError(t, err)
309 | assert.NotNil(t, job)
310 |
311 | // Even though taw resulted in an error, we should still be able to re-queue it.
312 | // This could result in multiple taws enqueued at the same time in a production system.
313 | job, err = enqueuer.EnqueueUniqueByKey("taw", nil, Q{"key": "123"})
314 | assert.NoError(t, err)
315 | assert.NotNil(t, job)
316 | }
317 |
318 | func TestEnqueueUniqueInByKey(t *testing.T) {
319 | pool := newTestPool(":6379")
320 | ns := "work"
321 | cleanKeyspace(ns, pool)
322 | enqueuer := NewEnqueuer(ns, pool)
323 |
324 | // Enqueue two unique jobs -- ensure one job sticks.
325 | job, err := enqueuer.EnqueueUniqueInByKey("wat", 300, Q{"a": 1, "b": "cool"}, Q{"key": "123"})
326 | assert.NoError(t, err)
327 | if assert.NotNil(t, job) {
328 | assert.Equal(t, "wat", job.Name)
329 | assert.True(t, len(job.ID) > 10) // Something is in it
330 | assert.True(t, job.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
331 | assert.True(t, job.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
332 | assert.Equal(t, "cool", job.ArgString("b"))
333 | assert.EqualValues(t, 1, job.ArgInt64("a"))
334 | assert.NoError(t, job.ArgError())
335 | assert.EqualValues(t, job.EnqueuedAt+300, job.RunAt)
336 | }
337 |
338 | job, err = enqueuer.EnqueueUniqueInByKey("wat", 10, Q{"a": 1, "b": "cool"}, Q{"key": "123"})
339 | assert.NoError(t, err)
340 | assert.Nil(t, job)
341 |
342 | // Get the job
343 | score, j := jobOnZset(pool, redisKeyScheduled(ns))
344 |
345 | assert.True(t, score > time.Now().Unix()+290) // We don't want to overwrite the time
346 | assert.True(t, score <= time.Now().Unix()+300)
347 |
348 | assert.Equal(t, "wat", j.Name)
349 | assert.True(t, len(j.ID) > 10) // Something is in it
350 | assert.True(t, j.EnqueuedAt > (time.Now().Unix()-10)) // Within 10 seconds
351 | assert.True(t, j.EnqueuedAt < (time.Now().Unix()+10)) // Within 10 seconds
352 | assert.Equal(t, "cool", j.ArgString("b"))
353 | assert.EqualValues(t, 1, j.ArgInt64("a"))
354 | assert.NoError(t, j.ArgError())
355 | assert.True(t, j.Unique)
356 | }
357 |
--------------------------------------------------------------------------------
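For reference, a hedged sketch of the enqueue API exercised by the tests above, written as application code rather than a test; the Redis address, pool sizing, job name "send_email", and arguments are illustrative assumptions:

```go
package main

import (
	"log"

	"github.com/gocraft/work"
	"github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redis.Pool{
		MaxActive: 5,
		MaxIdle:   5,
		Wait:      true,
		Dial:      func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}
	enqueuer := work.NewEnqueuer("work", pool)

	// Plain enqueue: the job is pushed onto work:jobs:send_email immediately.
	if _, err := enqueuer.Enqueue("send_email", work.Q{"address": "test@example.com"}); err != nil {
		log.Fatal(err)
	}

	// Scheduled enqueue: the job lands in the work:scheduled zset,
	// scored roughly 300 seconds from now.
	if _, err := enqueuer.EnqueueIn("send_email", 300, work.Q{"address": "test@example.com"}); err != nil {
		log.Fatal(err)
	}

	// Unique enqueue: a second identical call made before the first job runs
	// returns a nil job and a nil error, as TestEnqueueUnique asserts.
	if _, err := enqueuer.EnqueueUnique("send_email", nil); err != nil {
		log.Fatal(err)
	}
}
```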
/webui/webui_test.go:
--------------------------------------------------------------------------------
1 | package webui
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "net/http"
7 | "net/http/httptest"
8 | "sync"
9 | "testing"
10 | "time"
11 |
12 | "github.com/gocraft/work"
13 | "github.com/gomodule/redigo/redis"
14 | "github.com/stretchr/testify/assert"
15 | )
16 |
17 | func TestWebUIStartStop(t *testing.T) {
18 | pool := newTestPool(":6379")
19 | ns := "work"
20 | cleanKeyspace(ns, pool)
21 |
22 | s := NewServer(ns, pool, ":6666")
23 | s.Start()
24 | s.Stop()
25 | }
26 |
27 | type TestContext struct{}
28 |
29 | func TestWebUIQueues(t *testing.T) {
30 | pool := newTestPool(":6379")
31 | ns := "work"
32 | cleanKeyspace(ns, pool)
33 |
34 | 	// Get some stuff to show up in the jobs:
35 | enqueuer := work.NewEnqueuer(ns, pool)
36 | _, err := enqueuer.Enqueue("wat", nil)
37 | assert.NoError(t, err)
38 | enqueuer.Enqueue("foo", nil)
39 | enqueuer.Enqueue("zaz", nil)
40 |
41 | 	// Start a pool to work on the queues. A side effect of that is
42 | 	// registering which jobs are available.
43 | wp := work.NewWorkerPool(TestContext{}, 10, ns, pool)
44 | wp.Job("wat", func(job *work.Job) error {
45 | return nil
46 | })
47 | wp.Job("foo", func(job *work.Job) error {
48 | return nil
49 | })
50 | wp.Job("zaz", func(job *work.Job) error {
51 | return nil
52 | })
53 | wp.Start()
54 | time.Sleep(20 * time.Millisecond)
55 | wp.Stop()
56 |
57 | // Now that we have the jobs, populate some queues
58 | enqueuer.Enqueue("wat", nil)
59 | enqueuer.Enqueue("wat", nil)
60 | enqueuer.Enqueue("wat", nil)
61 | enqueuer.Enqueue("foo", nil)
62 | enqueuer.Enqueue("foo", nil)
63 | enqueuer.Enqueue("zaz", nil)
64 |
65 | s := NewServer(ns, pool, ":6666")
66 |
67 | recorder := httptest.NewRecorder()
68 | request, _ := http.NewRequest("GET", "/queues", nil)
69 | s.router.ServeHTTP(recorder, request)
70 | assert.Equal(t, 200, recorder.Code)
71 |
72 | var res []interface{}
73 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
74 | assert.NoError(t, err)
75 |
76 | assert.Equal(t, 3, len(res))
77 |
78 | foomap, ok := res[0].(map[string]interface{})
79 | assert.True(t, ok)
80 | assert.Equal(t, "foo", foomap["job_name"])
81 | assert.EqualValues(t, 2, foomap["count"])
82 | assert.EqualValues(t, 0, foomap["latency"])
83 | }
84 |
85 | func TestWebUIWorkerPools(t *testing.T) {
86 | pool := newTestPool(":6379")
87 | ns := "work"
88 | cleanKeyspace(ns, pool)
89 |
90 | wp := work.NewWorkerPool(TestContext{}, 10, ns, pool)
91 | wp.Job("wat", func(job *work.Job) error { return nil })
92 | wp.Job("bob", func(job *work.Job) error { return nil })
93 | wp.Start()
94 | defer wp.Stop()
95 |
96 | wp2 := work.NewWorkerPool(TestContext{}, 11, ns, pool)
97 | wp2.Job("foo", func(job *work.Job) error { return nil })
98 | wp2.Job("bar", func(job *work.Job) error { return nil })
99 | wp2.Start()
100 | defer wp2.Stop()
101 |
102 | time.Sleep(20 * time.Millisecond)
103 |
104 | s := NewServer(ns, pool, ":6666")
105 |
106 | recorder := httptest.NewRecorder()
107 | request, _ := http.NewRequest("GET", "/worker_pools", nil)
108 | s.router.ServeHTTP(recorder, request)
109 | assert.Equal(t, 200, recorder.Code)
110 |
111 | var res []interface{}
112 | err := json.Unmarshal(recorder.Body.Bytes(), &res)
113 | assert.NoError(t, err)
114 |
115 | assert.Equal(t, 2, len(res))
116 |
117 | w1stat, ok := res[0].(map[string]interface{})
118 | assert.True(t, ok)
119 | assert.True(t, w1stat["worker_pool_id"] != "")
120 | // NOTE: WorkerPoolStatus is tested elsewhere.
121 | }
122 |
123 | func TestWebUIBusyWorkers(t *testing.T) {
124 | pool := newTestPool(":6379")
125 | ns := "work"
126 | cleanKeyspace(ns, pool)
127 |
128 | // Keep a job in the in-progress state without using sleeps
129 | wgroup := sync.WaitGroup{}
130 | wgroup2 := sync.WaitGroup{}
131 | wgroup2.Add(1)
132 |
133 | wp := work.NewWorkerPool(TestContext{}, 10, ns, pool)
134 | wp.Job("wat", func(job *work.Job) error {
135 | wgroup2.Done()
136 | wgroup.Wait()
137 | return nil
138 | })
139 | wp.Start()
140 | defer wp.Stop()
141 |
142 | wp2 := work.NewWorkerPool(TestContext{}, 11, ns, pool)
143 | wp2.Start()
144 | defer wp2.Stop()
145 |
146 | time.Sleep(10 * time.Millisecond)
147 |
148 | s := NewServer(ns, pool, ":6666")
149 |
150 | recorder := httptest.NewRecorder()
151 | request, _ := http.NewRequest("GET", "/busy_workers", nil)
152 | s.router.ServeHTTP(recorder, request)
153 | assert.Equal(t, 200, recorder.Code)
154 |
155 | var res []interface{}
156 | err := json.Unmarshal(recorder.Body.Bytes(), &res)
157 | assert.NoError(t, err)
158 | assert.Equal(t, 0, len(res))
159 |
160 | wgroup.Add(1)
161 |
162 | // Ok, now let's make a busy worker
163 | enqueuer := work.NewEnqueuer(ns, pool)
164 | enqueuer.Enqueue("wat", nil)
165 | wgroup2.Wait()
166 | 	time.Sleep(5 * time.Millisecond) // need to let the observer process
167 |
168 | recorder = httptest.NewRecorder()
169 | request, _ = http.NewRequest("GET", "/busy_workers", nil)
170 | s.router.ServeHTTP(recorder, request)
171 | wgroup.Done()
172 | assert.Equal(t, 200, recorder.Code)
173 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
174 | assert.NoError(t, err)
175 | assert.Equal(t, 1, len(res))
176 |
177 | if len(res) == 1 {
178 | hash, ok := res[0].(map[string]interface{})
179 | assert.True(t, ok)
180 | assert.Equal(t, "wat", hash["job_name"])
181 | assert.Equal(t, true, hash["is_busy"])
182 | }
183 | }
184 |
185 | func TestWebUIRetryJobs(t *testing.T) {
186 | pool := newTestPool(":6379")
187 | ns := "work"
188 | cleanKeyspace(ns, pool)
189 |
190 | enqueuer := work.NewEnqueuer(ns, pool)
191 | _, err := enqueuer.Enqueue("wat", nil)
192 | assert.Nil(t, err)
193 |
194 | wp := work.NewWorkerPool(TestContext{}, 2, ns, pool)
195 | wp.Job("wat", func(job *work.Job) error {
196 | return fmt.Errorf("ohno")
197 | })
198 | wp.Start()
199 | wp.Drain()
200 | wp.Stop()
201 |
202 | s := NewServer(ns, pool, ":6666")
203 |
204 | recorder := httptest.NewRecorder()
205 | request, _ := http.NewRequest("GET", "/retry_jobs", nil)
206 | s.router.ServeHTTP(recorder, request)
207 | assert.Equal(t, 200, recorder.Code)
208 | var res struct {
209 | Count int64 `json:"count"`
210 | Jobs []struct {
211 | RetryAt int64 `json:"retry_at"`
212 | Name string `json:"name"`
213 | Fails int64 `json:"fails"`
214 | } `json:"jobs"`
215 | }
216 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
217 | assert.NoError(t, err)
218 |
219 | assert.EqualValues(t, 1, res.Count)
220 | assert.Equal(t, 1, len(res.Jobs))
221 | if len(res.Jobs) == 1 {
222 | assert.True(t, res.Jobs[0].RetryAt > 0)
223 | assert.Equal(t, "wat", res.Jobs[0].Name)
224 | assert.EqualValues(t, 1, res.Jobs[0].Fails)
225 | }
226 | }
227 |
228 | func TestWebUIScheduledJobs(t *testing.T) {
229 | pool := newTestPool(":6379")
230 | ns := "testwork"
231 | cleanKeyspace(ns, pool)
232 |
233 | enqueuer := work.NewEnqueuer(ns, pool)
234 | _, err := enqueuer.EnqueueIn("watter", 1, nil)
235 | assert.Nil(t, err)
236 |
237 | s := NewServer(ns, pool, ":6666")
238 |
239 | recorder := httptest.NewRecorder()
240 | request, _ := http.NewRequest("GET", "/scheduled_jobs", nil)
241 | s.router.ServeHTTP(recorder, request)
242 | assert.Equal(t, 200, recorder.Code)
243 | var res struct {
244 | Count int64 `json:"count"`
245 | Jobs []struct {
246 | RunAt int64 `json:"run_at"`
247 | Name string `json:"name"`
248 | } `json:"jobs"`
249 | }
250 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
251 | assert.NoError(t, err)
252 |
253 | assert.EqualValues(t, 1, res.Count)
254 | assert.Equal(t, 1, len(res.Jobs))
255 | if len(res.Jobs) == 1 {
256 | assert.True(t, res.Jobs[0].RunAt > 0)
257 | assert.Equal(t, "watter", res.Jobs[0].Name)
258 | }
259 | }
260 |
261 | func TestWebUIDeadJobs(t *testing.T) {
262 | pool := newTestPool(":6379")
263 | ns := "testwork"
264 | cleanKeyspace(ns, pool)
265 |
266 | enqueuer := work.NewEnqueuer(ns, pool)
267 | _, err := enqueuer.Enqueue("wat", nil)
268 | _, err = enqueuer.Enqueue("wat", nil)
269 | assert.Nil(t, err)
270 |
271 | wp := work.NewWorkerPool(TestContext{}, 2, ns, pool)
272 | wp.JobWithOptions("wat", work.JobOptions{Priority: 1, MaxFails: 1}, func(job *work.Job) error {
273 | return fmt.Errorf("ohno")
274 | })
275 | wp.Start()
276 | wp.Drain()
277 | wp.Stop()
278 |
279 | s := NewServer(ns, pool, ":6666")
280 |
281 | recorder := httptest.NewRecorder()
282 | request, _ := http.NewRequest("GET", "/dead_jobs", nil)
283 | s.router.ServeHTTP(recorder, request)
284 | assert.Equal(t, 200, recorder.Code)
285 | var res struct {
286 | Count int64 `json:"count"`
287 | Jobs []struct {
288 | DiedAt int64 `json:"died_at"`
289 | Name string `json:"name"`
290 | ID string `json:"id"`
291 | Fails int64 `json:"fails"`
292 | } `json:"jobs"`
293 | }
294 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
295 | assert.NoError(t, err)
296 |
297 | assert.EqualValues(t, 2, res.Count)
298 | assert.Equal(t, 2, len(res.Jobs))
299 | var diedAt0, diedAt1 int64
300 | var id0, id1 string
301 | if len(res.Jobs) == 2 {
302 | assert.True(t, res.Jobs[0].DiedAt > 0)
303 | assert.Equal(t, "wat", res.Jobs[0].Name)
304 | assert.EqualValues(t, 1, res.Jobs[0].Fails)
305 |
306 | diedAt0, diedAt1 = res.Jobs[0].DiedAt, res.Jobs[1].DiedAt
307 | id0, id1 = res.Jobs[0].ID, res.Jobs[1].ID
308 | } else {
309 | return
310 | }
311 |
312 | // Ok, now let's retry one and delete one.
313 | recorder = httptest.NewRecorder()
314 | request, _ = http.NewRequest("POST", fmt.Sprintf("/delete_dead_job/%d/%s", diedAt0, id0), nil)
315 | s.router.ServeHTTP(recorder, request)
316 | assert.Equal(t, 200, recorder.Code)
317 |
318 | recorder = httptest.NewRecorder()
319 | request, _ = http.NewRequest("POST", fmt.Sprintf("/retry_dead_job/%d/%s", diedAt1, id1), nil)
320 | s.router.ServeHTTP(recorder, request)
321 | assert.Equal(t, 200, recorder.Code)
322 |
323 | // Make sure dead queue is empty
324 | recorder = httptest.NewRecorder()
325 | request, _ = http.NewRequest("GET", "/dead_jobs", nil)
326 | s.router.ServeHTTP(recorder, request)
327 | assert.Equal(t, 200, recorder.Code)
328 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
329 | assert.NoError(t, err)
330 | assert.EqualValues(t, 0, res.Count)
331 |
332 | // Make sure the "wat" queue has 1 item in it
333 | recorder = httptest.NewRecorder()
334 | request, _ = http.NewRequest("GET", "/queues", nil)
335 | s.router.ServeHTTP(recorder, request)
336 | assert.Equal(t, 200, recorder.Code)
337 | var queueRes []struct {
338 | JobName string `json:"job_name"`
339 | Count int64 `json:"count"`
340 | }
341 | err = json.Unmarshal(recorder.Body.Bytes(), &queueRes)
342 | assert.NoError(t, err)
343 | assert.Equal(t, 1, len(queueRes))
344 | if len(queueRes) == 1 {
345 | assert.Equal(t, "wat", queueRes[0].JobName)
346 | }
347 | }
348 |
349 | func TestWebUIDeadJobsDeleteRetryAll(t *testing.T) {
350 | pool := newTestPool(":6379")
351 | ns := "testwork"
352 | cleanKeyspace(ns, pool)
353 |
354 | enqueuer := work.NewEnqueuer(ns, pool)
355 | _, err := enqueuer.Enqueue("wat", nil)
356 | _, err = enqueuer.Enqueue("wat", nil)
357 | assert.Nil(t, err)
358 |
359 | wp := work.NewWorkerPool(TestContext{}, 2, ns, pool)
360 | wp.JobWithOptions("wat", work.JobOptions{Priority: 1, MaxFails: 1}, func(job *work.Job) error {
361 | return fmt.Errorf("ohno")
362 | })
363 | wp.Start()
364 | wp.Drain()
365 | wp.Stop()
366 |
367 | s := NewServer(ns, pool, ":6666")
368 |
369 | recorder := httptest.NewRecorder()
370 | request, _ := http.NewRequest("GET", "/dead_jobs", nil)
371 | s.router.ServeHTTP(recorder, request)
372 | assert.Equal(t, 200, recorder.Code)
373 | var res struct {
374 | Count int64 `json:"count"`
375 | Jobs []struct {
376 | DiedAt int64 `json:"died_at"`
377 | Name string `json:"name"`
378 | ID string `json:"id"`
379 | Fails int64 `json:"fails"`
380 | } `json:"jobs"`
381 | }
382 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
383 | assert.NoError(t, err)
384 |
385 | assert.EqualValues(t, 2, res.Count)
386 | assert.Equal(t, 2, len(res.Jobs))
387 |
388 | // Ok, now let's retry all
389 | recorder = httptest.NewRecorder()
390 | request, _ = http.NewRequest("POST", "/retry_all_dead_jobs", nil)
391 | s.router.ServeHTTP(recorder, request)
392 | assert.Equal(t, 200, recorder.Code)
393 |
394 | // Make sure dead queue is empty
395 | recorder = httptest.NewRecorder()
396 | request, _ = http.NewRequest("GET", "/dead_jobs", nil)
397 | s.router.ServeHTTP(recorder, request)
398 | assert.Equal(t, 200, recorder.Code)
399 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
400 | assert.NoError(t, err)
401 | assert.EqualValues(t, 0, res.Count)
402 |
403 | // Make sure the "wat" queue has 2 items in it
404 | recorder = httptest.NewRecorder()
405 | request, _ = http.NewRequest("GET", "/queues", nil)
406 | s.router.ServeHTTP(recorder, request)
407 | assert.Equal(t, 200, recorder.Code)
408 | var queueRes []struct {
409 | JobName string `json:"job_name"`
410 | Count int64 `json:"count"`
411 | }
412 | err = json.Unmarshal(recorder.Body.Bytes(), &queueRes)
413 | assert.NoError(t, err)
414 | assert.Equal(t, 1, len(queueRes))
415 | if len(queueRes) == 1 {
416 | assert.Equal(t, "wat", queueRes[0].JobName)
417 | assert.EqualValues(t, 2, queueRes[0].Count)
418 | }
419 |
420 | // Make them dead again:
421 | wp.Start()
422 | wp.Drain()
423 | wp.Stop()
424 |
425 | // Make sure we have 2 dead things again:
426 | recorder = httptest.NewRecorder()
427 | request, _ = http.NewRequest("GET", "/dead_jobs", nil)
428 | s.router.ServeHTTP(recorder, request)
429 | assert.Equal(t, 200, recorder.Code)
430 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
431 | assert.NoError(t, err)
432 | assert.EqualValues(t, 2, res.Count)
433 |
434 | // Now delete them:
435 | recorder = httptest.NewRecorder()
436 | request, _ = http.NewRequest("POST", "/delete_all_dead_jobs", nil)
437 | s.router.ServeHTTP(recorder, request)
438 | assert.Equal(t, 200, recorder.Code)
439 |
440 | // Make sure dead queue is empty
441 | recorder = httptest.NewRecorder()
442 | request, _ = http.NewRequest("GET", "/dead_jobs", nil)
443 | s.router.ServeHTTP(recorder, request)
444 | assert.Equal(t, 200, recorder.Code)
445 | err = json.Unmarshal(recorder.Body.Bytes(), &res)
446 | assert.NoError(t, err)
447 | assert.EqualValues(t, 0, res.Count)
448 | }
449 |
450 | func TestWebUIAssets(t *testing.T) {
451 | pool := newTestPool(":6379")
452 | ns := "testwork"
453 | s := NewServer(ns, pool, ":6666")
454 |
455 | recorder := httptest.NewRecorder()
456 | request, _ := http.NewRequest("GET", "/", nil)
457 | s.router.ServeHTTP(recorder, request)
458 | 	body := recorder.Body.String()
459 | assert.Regexp(t, "html", body)
460 |
461 | recorder = httptest.NewRecorder()
462 | request, _ = http.NewRequest("GET", "/work.js", nil)
463 | s.router.ServeHTTP(recorder, request)
464 | }
465 |
466 | func newTestPool(addr string) *redis.Pool {
467 | return &redis.Pool{
468 | MaxActive: 3,
469 | MaxIdle: 3,
470 | IdleTimeout: 240 * time.Second,
471 | Dial: func() (redis.Conn, error) {
472 | return redis.Dial("tcp", addr)
473 | },
474 | Wait: true,
475 | }
476 | }
477 |
478 | func cleanKeyspace(namespace string, pool *redis.Pool) {
479 | conn := pool.Get()
480 | defer conn.Close()
481 |
482 | keys, err := redis.Strings(conn.Do("KEYS", namespace+"*"))
483 | if err != nil {
484 | panic("could not get keys: " + err.Error())
485 | }
486 | for _, k := range keys {
487 | if _, err := conn.Do("DEL", k); err != nil {
488 | panic("could not del: " + err.Error())
489 | }
490 | }
491 | }
492 |
--------------------------------------------------------------------------------
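Finally, a hedged sketch of wiring up the web UI server outside of tests, mirroring the NewServer/Start/Stop calls used above; the listen address ":5040", pool sizing, and signal handling are illustrative choices, not part of the package:

```go
package main

import (
	"os"
	"os/signal"
	"time"

	"github.com/gocraft/work/webui"
	"github.com/gomodule/redigo/redis"
)

func main() {
	pool := &redis.Pool{
		MaxActive:   5,
		MaxIdle:     5,
		IdleTimeout: 240 * time.Second,
		Wait:        true,
		Dial:        func() (redis.Conn, error) { return redis.Dial("tcp", ":6379") },
	}

	// Serve the UI for the "work" namespace.
	server := webui.NewServer("work", pool, ":5040")
	server.Start()

	// Block until interrupted, then shut the HTTP listener down cleanly.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c
	server.Stop()
}
```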