├── .circleci └── config.yml ├── .github └── workflows │ └── golangci-lint.yml ├── .gitignore ├── .golangci.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── api ├── api.go ├── api_test.go ├── middleware │ └── logger.go └── webui_bindata.go ├── assets └── job-list.png ├── client ├── README.md ├── client.go ├── client_test.go └── doc.go ├── cmd ├── root.go ├── run.go └── serve.go ├── deployment ├── README.md ├── automate-testing │ ├── delete.sql │ ├── example-command.sh │ └── test.sh └── systemd │ ├── README.md │ └── kala.service ├── docker-compose.yml ├── examples ├── README.md ├── bash │ └── example-shell-command.sh ├── example-kala-commands │ └── example-command.sh ├── python │ ├── basic_test.py │ ├── example_dependent_jobs.py │ └── requirements.txt └── ruby │ └── basic_test.rb ├── go.mod ├── go.sum ├── integrate └── integration_test.go ├── job ├── cache.go ├── cache_test.go ├── cache_util.go ├── caches_test.go ├── clock.go ├── clock_test.go ├── db.go ├── db_test.go ├── job.go ├── job_exclude_from_race_test.go ├── job_recurring_test.go ├── job_scheduling_test.go ├── job_test.go ├── runner.go ├── runner_test.go ├── stats.go ├── stats_test.go ├── storage │ ├── boltdb │ │ ├── boltdb.go │ │ └── boltdb_test.go │ ├── consul │ │ ├── consul.go │ │ └── consul_test.go │ ├── mongo │ │ ├── mongo.go │ │ └── mongo_test.go │ ├── mysql │ │ ├── mysql.go │ │ └── mysql_test.go │ ├── postgres │ │ ├── postgres.go │ │ ├── postgres_int_test.go │ │ └── postgres_test.go │ └── redis │ │ ├── redis.go │ │ └── redis_test.go └── test_utils.go ├── load ├── README.md ├── loadtest.js └── spiketest.js ├── main.go ├── tools.go ├── utils └── iso8601 │ ├── iso8601.go │ └── iso8601_test.go └── webui ├── android-chrome-192x192.png ├── android-chrome-512x512.png ├── apple-touch-icon.png ├── css ├── bulma.css ├── bulma.css.map ├── bulma.min.css └── loader.css ├── favicon-16x16.png ├── favicon-32x32.png ├── favicon.ico ├── index.html ├── js ├── FormData.js ├── actions.js ├── app.js ├── 
fetch.js ├── fontawesome.js ├── kala.js ├── promise.js ├── reef │ ├── reef.polyfills.min.js │ └── router.min.js ├── routes.js ├── store.js └── utils.js ├── logo.png └── site.webmanifest /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | docker: 5 | - image: cimg/go:1.17.13 6 | - image: progrium/consul 7 | command: "-server -bootstrap" 8 | - image: mongo:3.4 9 | - image: mysql:5.7 10 | environment: 11 | MYSQL_ALLOW_EMPTY_PASSWORD: 1 12 | MYSQL_DATABASE: test 13 | 14 | working_directory: /home/circleci/go/src/github.com/ajvb/kala 15 | environment: 16 | TEST_RESULTS: /tmp/test-results 17 | GO111MODULE: "on" 18 | MYSQL_DSN: root:@/test 19 | 20 | steps: 21 | - checkout 22 | - run: mkdir -p $TEST_RESULTS 23 | - run: 24 | name: Waiting for Consul to be ready 25 | command: | 26 | for i in `seq 1 10`; 27 | do 28 | nc -z localhost 8500 && echo Success && exit 0 29 | echo -n . 30 | sleep 1 31 | done 32 | echo Failed waiting for Consul && exit 1 33 | - run: 34 | name: Waiting for Mongo to be ready 35 | command: | 36 | for i in `seq 1 10`; 37 | do 38 | nc -z localhost 27017 && echo Success && exit 0 39 | echo -n . 40 | sleep 1 41 | done 42 | echo Failed waiting for Mongo && exit 1 43 | - run: 44 | name: Waiting for Mysql to be ready 45 | command: | 46 | for i in `seq 1 15`; 47 | do 48 | nc -z localhost 3306 && echo Success && exit 0 49 | echo -n . 50 | sleep 1 51 | done 52 | echo Failed waiting for Mysql && exit 1 53 | 54 | - run: 55 | name: Run unit tests 56 | command: go test -v -race ./... 
curl --retry 10 --retry-delay 1 -X POST --header "Content-Type: application/json" -d '{"command": "echo cool", "name": "test_job"}' http://localhost:8000/api/v1/job/
43 | # skip-build-cache: true -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.db 3 | *.coverprofile 4 | .vscode/ 5 | kala 6 | vendor 7 | .idea/* 8 | .DS_Store 9 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | modules-download-mode: readonly 3 | 4 | linters-settings: 5 | 6 | gocritic: 7 | enabled-tags: 8 | - performance 9 | - style 10 | - diagnostic 11 | - experimental 12 | - opinionated 13 | disabled-checks: 14 | - regexpMust 15 | - hugeParam 16 | - commentedOutCode 17 | - paramTypeCombine 18 | - whyNoLint 19 | 20 | gofmt: 21 | simplify: false 22 | 23 | golint: 24 | min-confidence: 0.8 25 | 26 | gomnd: 27 | settings: 28 | mnd: 29 | checks: argument,case,condition,operation,return,assign 30 | ignored-numbers: ["0.0", "1.0"] # "0" and "1" are ignored by default 31 | 32 | govet: 33 | check-shadowing: true 34 | 35 | enable: [atomicalign] 36 | enable-all: false 37 | disable: [shadow] 38 | disable-all: false 39 | 40 | misspell: 41 | locale: US 42 | 43 | nolintlint: 44 | allow-unused: false 45 | allow-leading-space: true 46 | require-explanation: true 47 | allow-no-explanation: [gomnd, lll] 48 | require-specific: true 49 | 50 | linters: 51 | enable: 52 | - asciicheck 53 | - bodyclose 54 | - deadcode 55 | - dogsled 56 | - errcheck 57 | - gocognit 58 | - goconst 59 | - gocritic 60 | - gocyclo 61 | - gofmt 62 | - goimports 63 | - gomnd 64 | - goprintffuncname 65 | - gosec 66 | - gosimple 67 | - govet 68 | - ineffassign 69 | - lll 70 | - megacheck 71 | - misspell 72 | - nakedret 73 | - nestif 74 | - nolintlint 75 | - prealloc 76 | - rowserrcheck 77 | - staticcheck 78 | - structcheck 79 | - stylecheck 80 | - typecheck 81 | - unconvert 82 | - unparam 83 | - unused 84 | - varcheck 85 | - dupl 86 
| 87 | disable: 88 | - funlen 89 | - maligned 90 | - scopelint # Deprecated 91 | - depguard 92 | - gochecknoglobals 93 | - gochecknoinits 94 | - godox 95 | 96 | - goerr113 97 | - golint 98 | - testpackage 99 | 100 | - whitespace 101 | - wsl 102 | - godot 103 | 104 | - interfacer 105 | 106 | 107 | issues: 108 | exclude: 109 | 110 | exclude-rules: 111 | 112 | # Exclude some linters from running on tests files. 113 | - path: (_test\.go|test_utils\.go) 114 | linters: 115 | - gocyclo 116 | - errcheck 117 | - dupl 118 | - gosec 119 | - goconst 120 | - funlen 121 | - gochecknoglobals 122 | - gocognit 123 | - nestif 124 | - lll 125 | - bodyclose 126 | - gomnd 127 | 128 | - linters: 129 | - stylecheck 130 | text: "ST(1003|1005|1000|1021|1020|1022):" 131 | 132 | - linters: 133 | - lll 134 | source: "(//|Errorf|Printf|logf|Fatalf|Flags|Wrapf|Newf|Warnf)" 135 | 136 | - linters: 137 | - govet 138 | text: "shadow: declaration of \"err\" shadows declaration at line" 139 | source: "(if err := .+; err != nil {|, err :=)" 140 | 141 | severity: 142 | default-severity: error 143 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang 2 | 3 | WORKDIR /go/src/github.com/ajvb/kala 4 | COPY . . 
5 | RUN go build && mv kala /usr/bin 6 | 7 | CMD ["kala", "serve"] 8 | EXPOSE 8000 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 AJ Bahnken 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | APP := kala 2 | VERSION := $(shell git describe --tags --always --dirty) 3 | GOPATH := $(CURDIR)/Godeps/_workspace:$(GOPATH) 4 | PATH := $(GOPATH)/bin:$(PATH) 5 | 6 | bin/$(APP): bin 7 | go build -v -o $@ -ldflags "-X main.Version='${VERSION}'" 8 | 9 | bin: clean 10 | mkdir -p bin 11 | 12 | clean: 13 | rm -rf bin 14 | 15 | start: bin/$(APP) 16 | ./bin/$(APP) serve -v 17 | 18 | start-consul: bin/$(APP) 19 | ./bin/$(APP) serve --jobdb=consul -v 20 | 21 | test: 22 | go test -v ./... 23 | 24 | gen: tools 25 | go-bindata-assetfs -pkg api -o api/webui_bindata.go webui/... 26 | 27 | tools: 28 | go install github.com/go-bindata/go-bindata/... 29 | go install github.com/elazarl/go-bindata-assetfs/... 30 | 31 | 32 | .PHONY: bin/$(APP) bin clean start test gen tools 33 | -------------------------------------------------------------------------------- /api/api.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "errors" 6 | "io" 7 | "net/http" 8 | "net/http/pprof" 9 | "runtime" 10 | 11 | "github.com/ajvb/kala/api/middleware" 12 | "github.com/ajvb/kala/job" 13 | 14 | "github.com/gorilla/mux" 15 | "github.com/phyber/negroni-gzip/gzip" 16 | log "github.com/sirupsen/logrus" 17 | "github.com/urfave/negroni" 18 | ) 19 | 20 | const ( 21 | // Base API v1 Path 22 | ApiUrlPrefix = "/api/v1/" 23 | 24 | JobPath = "job/" 25 | ApiJobPath = ApiUrlPrefix + JobPath 26 | 27 | contentType = "Content-Type" 28 | jsonContentType = "application/json;charset=UTF-8" 29 | 30 | MAX_BODY_SIZE = 1048576 31 | READ_HEADER_TIMEOUT = 0 32 | ) 33 | 34 | type KalaStatsResponse struct { 35 | Stats *job.KalaStats 36 | } 37 | 38 | // HandleKalaStatsRequest is the handler for getting system-level metrics 39 | // /api/v1/stats 40 | func 
HandleKalaStatsRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 41 | return func(w http.ResponseWriter, r *http.Request) { 42 | resp := &KalaStatsResponse{ 43 | Stats: job.NewKalaStats(cache), 44 | } 45 | 46 | w.Header().Set(contentType, jsonContentType) 47 | w.WriteHeader(http.StatusOK) 48 | if err := json.NewEncoder(w).Encode(resp); err != nil { 49 | log.Errorf("Error occurred when marshaling response: %s", err) 50 | return 51 | } 52 | } 53 | } 54 | 55 | type ListJobStatsResponse struct { 56 | JobStats []*job.JobStat `json:"job_stats"` 57 | } 58 | 59 | // HandleListJobStatsRequest is the handler for getting job-specific stats 60 | // /api/v1/job/stats/{id} 61 | func HandleListJobStatsRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 62 | return func(w http.ResponseWriter, r *http.Request) { 63 | id := mux.Vars(r)["id"] 64 | j, err := cache.Get(id) 65 | if err != nil { 66 | w.WriteHeader(http.StatusNotFound) 67 | return 68 | } 69 | 70 | resp := &ListJobStatsResponse{ 71 | JobStats: j.Stats, 72 | } 73 | 74 | w.Header().Set(contentType, jsonContentType) 75 | w.WriteHeader(http.StatusOK) 76 | if err := json.NewEncoder(w).Encode(resp); err != nil { 77 | log.Errorf("Error occurred when marshaling response: %s", err) 78 | return 79 | } 80 | } 81 | } 82 | 83 | type ListJobsResponse struct { 84 | Jobs map[string]*job.Job `json:"jobs"` 85 | } 86 | 87 | // HandleListJobs responds with an array of all Jobs within the server, 88 | // active or disabled. 
89 | func HandleListJobsRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 90 | return func(w http.ResponseWriter, r *http.Request) { 91 | allJobs := cache.GetAll() 92 | allJobs.Lock.RLock() 93 | defer allJobs.Lock.RUnlock() 94 | 95 | resp := &ListJobsResponse{ 96 | Jobs: allJobs.Jobs, 97 | } 98 | 99 | w.Header().Set(contentType, jsonContentType) 100 | w.WriteHeader(http.StatusOK) 101 | if err := json.NewEncoder(w).Encode(resp); err != nil { 102 | log.Errorf("Error occurred when marshaling response: %s", err) 103 | return 104 | } 105 | } 106 | } 107 | 108 | type AddJobResponse struct { 109 | Id string `json:"id"` 110 | } 111 | 112 | func unmarshalNewJob(r *http.Request) (*job.Job, error) { 113 | newJob := &job.Job{} 114 | 115 | body, err := io.ReadAll(io.LimitReader(r.Body, MAX_BODY_SIZE)) 116 | if err != nil { 117 | log.Errorf("Error occurred when reading r.Body: %s", err) 118 | return nil, err 119 | } 120 | defer r.Body.Close() 121 | 122 | if err := json.Unmarshal(body, newJob); err != nil { 123 | log.Errorf("Error occurred when unmarshaling data: %s", err) 124 | return nil, err 125 | } 126 | 127 | return newJob, nil 128 | } 129 | 130 | // HandleAddJob takes a job object and unmarshals it to a Job type, 131 | // and then throws the job in the schedulers. 
132 | func HandleAddJob(cache job.JobCache, defaultOwner string) func(http.ResponseWriter, *http.Request) { 133 | return func(w http.ResponseWriter, r *http.Request) { 134 | newJob, err := unmarshalNewJob(r) 135 | if err != nil { 136 | errorEncodeJSON(err, http.StatusBadRequest, w) 137 | return 138 | } 139 | 140 | if defaultOwner != "" && newJob.Owner == "" { 141 | newJob.Owner = defaultOwner 142 | } 143 | 144 | err = newJob.Init(cache) 145 | if err != nil { 146 | errStr := "Error occurred when initializing the job" 147 | log.Errorf(errStr+": %s", err) 148 | errorEncodeJSON(errors.New(errStr), http.StatusBadRequest, w) 149 | return 150 | } 151 | 152 | resp := &AddJobResponse{ 153 | Id: newJob.Id, 154 | } 155 | 156 | w.Header().Set(contentType, jsonContentType) 157 | w.WriteHeader(http.StatusCreated) 158 | if err := json.NewEncoder(w).Encode(resp); err != nil { 159 | log.Errorf("Error occurred when marshaling response: %s", err) 160 | return 161 | } 162 | } 163 | } 164 | 165 | // HandleJobRequest routes requests to /api/v1/job/{id} to either 166 | // handleDeleteJob if its a DELETE or handleGetJob if its a GET request. 
167 | func HandleJobRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 168 | return func(w http.ResponseWriter, r *http.Request) { 169 | id := mux.Vars(r)["id"] 170 | 171 | j, err := cache.Get(id) 172 | if err != nil { 173 | log.Errorf("Error occurred when trying to get the job you requested.") 174 | w.WriteHeader(http.StatusNotFound) 175 | return 176 | } 177 | if j == nil { 178 | w.WriteHeader(http.StatusNotFound) 179 | return 180 | } 181 | 182 | if r.Method == "DELETE" { 183 | err = j.Delete(cache) 184 | if err != nil { 185 | errorEncodeJSON(err, http.StatusInternalServerError, w) 186 | } else { 187 | w.WriteHeader(http.StatusNoContent) 188 | } 189 | } else if r.Method == "GET" { 190 | handleGetJob(w, r, j) 191 | } 192 | } 193 | } 194 | 195 | // HandleDeleteAllJobs is the handler for deleting all jobs 196 | // DELETE /api/v1/job/all 197 | func HandleDeleteAllJobs(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 198 | return func(w http.ResponseWriter, r *http.Request) { 199 | if err := job.DeleteAll(cache); err != nil { 200 | errorEncodeJSON(err, http.StatusInternalServerError, w) 201 | } else { 202 | w.WriteHeader(http.StatusNoContent) 203 | } 204 | } 205 | } 206 | 207 | type JobResponse struct { 208 | Job *job.Job `json:"job"` 209 | } 210 | 211 | func handleGetJob(w http.ResponseWriter, _ *http.Request, j *job.Job) { 212 | resp := &JobResponse{ 213 | Job: j, 214 | } 215 | 216 | w.Header().Set(contentType, jsonContentType) 217 | w.WriteHeader(http.StatusOK) 218 | if err := json.NewEncoder(w).Encode(resp); err != nil { 219 | log.Errorf("Error occurred when marshaling response: %s", err) 220 | return 221 | } 222 | } 223 | 224 | // HandleStartJobRequest is the handler for manually starting jobs 225 | // /api/v1/job/start/{id} 226 | func HandleStartJobRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 227 | return func(w http.ResponseWriter, r *http.Request) { 228 | id := mux.Vars(r)["id"] 229 | j, 
err := cache.Get(id) 230 | if err != nil { 231 | log.Errorf("Error occurred when trying to get the job you requested.") 232 | w.WriteHeader(http.StatusNotFound) 233 | return 234 | } 235 | if j == nil { 236 | w.WriteHeader(http.StatusNotFound) 237 | return 238 | } 239 | 240 | j.StopTimer() 241 | j.Run(cache) 242 | 243 | w.WriteHeader(http.StatusNoContent) 244 | } 245 | } 246 | 247 | // HandleDisableJobRequest is the handler for mdisabling jobs 248 | // /api/v1/job/disable/{id} 249 | func HandleDisableJobRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 250 | return func(w http.ResponseWriter, r *http.Request) { 251 | id := mux.Vars(r)["id"] 252 | j, err := cache.Get(id) 253 | if err != nil { 254 | log.Errorf("Error occurred when trying to get the job you requested.") 255 | w.WriteHeader(http.StatusNotFound) 256 | return 257 | } 258 | if j == nil { 259 | w.WriteHeader(http.StatusNotFound) 260 | return 261 | } 262 | 263 | if err := j.Disable(cache); err != nil { 264 | errorEncodeJSON(err, http.StatusInternalServerError, w) 265 | return 266 | } 267 | 268 | w.WriteHeader(http.StatusNoContent) 269 | } 270 | } 271 | 272 | // HandleEnableJobRequest is the handler for enable jobs 273 | // /api/v1/job/enable/{id} 274 | func HandleEnableJobRequest(cache job.JobCache) func(w http.ResponseWriter, r *http.Request) { 275 | return func(w http.ResponseWriter, r *http.Request) { 276 | id := mux.Vars(r)["id"] 277 | j, err := cache.Get(id) 278 | if err != nil { 279 | log.Errorf("Error occurred when trying to get the job you requested.") 280 | w.WriteHeader(http.StatusNotFound) 281 | return 282 | } 283 | if j == nil { 284 | w.WriteHeader(http.StatusNotFound) 285 | return 286 | } 287 | 288 | if err := j.Enable(cache); err != nil { 289 | errorEncodeJSON(err, http.StatusInternalServerError, w) 290 | return 291 | } 292 | 293 | w.WriteHeader(http.StatusNoContent) 294 | } 295 | } 296 | 297 | type apiError struct { 298 | Error string `json:"error"` 299 | } 300 | 301 | 
func errorEncodeJSON(errToEncode error, status int, w http.ResponseWriter) { 302 | js, err := json.Marshal(apiError{Error: errToEncode.Error()}) 303 | if err != nil { 304 | log.Errorf("could not encode error message: %v", err) 305 | return 306 | } 307 | w.Header().Set(contentType, jsonContentType) 308 | http.Error(w, string(js), status) 309 | } 310 | 311 | // SetupApiRoutes is used within main to initialize all of the routes 312 | func SetupApiRoutes(r *mux.Router, cache job.JobCache, defaultOwner string) { 313 | // Route for creating a job 314 | r.HandleFunc(ApiJobPath, HandleAddJob(cache, defaultOwner)).Methods("POST") 315 | // Route for deleting all jobs 316 | r.HandleFunc(ApiJobPath+"all/", HandleDeleteAllJobs(cache)).Methods("DELETE") 317 | // Route for deleting and getting a job 318 | r.HandleFunc(ApiJobPath+"{id}/", HandleJobRequest(cache)).Methods("DELETE", "GET") 319 | // Route for getting job stats 320 | r.HandleFunc(ApiJobPath+"stats/{id}/", HandleListJobStatsRequest(cache)).Methods("GET") 321 | // Route for listing all jops 322 | r.HandleFunc(ApiJobPath, HandleListJobsRequest(cache)).Methods("GET") 323 | // Route for manually start a job 324 | r.HandleFunc(ApiJobPath+"start/{id}/", HandleStartJobRequest(cache)).Methods("POST") 325 | // Route for manually start a job 326 | r.HandleFunc(ApiJobPath+"enable/{id}/", HandleEnableJobRequest(cache)).Methods("POST") 327 | // Route for manually disable a job 328 | r.HandleFunc(ApiJobPath+"disable/{id}/", HandleDisableJobRequest(cache)).Methods("POST") 329 | // Route for getting app-level metrics 330 | r.HandleFunc(ApiUrlPrefix+"stats/", HandleKalaStatsRequest(cache)).Methods("GET") 331 | } 332 | 333 | func MakeServer(listenAddr string, cache job.JobCache, defaultOwner string, profile bool) *http.Server { 334 | r := mux.NewRouter() 335 | // Allows for the use for /job as well as /job/ 336 | r.StrictSlash(true) 337 | SetupApiRoutes(r, cache, defaultOwner) 338 | 
r.PathPrefix("/webui/").Handler(http.StripPrefix("/webui/", http.FileServer(assetFS()))) 339 | 340 | if profile { 341 | runtime.SetMutexProfileFraction(5) //nolint:gomnd 342 | // Register pprof handlers 343 | r.HandleFunc("/debug/pprof/", pprof.Index) 344 | r.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) 345 | r.HandleFunc("/debug/pprof/profile", pprof.Profile) 346 | r.HandleFunc("/debug/pprof/symbol", pprof.Symbol) 347 | r.HandleFunc("/debug/pprof/trace", pprof.Trace) 348 | r.Handle("/debug/pprof/goroutine", pprof.Handler("goroutine")) 349 | r.Handle("/debug/pprof/heap", pprof.Handler("heap")) 350 | r.Handle("/debug/pprof/threadcreate", pprof.Handler("threadcreate")) 351 | r.Handle("/debug/pprof/block", pprof.Handler("block")) 352 | r.Handle("/debug/pprof/mutex", pprof.Handler("mutex")) 353 | } 354 | 355 | n := negroni.New(negroni.NewRecovery(), &middleware.Logger{Logger: log.Logger{}}, gzip.Gzip(gzip.DefaultCompression)) 356 | n.UseHandler(r) 357 | 358 | return &http.Server{ 359 | ReadHeaderTimeout: READ_HEADER_TIMEOUT, 360 | Addr: listenAddr, 361 | Handler: n, 362 | } 363 | } 364 | -------------------------------------------------------------------------------- /api/middleware/logger.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "net/http" 5 | "time" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/urfave/negroni" 9 | ) 10 | 11 | // Logger is a middleware handler that logs the request as it goes in and the response as it goes out. 
12 | type Logger struct { 13 | // Logger inherits from log.Logger used to log messages with the Logger middleware 14 | log.Logger 15 | } 16 | 17 | func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { 18 | start := time.Now() 19 | l.Infof("Started %s %s", r.Method, r.URL.Path) 20 | 21 | next(rw, r) 22 | 23 | res := rw.(negroni.ResponseWriter) 24 | l.Infof("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start)) 25 | } 26 | -------------------------------------------------------------------------------- /assets/job-list.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ajvb/kala/4eb4f7ae04346e0f6ba92d0c63a619ca9c59559c/assets/job-list.png -------------------------------------------------------------------------------- /client/README.md: -------------------------------------------------------------------------------- 1 | # Kala Go Client Library 2 | 3 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "strings" 11 | 12 | "github.com/ajvb/kala/api" 13 | "github.com/ajvb/kala/job" 14 | ) 15 | 16 | const ( 17 | methodGet = "GET" 18 | methodPost = "POST" 19 | methodDelete = "DELETE" 20 | ) 21 | 22 | var ( 23 | ErrJobNotFound = errors.New("Job not found") 24 | ErrJobCreationError = errors.New("Error creating job") 25 | 26 | ErrGenericError = errors.New("An error occurred performing your request") 27 | 28 | jobPath = api.JobPath[:len(api.JobPath)-1] 29 | ) 30 | 31 | // KalaClient is the base struct for this package. 
32 | type KalaClient struct { 33 | apiEndpoint string 34 | } 35 | 36 | // New is used to create a new KalaClient based off of the apiEndpoint 37 | // Example: 38 | // c := New("http://127.0.0.1:8000") 39 | func New(apiEndpoint string) *KalaClient { 40 | apiEndpoint = strings.TrimSuffix(apiEndpoint, "/") 41 | apiUrlPrefix := api.ApiUrlPrefix[:len(api.ApiUrlPrefix)-1] 42 | 43 | return &KalaClient{ 44 | apiEndpoint: apiEndpoint + apiUrlPrefix, 45 | } 46 | } 47 | 48 | func (kc *KalaClient) encode(value interface{}) (io.Reader, error) { 49 | if value == nil { 50 | return nil, nil 51 | } 52 | buf := new(bytes.Buffer) 53 | if err := json.NewEncoder(buf).Encode(value); err != nil { 54 | return nil, err 55 | } 56 | return buf, nil 57 | } 58 | 59 | func (kc *KalaClient) decode(body io.Reader, target interface{}) error { 60 | if target == nil { 61 | return nil 62 | } 63 | return json.NewDecoder(body).Decode(target) 64 | } 65 | 66 | func (kc *KalaClient) url(parts ...string) string { 67 | return strings.Join(append([]string{kc.apiEndpoint}, parts...), "/") + "/" 68 | } 69 | 70 | func (kc *KalaClient) do(method, url string, expectedStatus int, payload, target interface{}) ( 71 | statusCode int, 72 | err error, 73 | ) { 74 | body, err := kc.encode(payload) 75 | if err != nil { 76 | return 77 | } 78 | req, err := http.NewRequest(method, url, body) 79 | if err != nil { 80 | return 81 | } 82 | resp, err := http.DefaultClient.Do(req) 83 | if err != nil { 84 | return 85 | } 86 | defer resp.Body.Close() 87 | if status := resp.StatusCode; status != expectedStatus { 88 | return status, ErrGenericError 89 | } 90 | return resp.StatusCode, kc.decode(resp.Body, target) 91 | } 92 | 93 | // CreateJob is used for creating a new job within Kala. Note that the 94 | // Name and Command fields are the only ones that are required. 
95 | // Example: 96 | // c := New("http://127.0.0.1:8000") 97 | // body := &job.Job{ 98 | // Schedule: "R2/2015-06-04T19:25:16.828696-07:00/PT10S", 99 | // Name: "test_job", 100 | // Command: "bash -c 'date'", 101 | // } 102 | // id, err := c.CreateJob(body) 103 | func (kc *KalaClient) CreateJob(body *job.Job) (string, error) { 104 | id := &api.AddJobResponse{} 105 | _, err := kc.do(methodPost, kc.url(jobPath), http.StatusCreated, body, id) 106 | if err != nil { 107 | if err == ErrGenericError { 108 | return "", ErrJobCreationError 109 | } 110 | return "", err 111 | } 112 | return id.Id, err 113 | } 114 | 115 | // GetJob is used to retrieve a Job from Kala by its ID. 116 | // Example: 117 | // c := New("http://127.0.0.1:8000") 118 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 119 | // job, err := c.GetJob(id) 120 | func (kc *KalaClient) GetJob(id string) (*job.Job, error) { 121 | j := &api.JobResponse{} 122 | _, err := kc.do(methodGet, kc.url(jobPath, id), http.StatusOK, nil, j) 123 | if err != nil { 124 | if err == ErrGenericError { 125 | return nil, ErrJobNotFound 126 | } 127 | return nil, err 128 | } 129 | return j.Job, nil 130 | } 131 | 132 | // GetAllJobs returns a map of string (ID's) to job.Job's which contains 133 | // all Jobs currently within Kala. 134 | // Example: 135 | // c := New("http://127.0.0.1:8000") 136 | // jobs, err := c.GetAllJobs() 137 | func (kc *KalaClient) GetAllJobs() (map[string]*job.Job, error) { 138 | jobs := &api.ListJobsResponse{} 139 | _, err := kc.do(methodGet, kc.url(jobPath), http.StatusOK, nil, jobs) 140 | return jobs.Jobs, err 141 | } 142 | 143 | // DeleteJob is used to delete a Job from Kala by its ID. 
144 | // Example: 145 | // c := New("http://127.0.0.1:8000") 146 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 147 | // ok, err := c.DeleteJob(id) 148 | func (kc *KalaClient) DeleteJob(id string) (bool, error) { 149 | status, err := kc.do(methodDelete, kc.url(jobPath, id), http.StatusNoContent, nil, nil) 150 | if err != nil { 151 | if err == ErrGenericError { 152 | return false, fmt.Errorf("Delete failed with a status code of %d", status) 153 | } 154 | return false, err 155 | } 156 | return true, nil 157 | } 158 | 159 | // DeleteAllJobs is used to delete all jobs from Kala 160 | // Example: 161 | // c := New("http://127.0.0.1:8000") 162 | // ok, err := c.DeleteAllJobs() 163 | func (kc *KalaClient) DeleteAllJobs() (bool, error) { 164 | return kc.DeleteJob("all") 165 | } 166 | 167 | // GetJobStats is used to retrieve stats about a Job from Kala by its ID. 168 | // Example: 169 | // c := New("http://127.0.0.1:8000") 170 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 171 | // stats, err := c.GetJobStats(id) 172 | func (kc *KalaClient) GetJobStats(id string) ([]*job.JobStat, error) { 173 | js := &api.ListJobStatsResponse{} 174 | _, err := kc.do(methodGet, kc.url(jobPath, "stats", id), http.StatusOK, nil, js) 175 | return js.JobStats, err 176 | } 177 | 178 | // StartJob is used to manually start a Job by its ID. 
179 | // Example: 180 | // c := New("http://127.0.0.1:8000") 181 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 182 | // ok, err := c.StartJob(id) 183 | func (kc *KalaClient) StartJob(id string) (bool, error) { 184 | _, err := kc.do(methodPost, kc.url(jobPath, "start", id), http.StatusNoContent, nil, nil) 185 | if err != nil { 186 | if err == ErrGenericError { 187 | return false, nil 188 | } 189 | return false, err 190 | } 191 | return true, nil 192 | } 193 | 194 | // GetKalaStats retrieves system-level metrics about Kala 195 | // Example: 196 | // c := New("http://127.0.0.1:8000") 197 | // stats, err := c.GetKalaStats() 198 | func (kc *KalaClient) GetKalaStats() (*job.KalaStats, error) { 199 | ks := &api.KalaStatsResponse{} 200 | _, err := kc.do(methodGet, kc.url("stats"), http.StatusOK, nil, ks) 201 | return ks.Stats, err 202 | } 203 | 204 | // DisableJob is used to disable a Job in Kala using its ID. 205 | // Example: 206 | // c := New("http://127.0.0.1:8000") 207 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 208 | // ok, err := c.DisableJob(id) 209 | func (kc *KalaClient) DisableJob(id string) (bool, error) { 210 | status, err := kc.do(methodPost, kc.url(jobPath, "disable", id), http.StatusNoContent, nil, nil) 211 | if err != nil { 212 | if err == ErrGenericError { 213 | return false, fmt.Errorf("Disable failed with a status code of %d", status) 214 | } 215 | return false, err 216 | } 217 | return true, nil 218 | } 219 | 220 | // EnableJob is used to enable a disabled Job in Kala using its ID. 
221 | // Example: 222 | // c := New("http://127.0.0.1:8000") 223 | // id := "93b65499-b211-49ce-57e0-19e735cc5abd" 224 | // ok, err := c.EnableJob(id) 225 | func (kc *KalaClient) EnableJob(id string) (bool, error) { 226 | status, err := kc.do(methodPost, kc.url(jobPath, "enable", id), http.StatusNoContent, nil, nil) 227 | if err != nil { 228 | if err == ErrGenericError { 229 | return false, fmt.Errorf("Enable failed with a status code of %d", status) 230 | } 231 | return false, err 232 | } 233 | return true, nil 234 | } 235 | -------------------------------------------------------------------------------- /client/client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | "net/http/httptest" 6 | "os" 7 | "time" 8 | 9 | "github.com/ajvb/kala/api" 10 | "github.com/ajvb/kala/job" 11 | 12 | "testing" 13 | 14 | "github.com/gorilla/mux" 15 | "github.com/stretchr/testify/assert" 16 | ) 17 | 18 | func NewTestServer() *httptest.Server { 19 | r := mux.NewRouter() 20 | db := &job.MockDB{} 21 | cache := job.NewLockFreeJobCache(db) 22 | api.SetupApiRoutes(r, cache, "") 23 | return httptest.NewServer(r) 24 | } 25 | 26 | func cleanUp() { 27 | ts := NewTestServer() 28 | kc := New(ts.URL) 29 | jobs, err := kc.GetAllJobs() 30 | if err != nil { 31 | fmt.Printf("Problem running clean up (can't get all jobs from the server)") 32 | os.Exit(1) 33 | } 34 | defer ts.Close() 35 | if len(jobs) == 0 { 36 | return 37 | } 38 | for _, job := range jobs { 39 | kc.DeleteJob(job.Id) 40 | } 41 | } 42 | 43 | func NewJobMap() *job.Job { 44 | scheduleTime := time.Now().Add(time.Minute * 5) 45 | repeat := 1 46 | delay := "P1DT10M10S" 47 | parsedTime := scheduleTime.Format(time.RFC3339) 48 | scheduleStr := fmt.Sprintf("R%d/%s/%s", repeat, parsedTime, delay) 49 | 50 | return &job.Job{ 51 | Schedule: scheduleStr, 52 | Name: "mock_job", 53 | Command: "bash -c 'date'", 54 | Owner: "example@example.com", 55 | } 56 | } 57 | 
58 | func TestCreateGetDeleteJob(t *testing.T) { 59 | ts := NewTestServer() 60 | defer ts.Close() 61 | kc := New(ts.URL) 62 | j := NewJobMap() 63 | 64 | id, err := kc.CreateJob(j) 65 | assert.NoError(t, err) 66 | assert.NotEqual(t, id, "") 67 | 68 | respJob, err := kc.GetJob(id) 69 | assert.NoError(t, err) 70 | assert.Equal(t, j.Schedule, respJob.Schedule) 71 | assert.Equal(t, j.Name, respJob.Name) 72 | assert.Equal(t, j.Command, respJob.Command) 73 | assert.Equal(t, j.Owner, respJob.Owner) 74 | 75 | ok, err := kc.DeleteJob(id) 76 | assert.NoError(t, err) 77 | assert.True(t, ok) 78 | 79 | cleanUp() 80 | } 81 | 82 | func TestCreateJobError(t *testing.T) { 83 | ts := NewTestServer() 84 | defer ts.Close() 85 | kc := New(ts.URL) 86 | j := NewJobMap() 87 | 88 | j.Schedule = "bbbbbbbbbbbbbbb" 89 | 90 | id, err := kc.CreateJob(j) 91 | assert.Error(t, err) 92 | assert.Equal(t, id, "") 93 | 94 | cleanUp() 95 | } 96 | 97 | func TestGetJobError(t *testing.T) { 98 | ts := NewTestServer() 99 | defer ts.Close() 100 | kc := New(ts.URL) 101 | 102 | respJob, err := kc.GetJob("id-that-doesnt-exist") 103 | assert.Error(t, err) 104 | assert.Nil(t, respJob) 105 | 106 | cleanUp() 107 | } 108 | 109 | func TestDeleteJobError(t *testing.T) { 110 | ts := NewTestServer() 111 | defer ts.Close() 112 | kc := New(ts.URL) 113 | 114 | ok, err := kc.DeleteJob("id-that-doesnt-exist") 115 | assert.Error(t, err) 116 | assert.False(t, ok) 117 | 118 | cleanUp() 119 | } 120 | 121 | func TestGetAllJobs(t *testing.T) { 122 | ts := NewTestServer() 123 | defer ts.Close() 124 | kc := New(ts.URL) 125 | j := NewJobMap() 126 | 127 | id, err := kc.CreateJob(j) 128 | assert.NoError(t, err) 129 | 130 | jobs, err := kc.GetAllJobs() 131 | assert.NoError(t, err) 132 | assert.Equal(t, 1, len(jobs)) 133 | assert.Equal(t, j.Schedule, jobs[id].Schedule) 134 | assert.Equal(t, j.Name, jobs[id].Name) 135 | assert.Equal(t, j.Command, jobs[id].Command) 136 | assert.Equal(t, j.Owner, jobs[id].Owner) 137 | 138 | cleanUp() 139 | 
} 140 | 141 | func TestGetAllJobsNoJobsExist(t *testing.T) { 142 | ts := NewTestServer() 143 | defer ts.Close() 144 | kc := New(ts.URL) 145 | 146 | jobs, err := kc.GetAllJobs() 147 | fmt.Printf("JOBS: %#v", jobs) 148 | assert.NoError(t, err) 149 | assert.Equal(t, 0, len(jobs)) 150 | 151 | cleanUp() 152 | } 153 | 154 | func TestDeleteJob(t *testing.T) { 155 | ts := NewTestServer() 156 | defer ts.Close() 157 | kc := New(ts.URL) 158 | j := NewJobMap() 159 | 160 | id, err := kc.CreateJob(j) 161 | assert.NoError(t, err) 162 | 163 | ok, err := kc.DeleteJob(id) 164 | assert.NoError(t, err) 165 | assert.True(t, ok) 166 | 167 | respJob, err := kc.GetJob(id) 168 | assert.Nil(t, respJob) 169 | assert.Error(t, err) 170 | 171 | cleanUp() 172 | } 173 | 174 | func TestDeleteAllJobs(t *testing.T) { 175 | ts := NewTestServer() 176 | defer ts.Close() 177 | kc := New(ts.URL) 178 | j := NewJobMap() 179 | 180 | for i := 0; i < 10; i++ { 181 | _, err := kc.CreateJob(j) 182 | assert.NoError(t, err) 183 | } 184 | 185 | ok, err := kc.DeleteAllJobs() 186 | assert.NoError(t, err) 187 | assert.True(t, ok) 188 | 189 | allJobs, err := kc.GetAllJobs() 190 | assert.Empty(t, allJobs) 191 | assert.NoError(t, err) 192 | 193 | cleanUp() 194 | } 195 | 196 | func TestGetJobStats(t *testing.T) { 197 | ts := NewTestServer() 198 | defer ts.Close() 199 | kc := New(ts.URL) 200 | j := NewJobMap() 201 | 202 | // Create the job 203 | id, err := kc.CreateJob(j) 204 | assert.NoError(t, err) 205 | // Start the job 206 | ok, err := kc.StartJob(id) 207 | now := time.Now() 208 | assert.NoError(t, err) 209 | assert.True(t, ok) 210 | // Wait let the job run 211 | time.Sleep(time.Second * 1) 212 | 213 | stats, err := kc.GetJobStats(id) 214 | assert.NoError(t, err) 215 | assert.Equal(t, id, stats[0].JobId) 216 | assert.Equal(t, uint(0), stats[0].NumberOfRetries) 217 | assert.True(t, stats[0].Success) 218 | assert.True(t, stats[0].ExecutionDuration != time.Duration(0)) 219 | assert.WithinDuration(t, now, stats[0].RanAt, 
time.Second) 220 | 221 | cleanUp() 222 | } 223 | 224 | func TestGetJobStatsError(t *testing.T) { 225 | ts := NewTestServer() 226 | defer ts.Close() 227 | kc := New(ts.URL) 228 | 229 | stats, err := kc.GetJobStats("not-an-actual-id") 230 | assert.Error(t, err) 231 | assert.Nil(t, stats) 232 | } 233 | 234 | func TestStartJob(t *testing.T) { 235 | ts := NewTestServer() 236 | defer ts.Close() 237 | kc := New(ts.URL) 238 | j := NewJobMap() 239 | 240 | id, err := kc.CreateJob(j) 241 | assert.NoError(t, err) 242 | assert.NotEqual(t, id, "") 243 | 244 | now := time.Now() 245 | ok, err := kc.StartJob(id) 246 | assert.NoError(t, err) 247 | assert.True(t, ok) 248 | 249 | // Wait let the job run 250 | time.Sleep(time.Second * 1) 251 | 252 | respJob, err := kc.GetJob(id) 253 | assert.NoError(t, err) 254 | assert.Equal(t, uint(1), respJob.Metadata.SuccessCount) 255 | assert.WithinDuration(t, now, respJob.Metadata.LastSuccess, time.Second*2) 256 | assert.WithinDuration(t, now, respJob.Metadata.LastAttemptedRun, time.Second*2) 257 | 258 | cleanUp() 259 | } 260 | 261 | func TestStartJobError(t *testing.T) { 262 | ts := NewTestServer() 263 | defer ts.Close() 264 | kc := New(ts.URL) 265 | 266 | ok, err := kc.StartJob("not-an-actual-id") 267 | assert.NoError(t, err) 268 | assert.False(t, ok) 269 | } 270 | 271 | func TestGetKalaStats(t *testing.T) { 272 | ts := NewTestServer() 273 | defer ts.Close() 274 | kc := New(ts.URL) 275 | 276 | for i := 0; i < 5; i++ { 277 | // Generate new job 278 | j := NewJobMap() 279 | 280 | id, err := kc.CreateJob(j) 281 | assert.NoError(t, err) 282 | assert.NotEqual(t, id, "") 283 | 284 | ok, err := kc.StartJob(id) 285 | assert.NoError(t, err) 286 | assert.True(t, ok) 287 | } 288 | time.Sleep(time.Second * 3) 289 | 290 | stats, err := kc.GetKalaStats() 291 | assert.NoError(t, err) 292 | 293 | assert.Equal(t, 5, stats.ActiveJobs) 294 | assert.Equal(t, 0, stats.DisabledJobs) 295 | assert.Equal(t, 5, stats.Jobs) 296 | 297 | assert.Equal(t, uint(0), 
stats.ErrorCount) 298 | assert.Equal(t, uint(5), stats.SuccessCount) 299 | 300 | cleanUp() 301 | } 302 | 303 | func TestDisableEnableJob(t *testing.T) { 304 | 305 | ts := NewTestServer() 306 | defer ts.Close() 307 | kc := New(ts.URL) 308 | j := NewJobMap() 309 | 310 | id, err := kc.CreateJob(j) 311 | assert.NoError(t, err) 312 | assert.NotEqual(t, id, "") 313 | 314 | respJob, err := kc.GetJob(id) 315 | assert.Nil(t, err) 316 | assert.Equal(t, respJob.Disabled, false) 317 | 318 | ok, err := kc.DisableJob(id) 319 | assert.NoError(t, err) 320 | assert.True(t, ok) 321 | 322 | disJob, err := kc.GetJob(id) 323 | assert.Nil(t, err) 324 | assert.Equal(t, disJob.Disabled, true) 325 | 326 | ok, err = kc.EnableJob(id) 327 | assert.NoError(t, err) 328 | assert.True(t, ok) 329 | 330 | enJob, err := kc.GetJob(id) 331 | assert.Nil(t, err) 332 | assert.Equal(t, enJob.Disabled, false) 333 | 334 | cleanUp() 335 | } 336 | -------------------------------------------------------------------------------- /client/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | KalaClient in the official Go client library for interfacing with the Kala HTTP API. 4 | 5 | It is a simple wrapper around the Sling library, and it is thread-safe. 
6 | 7 | */ 8 | package client 9 | -------------------------------------------------------------------------------- /cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | homedir "github.com/mitchellh/go-homedir" 9 | "github.com/spf13/cobra" 10 | "github.com/spf13/viper" 11 | ) 12 | 13 | var cfgFile string 14 | 15 | var RootCmd = &cobra.Command{ 16 | Use: "kala", 17 | Short: "Modern job scheduler", 18 | Long: `See https://github.com/ajvb/kala for documentation.`, 19 | PersistentPreRunE: func(cmd *cobra.Command, args []string) error { 20 | return viper.BindPFlags(cmd.Flags()) 21 | }, 22 | } 23 | 24 | func init() { 25 | cobra.OnInitialize(initViper) 26 | RootCmd.PersistentFlags().StringVar(&cfgFile, "kala", "", "config file (default is $HOME/kala.yaml)") 27 | } 28 | 29 | func initViper() { 30 | if cfgFile != "" { 31 | viper.SetConfigFile(cfgFile) 32 | } else { 33 | home, err := homedir.Dir() 34 | if err != nil { 35 | fmt.Println(err) 36 | os.Exit(1) 37 | } 38 | 39 | viper.AddConfigPath(home) 40 | viper.SetConfigName("kala") 41 | } 42 | 43 | viper.SetEnvPrefix("kala") 44 | viper.AutomaticEnv() 45 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 46 | 47 | if err := viper.ReadInConfig(); err == nil { 48 | fmt.Println("Using config file:", viper.ConfigFileUsed()) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /cmd/run.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | 7 | "github.com/ajvb/kala/job" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | var runCommandCmd = &cobra.Command{ 12 | Use: "run", 13 | Short: "run a job", 14 | Long: `runs the specific job immediately and returns the result`, 15 | Run: func(cmd *cobra.Command, args []string) { 16 | if len(args) == 0 { 17 | log.Fatal("Must include a command") 18 | } 
else if len(args) > 1 { 19 | log.Fatal("Must only include a command") 20 | } 21 | 22 | j := &job.Job{ 23 | Command: args[0], 24 | } 25 | 26 | out, err := j.RunCmd() 27 | if err != nil { 28 | log.Fatalf("Command Failed with err: %s", err) 29 | } else { 30 | fmt.Println("Command Succeeded!") 31 | fmt.Println("Output: ", out) 32 | } 33 | }, 34 | } 35 | 36 | func init() { 37 | RootCmd.AddCommand(runCommandCmd) 38 | } 39 | -------------------------------------------------------------------------------- /cmd/serve.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "crypto/tls" 5 | "crypto/x509" 6 | "fmt" 7 | "os" 8 | "strings" 9 | "time" 10 | 11 | "github.com/ajvb/kala/api" 12 | "github.com/ajvb/kala/job" 13 | "github.com/ajvb/kala/job/storage/boltdb" 14 | "github.com/ajvb/kala/job/storage/consul" 15 | "github.com/ajvb/kala/job/storage/mongo" 16 | "github.com/ajvb/kala/job/storage/mysql" 17 | "github.com/ajvb/kala/job/storage/postgres" 18 | "github.com/ajvb/kala/job/storage/redis" 19 | 20 | redislib "github.com/garyburd/redigo/redis" 21 | log "github.com/sirupsen/logrus" 22 | "github.com/spf13/cobra" 23 | "github.com/spf13/viper" 24 | "gopkg.in/mgo.v2" 25 | ) 26 | 27 | var serveCmd = &cobra.Command{ 28 | Use: "serve", 29 | Short: "start kala server", 30 | Long: `start the kala server with the backing store of your choice, and run until interrupted`, 31 | Run: func(cmd *cobra.Command, args []string) { 32 | if viper.GetBool("verbose") { 33 | log.SetLevel(log.DebugLevel) 34 | } 35 | 36 | var parsedPort string 37 | port := viper.GetString("port") 38 | if port != "" { 39 | if strings.Contains(port, ":") { 40 | parsedPort = port 41 | } else { 42 | parsedPort = ":" + port 43 | } 44 | } else { 45 | parsedPort = ":8000" 46 | } 47 | 48 | var connectionString string 49 | if viper.GetString("interface") != "" { 50 | connectionString = viper.GetString("interface") + parsedPort 51 | } else { 52 | connectionString = 
parsedPort 53 | } 54 | 55 | var db job.JobDB 56 | 57 | switch viper.GetString("jobdb") { 58 | case "boltdb": 59 | db = boltdb.GetBoltDB(viper.GetString("bolt-path")) 60 | case "redis": 61 | if viper.GetString("jobdb-password") != "" { 62 | option := redislib.DialPassword(viper.GetString("jobdb-password")) 63 | db = redis.New(viper.GetString("jobdb-address"), option, true) 64 | } else { 65 | db = redis.New(viper.GetString("jobdb-address"), redislib.DialOption{}, false) 66 | } 67 | case "mongo": 68 | if viper.GetString("jobdb-username") != "" { 69 | cred := &mgo.Credential{ 70 | Username: viper.GetString("jobdb-username"), 71 | Password: viper.GetString("jobdb-password")} 72 | db = mongo.New(viper.GetString("jobdb-address"), cred) 73 | } else { 74 | db = mongo.New(viper.GetString("jobdb-address"), &mgo.Credential{}) 75 | } 76 | case "consul": 77 | db = consul.New(viper.GetString("jobdb-address")) 78 | case "postgres": 79 | dsn := fmt.Sprintf("postgres://%s:%s@%s", viper.GetString("jobdb-username"), viper.GetString("jobdb-password"), viper.GetString("jobdb-address")) 80 | db = postgres.New(dsn) 81 | case "mysql", "mariadb": 82 | dsn := fmt.Sprintf("%s:%s@%s", viper.GetString("jobdb-username"), viper.GetString("jobdb-password"), viper.GetString("jobdb-address")) 83 | log.Debug("Mysql/Maria DSN: ", dsn) 84 | if viper.IsSet("jobdb-tls-capath") { 85 | // https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig 86 | rootCertPool := x509.NewCertPool() 87 | pem, err := os.ReadFile(viper.GetString("jobdb-tls-capath")) 88 | if err != nil { 89 | log.Fatal(err) 90 | } 91 | if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { 92 | log.Fatal("Failed to append PEM.") 93 | } 94 | clientCert := make([]tls.Certificate, 0, 1) 95 | certs, err := tls.LoadX509KeyPair(viper.GetString("jobdb-tls-certpath"), viper.GetString("jobdb-tls-keypath")) 96 | if err != nil { 97 | log.Fatal(err) 98 | } 99 | clientCert = append(clientCert, certs) 100 | cfg := tls.Config{ 101 | MinVersion: 
tls.VersionTLS12, 102 | RootCAs: rootCertPool, 103 | Certificates: clientCert, 104 | } 105 | if viper.IsSet("jobdb-tls-servername") { 106 | sn := viper.GetString("jobdb-tls-servername") 107 | cfg.ServerName = sn 108 | // Solve gcp invalid hostname in CN: https://github.com/golang/go/issues/40748#issuecomment-673599371 109 | if strings.Contains(sn, ":") { 110 | cfg.InsecureSkipVerify = true 111 | cfg.VerifyConnection = func(cs tls.ConnectionState) error { 112 | commonName := cs.PeerCertificates[0].Subject.CommonName 113 | if commonName != cs.ServerName { 114 | return fmt.Errorf("invalid certificate name %q, expected %q", commonName, cs.ServerName) 115 | } 116 | opts := x509.VerifyOptions{ 117 | Roots: rootCertPool, 118 | Intermediates: x509.NewCertPool(), 119 | } 120 | for _, cert := range cs.PeerCertificates[1:] { 121 | opts.Intermediates.AddCert(cert) 122 | } 123 | _, err := cs.PeerCertificates[0].Verify(opts) 124 | return err 125 | } 126 | } 127 | } 128 | db = mysql.New(dsn, &cfg) 129 | } else { 130 | db = mysql.New(dsn, nil) 131 | } 132 | default: 133 | log.Fatalf("Unknown Job DB implementation '%s'", viper.GetString("jobdb")) 134 | } 135 | 136 | if viper.GetBool("no-persist") { 137 | log.Warn("No-persist mode engaged; using in-memory database!") 138 | db = &job.MockDB{} 139 | } 140 | 141 | // Create cache 142 | log.Infof("Preparing cache") 143 | cache := job.NewLockFreeJobCache(db) 144 | 145 | // Persistence mode 146 | persistEvery := viper.GetInt("persist-every") 147 | if viper.GetBool("no-tx-persist") { 148 | log.Warnf("Transactional persistence is not enabled; job cache will persist to db every %d seconds", persistEvery) 149 | } else { 150 | log.Infof("Enabling transactional persistence, plus persist all jobs to db every %d seconds", persistEvery) 151 | cache.PersistOnWrite = true 152 | } 153 | 154 | if persistEvery < 1 { 155 | log.Fatal("With transactional persistence off, you will need to set persist-every to greater than zero.") 156 | } 157 | 158 | // 
Startup cache 159 | cache.Start(time.Duration(persistEvery)*time.Second, time.Duration(viper.GetInt("jobstat-ttl"))*time.Minute) 160 | 161 | // Launch API server 162 | log.Infof("Starting server on port %s", connectionString) 163 | srv := api.MakeServer(connectionString, cache, viper.GetString("default-owner"), viper.GetBool("profile")) 164 | log.Fatal(srv.ListenAndServe()) 165 | }, 166 | } 167 | 168 | func init() { 169 | RootCmd.AddCommand(serveCmd) 170 | serveCmd.Flags().StringP("port", "p", ":8000", "Port for Kala to run on.") 171 | serveCmd.Flags().BoolP("no-persist", "n", false, "No Persistence Mode - In this mode no data will be saved to the database. Perfect for testing.") 172 | serveCmd.Flags().StringP("interface", "i", "", "Interface to listen on, default is all.") 173 | serveCmd.Flags().StringP("default-owner", "o", "", "Default owner. The inputted email will be attached to any job missing an owner") 174 | serveCmd.Flags().String("jobdb", "boltdb", "Implementation of job database, either 'boltdb', 'redis', 'mongo', 'consul', 'postgres', 'mariadb', or 'mysql'.") 175 | serveCmd.Flags().String("bolt-path", "", "Path to the bolt database file, default is current directory.") 176 | serveCmd.Flags().String("jobdb-address", "", "Network address for the job database, in 'host:port' format.") 177 | serveCmd.Flags().String("jobdb-username", "", "Username for the job database.") 178 | serveCmd.Flags().String("jobdb-password", "", "Password for the job database.") 179 | serveCmd.Flags().String("jobdb-tls-capath", "", "Path to tls server CA file for the job database.") 180 | serveCmd.Flags().String("jobdb-tls-certpath", "", "Path to tls client cert file for the job database.") 181 | serveCmd.Flags().String("jobdb-tls-keypath", "", "Path to tls client key file for the job database.") 182 | serveCmd.Flags().String("jobdb-tls-servername", "", "Server name to verify cert for the job database.") 183 | serveCmd.Flags().BoolP("verbose", "v", false, "Set for verbose 
logging.") 184 | serveCmd.Flags().IntP("persist-every", "e", 60*60, "Interval in seconds between persisting all jobs to db") //nolint:gomnd 185 | serveCmd.Flags().Int("jobstat-ttl", -1, "Sets the jobstat-ttl in minutes. The default -1 value indicates JobStat entries will be kept forever") 186 | serveCmd.Flags().Bool("profile", false, "Activate pprof handlers") 187 | serveCmd.Flags().Bool("no-tx-persist", false, "Only persist to db periodically, not transactionally.") 188 | } 189 | -------------------------------------------------------------------------------- /deployment/README.md: -------------------------------------------------------------------------------- 1 | #Deployment 2 | 3 | Inside here are example ways in which to "deploy Kala" 4 | -------------------------------------------------------------------------------- /deployment/automate-testing/delete.sql: -------------------------------------------------------------------------------- 1 | use kala; 2 | delete from jobs; -------------------------------------------------------------------------------- /deployment/automate-testing/example-command.sh: -------------------------------------------------------------------------------- 1 | echo $(date) >> ./deployment/automate-testing/date.txt -------------------------------------------------------------------------------- /deployment/automate-testing/test.sh: -------------------------------------------------------------------------------- 1 | mysql -ukala -pkala < delete.sql 2 | rm date.txt 3 | cd ../.. 4 | go run main.go serve --jobdb=mariadb --jobdb-address=\(localhost:3306\)/kala --jobdb-username=kala --jobdb-password=kala & 5 | cd deployment/automate-testing 6 | #GO_PID=$! 
7 | ps -ef | grep kala | awk -F ' ' '{print $2}' > pid.txt 8 | sleep 5 9 | now=$(date --date="+15 seconds" +%Y-%m-%dT%H:%M:%S+02:00) 10 | id=$(curl http://127.0.0.1:8000/api/v1/job/ -d '{"epsilon": "PT5S", "command": "bash ./deployment/automate-testing/example-command.sh", "name": "test_job", "schedule": "R/'$now'/PT5S"}' | jq -r .id) 11 | sleep 17 12 | initial=$(cat date.txt | wc -l) 13 | curl -X DELETE http://127.0.0.1:8000/api/v1/job/$id/ 14 | sleep 13 15 | final=$(cat date.txt | wc -l) 16 | if [ $initial == $final ]; then 17 | echo "Strings are equal" 18 | else 19 | echo "Strings are not equal" 20 | fi 21 | #kill $GO_PID 22 | kill -9 `cat pid.txt` 23 | rm pid.txt 24 | -------------------------------------------------------------------------------- /deployment/systemd/README.md: -------------------------------------------------------------------------------- 1 | # Systemd Deployment 2 | 3 | ### Steps for Use: 4 | 5 | 1. Make sure to change the path to your `kala` binary in ExecStart (within the `kala.service` file) to where it is. 6 | 1. Make sure to change the port in `kala.service`'s ExecStart to what you want it to be. (Defaults to 8000) 7 | 1. Move `kala.service` to `/etc/systemd/system/` 8 | 1. Run `systemctl enable /etc/systemd/system/kala.service` 9 | 1. Run `systemctl start kala` and then `systemctl status kala` to verify that it is working properly. 
10 | 11 | 12 | ### Resources 13 | 14 | * [Getting Started with systemd](https://coreos.com/docs/launching-containers/launching/getting-started-with-systemd/) 15 | 16 | -------------------------------------------------------------------------------- /deployment/systemd/kala.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kala 3 | 4 | [Service] 5 | ExecStart=/home/yourname/bin/kala serve -v -p 8000 6 | 7 | [Install] 8 | WantedBy=multi-user.target 9 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | consul: 5 | image: progrium/consul 6 | ports: 7 | - "8500:8500" 8 | command: "-server -bootstrap" 9 | redis: 10 | image: redis:4 11 | ports: 12 | - "6379:6379" 13 | mongo: 14 | image: mongo:3.4 15 | ports: 16 | - "27017:27017" 17 | postgres: 18 | image: postgres:10 19 | ports: 20 | - "5432:5432" 21 | mysql: 22 | image: mysql:5.7 23 | ports: 24 | - "3306:3306" 25 | environment: 26 | MYSQL_ALLOW_EMPTY_PASSWORD: 1 27 | MYSQL_DATABASE: test 28 | # TZ: Europe/Madrid 29 | 30 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | Firstly, run kala on default port: 4 | ```bash 5 | kala serve 6 | ``` 7 | 8 | ### Running python script 9 | 10 | ```bash 11 | pip install -r requirements.txt 12 | python python/basic_test.py 13 | ``` 14 | 15 | ### Running ruby script 16 | 17 | ```bash 18 | ruby ruby/basic_test.rb 19 | ``` 20 | -------------------------------------------------------------------------------- /examples/bash/example-shell-command.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | http http://127.0.0.1:8000/api/v1/job/ name=test_job 
command="touch lol" schedule=R2/2015-03-18T21:48:30-07:00/PT5S 3 | -------------------------------------------------------------------------------- /examples/example-kala-commands/example-command.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo $(date) >> $(pwd)/my_data_collection.txt 3 | -------------------------------------------------------------------------------- /examples/python/basic_test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import simplejson 3 | import os 4 | from time import sleep 5 | from datetime import datetime, timedelta 6 | from dateutil.tz import tzlocal 7 | 8 | API_URL = "http://127.0.0.1:8000/api/v1/job/" 9 | 10 | data = { 11 | "name": "test_job", 12 | "command": "bash " + os.path.dirname(os.path.realpath(__file__)) + "/../example-kala-commands/example-command.sh", 13 | "epsilon": "PT5S", 14 | } 15 | 16 | dt = datetime.isoformat(datetime.now(tzlocal()) + timedelta(0, 10)) 17 | data["schedule"] = "%s/%s/%s" % ("R2", dt, "PT10S") 18 | 19 | if __name__ == "__main__": 20 | print('Sending request to {0}'.format(API_URL)) 21 | print('Payload is: {0}'.format(data)) 22 | r = requests.post(API_URL, data=simplejson.dumps(data)) 23 | 24 | print("\n\nCreating...\n") 25 | job_id = simplejson.loads(r.content)['id'] 26 | print('Job was created with an id of {0}'.format(job_id)) 27 | 28 | print("\n\nGetting...\n") 29 | m = requests.get(API_URL + job_id) 30 | print(m.content) 31 | 32 | print("\n\nDeleting...\n") 33 | 34 | print("\n\nWaiting to delete...\n") 35 | sleep(21) 36 | n = requests.delete(API_URL + job_id) 37 | 38 | print(n.__dict__) 39 | -------------------------------------------------------------------------------- /examples/python/example_dependent_jobs.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import simplejson 3 | import os 4 | from time import sleep 5 | 
from datetime import datetime, timedelta 6 | from dateutil.tz import tzlocal 7 | 8 | API_URL = "http://127.0.0.1:8000/api/v1/job/" 9 | 10 | scheduled_start = datetime.isoformat(datetime.now(tzlocal()) + timedelta(0, 10)) 11 | parent_job = { 12 | "name": "test_job", 13 | "command": "bash " + os.path.dirname(os.path.realpath(__file__)) + "/../example-kala-commands/example-command.sh", 14 | "epsilon": "PT5S", 15 | "schedule": "%s/%s/%s" % ("R2", scheduled_start, "PT10S"), 16 | } 17 | child_job = { 18 | "name": "my_child_job", 19 | "command": "bash " + os.path.dirname(os.path.realpath(__file__)) + "/../example-kala-commands/example-command.sh", 20 | "epsilon": "PT5S", 21 | } 22 | 23 | if __name__ == "__main__": 24 | print "Sending request to %s" % API_URL 25 | 26 | r = requests.post(API_URL, data=simplejson.dumps(parent_job)) 27 | 28 | job_id = simplejson.loads(r.content)['id'] 29 | print "Parent Job was created with an id of %s" % job_id 30 | 31 | # Make sure to add the parent_job's id to the payload. 32 | child_job["parent_jobs"] = [job_id] 33 | 34 | print "Creating child job..." 35 | new_req = requests.post(API_URL, data=simplejson.dumps(child_job)) 36 | 37 | child_job_id = simplejson.loads(new_req.content)['id'] 38 | print "Child Job was created with an id of %s" % child_job_id 39 | 40 | print "Waiting to delete...." 
41 | sleep(21) 42 | n = requests.delete(API_URL + child_job_id) 43 | n = requests.delete(API_URL + job_id) 44 | 45 | print n.__dict__ 46 | -------------------------------------------------------------------------------- /examples/python/requirements.txt: -------------------------------------------------------------------------------- 1 | dateutils==0.6.6 2 | ipython==8.10.0 3 | python-dateutil==2.4.1 4 | pytz==2014.10 5 | requests==2.20.0 6 | simplejson==3.6.5 7 | six==1.9.0 8 | wsgiref==0.1.2 9 | -------------------------------------------------------------------------------- /examples/ruby/basic_test.rb: -------------------------------------------------------------------------------- 1 | require 'net/http' 2 | require 'time' 3 | require 'json' 4 | 5 | class Kala 6 | API_URL = 'http://127.0.0.1:8000/api/v1/job/' 7 | 8 | def post(data) 9 | req = Net::HTTP::Post.new(uri.path) 10 | req.body = data.to_json 11 | call(req) 12 | end 13 | 14 | def get(id) 15 | req = Net::HTTP::Get.new("#{uri.path}#{id}/") 16 | call(req) 17 | end 18 | 19 | def delete(id) 20 | req = Net::HTTP::Delete.new("#{uri.path}#{id}/") 21 | call(req) 22 | end 23 | 24 | private 25 | 26 | def uri 27 | @uri ||= URI(API_URL) 28 | end 29 | 30 | def call(req) 31 | res = Net::HTTP.start(uri.host, uri.port) do |http| 32 | http.request(req) 33 | end 34 | 35 | if res.body 36 | JSON.parse(res.body) 37 | else 38 | res 39 | end 40 | end 41 | end 42 | 43 | data = { 44 | name: 'ruby_job', 45 | command: "bash #{Dir.pwd}/../example-kala-commands/example-command.sh", 46 | epsilon: 'PT5S', 47 | schedule: "R2/#{(Time.now + 10).iso8601}/PT10S" 48 | } 49 | 50 | puts "Sending request to #{Kala::API_URL}" 51 | puts "Payload is: #{data}\n\n" 52 | 53 | kala = Kala.new 54 | 55 | job_id = kala.post(data)['id'] 56 | puts "Job was created with an id of #{job_id}" 57 | 58 | puts "Getting informations about job #{job_id}" 59 | puts kala.get(job_id) 60 | 61 | puts "Waiting to delete job #{job_id}" 62 | sleep(21) 63 | puts 
kala.delete(job_id).inspect 64 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ajvb/kala 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/DATA-DOG/go-sqlmock v1.3.0 7 | github.com/armon/go-metrics v0.3.3 // indirect 8 | github.com/cornelk/hashmap v1.0.1 9 | github.com/elazarl/go-bindata-assetfs v1.0.1 10 | github.com/garyburd/redigo v1.0.1-0.20170208211623-48545177e92a 11 | github.com/go-bindata/go-bindata v3.1.2+incompatible 12 | github.com/go-sql-driver/mysql v1.4.1 13 | github.com/gorilla/context v0.0.0-20141217160251-215affda49ad // indirect 14 | github.com/gorilla/mux v0.0.0-20150808061613-ffb3f683aad4 15 | github.com/hashicorp/consul/api v1.1.0 16 | github.com/hashicorp/go-msgpack v1.1.5 // indirect 17 | github.com/hashicorp/go-uuid v1.0.2 // indirect 18 | github.com/hashicorp/memberlist v0.2.2 // indirect 19 | github.com/jmoiron/sqlx v1.2.0 20 | github.com/lestrrat-go/tcputil v0.0.0-20180223003554-d3c7f98154fb // indirect 21 | github.com/lestrrat-go/test-mysqld v0.0.0-20190527004737-6c91be710371 22 | github.com/lib/pq v1.0.0 23 | github.com/mattn/go-shellwords v1.0.0 24 | github.com/mitchellh/go-homedir v1.1.0 25 | github.com/mixer/clock v0.0.0-20190507173039-c311c17adb1f 26 | github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d 27 | github.com/ory/dockertest/v3 v3.8.1 28 | github.com/phyber/negroni-gzip v0.0.0-20180113114010-ef6356a5d029 29 | github.com/rafaeljusto/redigomock v0.0.0-20170720131524-7ae0511314e9 30 | github.com/sirupsen/logrus v1.8.1 31 | github.com/spf13/cobra v1.0.0 32 | github.com/spf13/viper v1.7.0 33 | github.com/stretchr/testify v1.7.0 34 | github.com/urfave/negroni v1.0.0 35 | go.etcd.io/bbolt v1.3.5 36 | google.golang.org/appengine v1.6.6 // indirect 37 | gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 38 | ) 39 | 
-------------------------------------------------------------------------------- /integrate/integration_test.go: -------------------------------------------------------------------------------- 1 | package integrate 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/http" 7 | "net/http/httptest" 8 | "runtime" 9 | "testing" 10 | "time" 11 | 12 | "github.com/ajvb/kala/api" 13 | "github.com/ajvb/kala/client" 14 | "github.com/ajvb/kala/job" 15 | "github.com/mixer/clock" 16 | ) 17 | 18 | func TestIntegrationTest(t *testing.T) { 19 | 20 | jobDB := &job.MockDB{} 21 | cache := job.NewLockFreeJobCache(jobDB) 22 | 23 | clk := clock.NewMockClock() 24 | cache.Clock.SetClock(clk) 25 | 26 | addr := newLocalListener(t) 27 | 28 | kalaApi := api.MakeServer(addr, cache, "default@example.com", false) 29 | go kalaApi.ListenAndServe() 30 | runtime.Gosched() 31 | kalaClient := client.New("http://" + addr) 32 | 33 | hit := make(chan struct{}) 34 | srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 35 | hit <- struct{}{} 36 | w.WriteHeader(200) 37 | })) 38 | 39 | scheduleTime := clk.Now().Add(time.Minute * 3) 40 | parsedTime := scheduleTime.Format(time.RFC3339) 41 | delay := "PT5M" 42 | scheduleStr := fmt.Sprintf("R/%s/%s", parsedTime, delay) 43 | body := &job.Job{ 44 | Name: "Increment HitCount", 45 | Schedule: scheduleStr, 46 | JobType: job.RemoteJob, 47 | RemoteProperties: job.RemoteProperties{ 48 | Url: "http://" + srv.Listener.Addr().String(), 49 | Method: http.MethodGet, 50 | ExpectedResponseCodes: []int{200}, 51 | }, 52 | } 53 | _, err := kalaClient.CreateJob(body) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | clk.AddTime(time.Minute) 59 | select { 60 | case <-hit: 61 | t.Fatalf("Did not expect job to have run yet.") 62 | case <-time.After(time.Second): 63 | } 64 | 65 | clk.AddTime(time.Minute) 66 | select { 67 | case <-hit: 68 | t.Fatalf("Still did not expect job to have run yet.") 69 | case <-time.After(time.Second): 70 | } 71 | 72 | 
clk.AddTime(time.Minute * 3) 73 | select { 74 | case <-hit: 75 | case <-time.After(time.Second * 5): 76 | t.Fatalf("Expected job to have run.") 77 | } 78 | 79 | clk.AddTime(time.Minute * 5) 80 | select { 81 | case <-hit: 82 | case <-time.After(time.Second * 5): 83 | t.Fatalf("Expected job to have run again.") 84 | } 85 | 86 | } 87 | 88 | func newLocalListener(t *testing.T) string { 89 | l, err := net.Listen("tcp", "127.0.0.1:0") 90 | if err != nil { 91 | if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { 92 | t.Fatalf("failed to listen on a port: %v", err) 93 | } 94 | } 95 | defer func() { 96 | if err := l.Close(); err != nil { 97 | t.Fatal(err) 98 | } 99 | }() 100 | return l.Addr().String() 101 | } 102 | -------------------------------------------------------------------------------- /job/cache.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "os" 7 | "os/signal" 8 | "sync" 9 | "syscall" 10 | "time" 11 | 12 | "github.com/cornelk/hashmap" 13 | log "github.com/sirupsen/logrus" 14 | ) 15 | 16 | var ( 17 | ErrJobDoesntExist = errors.New("The job you requested does not exist") 18 | ) 19 | 20 | type JobCache interface { 21 | Get(id string) (*Job, error) 22 | GetAll() *JobsMap 23 | Set(j *Job) error 24 | Delete(id string) error 25 | Persist() error 26 | Enable(j *Job) error 27 | Disable(j *Job) error 28 | } 29 | 30 | type JobsMap struct { 31 | Jobs map[string]*Job 32 | Lock sync.RWMutex 33 | } 34 | 35 | func NewJobsMap() *JobsMap { 36 | return &JobsMap{ 37 | Jobs: map[string]*Job{}, 38 | Lock: sync.RWMutex{}, 39 | } 40 | } 41 | 42 | type MemoryJobCache struct { 43 | // Jobs is a map from Job id's to pointers to the jobs. 44 | // Used as the main "data store" within this cache implementation. 
45 | jobs *JobsMap 46 | jobDB JobDB 47 | PersistOnWrite bool 48 | } 49 | 50 | func NewMemoryJobCache(jobDB JobDB) *MemoryJobCache { 51 | return &MemoryJobCache{ 52 | jobs: NewJobsMap(), 53 | jobDB: jobDB, 54 | } 55 | } 56 | 57 | func (c *MemoryJobCache) Start(persistWaitTime time.Duration) { 58 | if persistWaitTime == 0 { 59 | c.PersistOnWrite = true 60 | } 61 | 62 | // Prep cache 63 | allJobs, err := c.jobDB.GetAll() 64 | if err != nil { 65 | log.Fatal(err) 66 | } 67 | for _, j := range allJobs { 68 | if j.ShouldStartWaiting() { 69 | j.StartWaiting(c, false) 70 | } 71 | err = c.Set(j) 72 | if err != nil { 73 | log.Errorln(err) 74 | } 75 | } 76 | 77 | // Occasionally, save items in cache to db. 78 | if persistWaitTime > 0 { 79 | go c.PersistEvery(persistWaitTime) 80 | } 81 | 82 | // Process-level defer for shutting down the db. 83 | ch := make(chan os.Signal, 1) 84 | signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) 85 | go func() { 86 | s := <-ch 87 | log.Infof("Process got signal: %s", s) 88 | log.Infof("Shutting down....") 89 | 90 | // Persist all jobs to database 91 | err = c.Persist() 92 | if err != nil { 93 | log.Errorln(err) 94 | } 95 | 96 | // Close the database 97 | c.jobDB.Close() 98 | 99 | os.Exit(0) 100 | }() 101 | } 102 | 103 | func (c *MemoryJobCache) Get(id string) (*Job, error) { 104 | c.jobs.Lock.RLock() 105 | defer c.jobs.Lock.RUnlock() 106 | 107 | j := c.jobs.Jobs[id] 108 | if j == nil { 109 | return nil, ErrJobDoesntExist 110 | } 111 | 112 | return j, nil 113 | } 114 | 115 | func (c *MemoryJobCache) GetAll() *JobsMap { 116 | return c.jobs 117 | } 118 | 119 | func (c *MemoryJobCache) Set(j *Job) error { 120 | c.jobs.Lock.Lock() 121 | defer c.jobs.Lock.Unlock() 122 | if j == nil { 123 | return nil 124 | } 125 | 126 | if c.PersistOnWrite { 127 | if err := c.jobDB.Save(j); err != nil { 128 | return err 129 | } 130 | } 131 | 132 | c.jobs.Jobs[j.Id] = j 133 | return nil 134 | } 135 | 136 | func (c *MemoryJobCache) Delete(id string) 
error { 137 | c.jobs.Lock.Lock() 138 | defer c.jobs.Lock.Unlock() 139 | 140 | j := c.jobs.Jobs[id] 141 | if j == nil { 142 | return ErrJobDoesntExist 143 | } 144 | j.lock.Lock() 145 | defer j.lock.Unlock() 146 | 147 | err := c.jobDB.Delete(id) 148 | if err != nil { 149 | err = fmt.Errorf("Error occurred while trying to delete job from db: %s", err) 150 | if c.PersistOnWrite { 151 | return err 152 | } 153 | } 154 | 155 | j.lock.Unlock() 156 | j.StopTimer() 157 | j.lock.Lock() 158 | 159 | go func() { 160 | log.Errorln(j.DeleteFromParentJobs(c)) // todo: review 161 | }() 162 | 163 | // Remove itself from dependent jobs as a parent job 164 | // and possibly delete child jobs if they don't have any other parents. 165 | go func() { 166 | log.Errorln(j.DeleteFromDependentJobs(c)) // todo: review 167 | }() 168 | 169 | delete(c.jobs.Jobs, id) 170 | 171 | return err 172 | } 173 | 174 | func (c *MemoryJobCache) Enable(j *Job) error { 175 | return enable(j, c, c.PersistOnWrite) 176 | } 177 | 178 | // Disable stops a job from running by stopping its jobTimer. It also sets Job.Disabled to true, 179 | // which is reflected in the UI. 180 | func (c *MemoryJobCache) Disable(j *Job) error { 181 | return disable(j, c, c.PersistOnWrite) 182 | } 183 | 184 | func (c *MemoryJobCache) Persist() error { 185 | c.jobs.Lock.RLock() 186 | defer c.jobs.Lock.RUnlock() 187 | for _, j := range c.jobs.Jobs { 188 | err := c.jobDB.Save(j) 189 | if err != nil { 190 | return err 191 | } 192 | } 193 | return nil 194 | } 195 | 196 | func (c *MemoryJobCache) PersistEvery(persistWaitTime time.Duration) { 197 | wait := time.NewTicker(persistWaitTime).C 198 | var err error 199 | for { 200 | <-wait 201 | err = c.Persist() 202 | if err != nil { 203 | log.Errorf("Error occurred persisting the database. 
Err: %s", err) 204 | } 205 | } 206 | } 207 | 208 | type LockFreeJobCache struct { 209 | jobs *hashmap.HashMap 210 | jobDB JobDB 211 | retentionPeriod time.Duration 212 | PersistOnWrite bool 213 | Clock 214 | } 215 | 216 | func NewLockFreeJobCache(jobDB JobDB) *LockFreeJobCache { 217 | return &LockFreeJobCache{ 218 | jobs: hashmap.New(8), //nolint:gomnd 219 | jobDB: jobDB, 220 | retentionPeriod: -1, 221 | } 222 | } 223 | 224 | func (c *LockFreeJobCache) Start(persistWaitTime time.Duration, jobstatTtl time.Duration) { 225 | if persistWaitTime == 0 { 226 | c.PersistOnWrite = true 227 | } 228 | 229 | // Prep cache 230 | allJobs, err := c.jobDB.GetAll() 231 | if err != nil { 232 | log.Fatal(err) 233 | } 234 | for _, j := range allJobs { 235 | if j.Schedule == "" { 236 | log.Infof("Job %s:%s skipped.", j.Name, j.Id) 237 | continue 238 | } 239 | if j.ShouldStartWaiting() { 240 | j.StartWaiting(c, false) 241 | } 242 | log.Infof("Job %s:%s added to cache.", j.Name, j.Id) 243 | err := c.Set(j) 244 | if err != nil { 245 | log.Errorln(err) 246 | } 247 | } 248 | // Occasionally, save items in cache to db. 249 | if persistWaitTime > 0 { 250 | go c.PersistEvery(persistWaitTime) 251 | } 252 | 253 | // Run retention every minute to clean up old job stats entries 254 | if jobstatTtl > 0 { 255 | c.retentionPeriod = jobstatTtl 256 | go c.RetainEvery(1 * time.Minute) 257 | } 258 | 259 | // Process-level defer for shutting down the db. 
260 | ch := make(chan os.Signal, 1) 261 | signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) 262 | go func() { 263 | s := <-ch 264 | log.Infof("Process got signal: %s", s) 265 | log.Infof("Shutting down....") 266 | 267 | // Persist all jobs to database 268 | log.Errorln(c.Persist()) 269 | 270 | // Close the database 271 | c.jobDB.Close() 272 | 273 | os.Exit(0) 274 | }() 275 | } 276 | 277 | func (c *LockFreeJobCache) Get(id string) (*Job, error) { 278 | val, exists := c.jobs.GetStringKey(id) 279 | if val == nil || !exists { 280 | return nil, ErrJobDoesntExist 281 | } 282 | j := val.(*Job) 283 | if j == nil { 284 | return nil, ErrJobDoesntExist 285 | } 286 | return j, nil 287 | } 288 | 289 | func (c *LockFreeJobCache) GetAll() *JobsMap { 290 | jm := NewJobsMap() 291 | for el := range c.jobs.Iter() { 292 | jm.Jobs[el.Key.(string)] = el.Value.(*Job) 293 | } 294 | return jm 295 | } 296 | 297 | func (c *LockFreeJobCache) Set(j *Job) error { 298 | if j == nil { 299 | return nil 300 | } 301 | 302 | if c.PersistOnWrite { 303 | if err := c.jobDB.Save(j); err != nil { 304 | return err 305 | } 306 | } 307 | 308 | c.jobs.Set(j.Id, j) 309 | return nil 310 | } 311 | 312 | func (c *LockFreeJobCache) Delete(id string) error { 313 | j, err := c.Get(id) 314 | if err != nil { 315 | return ErrJobDoesntExist 316 | } 317 | j.lock.Lock() 318 | defer j.lock.Unlock() 319 | 320 | err = c.jobDB.Delete(id) 321 | if err != nil { 322 | err = fmt.Errorf("Error occurred while trying to delete job from db: %s", err) 323 | if c.PersistOnWrite { 324 | return err 325 | } 326 | } 327 | 328 | j.lock.Unlock() 329 | j.StopTimer() 330 | j.lock.Lock() 331 | 332 | go func() { 333 | log.Errorln(j.DeleteFromParentJobs(c)) // todo: review 334 | }() 335 | // Remove itself from dependent jobs as a parent job 336 | // and possibly delete child jobs if they don't have any other parents. 
337 | go func() { 338 | log.Errorln(j.DeleteFromDependentJobs(c)) // todo: review 339 | }() 340 | log.Infof("Deleting %s", id) 341 | c.jobs.Del(id) 342 | return err 343 | } 344 | 345 | func (c *LockFreeJobCache) Enable(j *Job) error { 346 | return enable(j, c, c.PersistOnWrite) 347 | } 348 | 349 | // Disable stops a job from running by stopping its jobTimer. It also sets Job.Disabled to true, 350 | // which is reflected in the UI. 351 | func (c *LockFreeJobCache) Disable(j *Job) error { 352 | return disable(j, c, c.PersistOnWrite) 353 | } 354 | 355 | func (c *LockFreeJobCache) Persist() error { 356 | jm := c.GetAll() 357 | for _, j := range jm.Jobs { 358 | j.lock.RLock() 359 | err := c.jobDB.Save(j) 360 | if err != nil { 361 | j.lock.RUnlock() 362 | return err 363 | } 364 | j.lock.RUnlock() 365 | } 366 | return nil 367 | } 368 | 369 | func (c *LockFreeJobCache) PersistEvery(persistWaitTime time.Duration) { 370 | wait := time.NewTicker(persistWaitTime).C 371 | var err error 372 | for { 373 | <-wait 374 | err = c.Persist() 375 | if err != nil { 376 | log.Errorf("Error occurred persisting the database. 
Err: %s", err)
377 | }
378 | }
379 | }
380 |
381 | // locateJobStatsIndexForRetention returns the index of the newest JobStat
382 | // that ran before the retention cutoff (now - retentionPeriod), or -1 when
383 | // no stat has expired. Stats are appended in chronological order, so every
384 | // entry up to and including the returned index is eligible for removal.
385 | func (c *LockFreeJobCache) locateJobStatsIndexForRetention(stats []*JobStat) (marker int) {
386 | // Use the cache's clock rather than time.Now() so retention stays
387 | // consistent with the rest of the cache and honors mock clocks in tests.
388 | now := c.Time().Now()
389 | expiresAt := now.Add(-c.retentionPeriod)
390 | pos := -1
391 | for i, el := range stats {
392 | diff := el.RanAt.Sub(expiresAt)
393 | if diff < 0 {
394 | pos = i
395 | }
396 | }
397 | return pos
398 | }
399 |
400 | // Retain walks every cached job and prunes its expired stats entries.
401 | func (c *LockFreeJobCache) Retain() error {
402 | for el := range c.jobs.Iter() {
403 | job := el.Value.(*Job)
404 | c.compactJobStats(job)
405 | }
406 | return nil
407 | }
408 |
409 | // compactJobStats drops all stats entries at or before the retention
410 | // cutoff index, keeping only the still-fresh tail of job.Stats.
411 | func (c *LockFreeJobCache) compactJobStats(job *Job) {
412 | job.lock.Lock()
413 | defer job.lock.Unlock()
414 | pos := c.locateJobStatsIndexForRetention(job.Stats)
415 | if pos >= 0 {
416 | log.Infof("JobStats TTL: removing %d items", pos+1)
417 | tmp := make([]*JobStat, len(job.Stats)-pos-1)
418 | copy(tmp, job.Stats[pos+1:])
419 | job.Stats = tmp
420 | }
421 | }
422 |
423 | // RetainEvery runs Retain on a fixed interval forever, logging any errors.
424 | func (c *LockFreeJobCache) RetainEvery(retentionWaitTime time.Duration) {
425 | wait := time.NewTicker(retentionWaitTime).C
426 | var err error
427 | for {
428 | <-wait
429 | err = c.Retain()
430 | if err != nil {
431 | log.Errorf("Error occurred during invoking retention. Err: %s", err)
432 | }
433 | }
434 | }
435 | -------------------------------------------------------------------------------- /job/cache_test.go: -------------------------------------------------------------------------------- 1 | package job
2 |
3 | import (
4 | "testing"
5 | "time"
6 |
7 | "github.com/stretchr/testify/assert"
8 | )
9 |
10 | // This file contains tests for specific JobCaches.
11 | 12 | func TestCacheStart(t *testing.T) { 13 | cache := NewMockCache() 14 | cache.Start(time.Hour, time.Hour) 15 | } 16 | 17 | func TestCacheRetainShouldRemoveOldJobStats(t *testing.T) { 18 | cache := NewMockCache() 19 | mockDb := &MockDBGetAll{} 20 | cache.jobDB = mockDb 21 | 22 | pastDate := time.Date(2016, time.April, 12, 20, 0, 0, 0, time.UTC) 23 | j := GetMockRecurringJobWithSchedule(pastDate, "PT1S") 24 | j.Stats = GetMockJobStats(pastDate, 5) 25 | j.Id = "0" 26 | 27 | jobs := make([]*Job, 0) 28 | jobs = append(jobs, j) 29 | mockDb.response = jobs 30 | 31 | cache.Start(0, 1*time.Minute) // Retain 1 minute 32 | j.lock.RLock() 33 | assert.Equal(t, 5, len(j.Stats)) 34 | j.lock.RUnlock() 35 | 36 | time.Sleep(time.Second * 2) 37 | cache.Retain() 38 | 39 | j.lock.RLock() 40 | assert.Equal(t, 1, len(j.Stats)) // New job stats should not be cleaned up 41 | j.lock.RUnlock() 42 | } 43 | 44 | func TestCacheStartStartsARecurringJobWithStartDateInThePast(t *testing.T) { 45 | 46 | cache := NewMockCache() 47 | mockDb := &MockDBGetAll{} 48 | cache.jobDB = mockDb 49 | 50 | pastDate := time.Date(2016, time.April, 12, 20, 0, 0, 0, time.UTC) 51 | j := GetMockRecurringJobWithSchedule(pastDate, "PT1S") 52 | j.Id = "0" 53 | 54 | jobs := make([]*Job, 0) 55 | jobs = append(jobs, j) 56 | mockDb.response = jobs 57 | 58 | cache.Start(0, -1) 59 | time.Sleep(time.Second * 2) 60 | 61 | j.lock.RLock() 62 | assert.Equal(t, j.Metadata.SuccessCount, uint(1)) 63 | j.lock.RUnlock() 64 | } 65 | 66 | func TestCacheStartCanResumeJobAtNextScheduledPoint(t *testing.T) { 67 | 68 | cache := NewMockCache() 69 | mockDb := &MockDBGetAll{} 70 | cache.jobDB = mockDb 71 | 72 | pastDate := time.Now().Add(-1 * time.Second) 73 | j := GetMockRecurringJobWithSchedule(pastDate, "PT3S") 74 | j.Id = "0" 75 | j.ResumeAtNextScheduledTime = true 76 | j.InitDelayDuration(false) 77 | 78 | jobs := make([]*Job, 0) 79 | jobs = append(jobs, j) 80 | mockDb.response = jobs 81 | 82 | cache.Start(0, -1) 83 | 84 | // After 1 
second, the job should not have run. 85 | time.Sleep(time.Second * 1) 86 | j.lock.RLock() 87 | assert.Equal(t, 0, int(j.Metadata.SuccessCount)) 88 | j.lock.RUnlock() 89 | 90 | // After 2 more seconds, it should have run. 91 | time.Sleep(time.Second * 2) 92 | j.lock.RLock() 93 | assert.Equal(t, 1, int(j.Metadata.SuccessCount)) 94 | j.lock.RUnlock() 95 | 96 | // Disable to prevent from running 97 | assert.NoError(t, j.Disable(cache)) 98 | 99 | // It shouldn't run while it's disabled. 100 | time.Sleep(time.Second * 3) 101 | j.lock.RLock() 102 | assert.Equal(t, 1, int(j.Metadata.SuccessCount)) 103 | j.lock.RUnlock() 104 | 105 | // Re-enable 106 | assert.NoError(t, j.Enable(cache)) 107 | 108 | // It shouldn't re-run right away; should wait for its next run point. 109 | time.Sleep(time.Second * 1) 110 | j.lock.RLock() 111 | assert.Equal(t, 1, int(j.Metadata.SuccessCount)) 112 | j.lock.RUnlock() 113 | 114 | // Now it should have run again. 115 | time.Sleep(time.Second * 2) 116 | j.lock.RLock() 117 | assert.Equal(t, 2, int(j.Metadata.SuccessCount)) 118 | j.lock.RUnlock() 119 | 120 | } 121 | -------------------------------------------------------------------------------- /job/cache_util.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | func enable(j *Job, cache JobCache, persist bool) error { 4 | j.lock.Lock() 5 | defer j.lock.Unlock() 6 | 7 | shouldStartWaiting := j.jobTimer != nil && j.Disabled 8 | j.Disabled = false 9 | 10 | if persist { 11 | j.lock.Unlock() 12 | if err := cache.Set(j); err != nil { 13 | j.lock.Lock() 14 | j.Disabled = true 15 | return err 16 | } 17 | j.lock.Lock() 18 | } 19 | 20 | if shouldStartWaiting { 21 | go j.StartWaiting(cache, false) 22 | } 23 | 24 | return nil 25 | } 26 | 27 | func disable(j *Job, cache JobCache, persist bool) error { 28 | j.lock.Lock() 29 | defer j.lock.Unlock() 30 | 31 | j.Disabled = true 32 | if persist { 33 | j.lock.Unlock() 34 | if err := cache.Set(j); err != nil { 
35 | j.lock.Lock() 36 | j.Disabled = false 37 | return err 38 | } 39 | j.lock.Lock() 40 | } 41 | 42 | if j.jobTimer != nil { 43 | j.jobTimer.Stop() 44 | } 45 | 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /job/caches_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | // This file contains tests that can be run against all types of JobCaches. 10 | 11 | func TestCache(t *testing.T) { 12 | 13 | tt := []struct { 14 | c JobCache 15 | }{ 16 | { 17 | c: NewLockFreeJobCache(&MockDB{}), 18 | }, 19 | { 20 | c: NewMemoryJobCache(&MockDB{}), 21 | }, 22 | } 23 | 24 | for _, row := range tt { 25 | t.Run("", func(t *testing.T) { 26 | testCache(t, row.c) 27 | }) 28 | } 29 | 30 | } 31 | 32 | func testCache(t *testing.T, cache JobCache) { 33 | 34 | t.Run("TestCacheDeleteJobNotFound", func(t *testing.T) { 35 | err := cache.Delete("not-a-real-id") 36 | assert.Equal(t, ErrJobDoesntExist, err) 37 | }) 38 | 39 | t.Run("TestCachePersist", func(t *testing.T) { 40 | err := cache.Persist() 41 | assert.NoError(t, err) 42 | }) 43 | 44 | } 45 | 46 | func TestCachePersistence(t *testing.T) { 47 | 48 | mdb1a := NewMemoryDB() 49 | cache1a := NewMemoryJobCache(mdb1a) 50 | 51 | mdb1b := NewMemoryDB() 52 | cache1b := NewMemoryJobCache(mdb1b) 53 | cache1b.PersistOnWrite = true 54 | 55 | mdb2a := NewMemoryDB() 56 | cache2a := NewLockFreeJobCache(mdb2a) 57 | 58 | mdb2b := NewMemoryDB() 59 | cache2b := NewLockFreeJobCache(mdb2b) 60 | cache2b.PersistOnWrite = true 61 | 62 | tt := []struct { 63 | db JobDB 64 | c JobCache 65 | shouldPersist bool 66 | }{ 67 | { 68 | db: mdb1a, 69 | c: cache1a, 70 | shouldPersist: false, 71 | }, 72 | { 73 | db: mdb1b, 74 | c: cache1b, 75 | shouldPersist: true, 76 | }, 77 | { 78 | db: mdb2a, 79 | c: cache2a, 80 | shouldPersist: false, 81 | }, 82 | { 83 | db: mdb2b, 84 | 
c: cache2b, 85 | shouldPersist: true, 86 | }, 87 | } 88 | 89 | for _, row := range tt { 90 | t.Run("", func(t *testing.T) { 91 | testCachePersistence(t, row.c, row.db, row.shouldPersist) 92 | }) 93 | } 94 | 95 | } 96 | 97 | // This battery of tests ensures that a JobCache behaves appropriately from a persistence perspective. 98 | // We verify that it's either persisting to the JobDB upon each creation/update/delete, or not, as appropriate. 99 | // Note that deletes always propagate to the db, and return an error if that fails, 100 | // but if transactional persistence is turned off the cache will still delete from itself. 101 | func testCachePersistence(t *testing.T, cache JobCache, db JobDB, shouldPersist bool) { 102 | 103 | t.Run("testCachePersistence", func(t *testing.T) { 104 | 105 | j := GetMockJob() 106 | assert.NoError(t, j.Init(cache)) // Saves the job 107 | 108 | saved, err := db.Get(j.Id) 109 | if shouldPersist { 110 | assert.NoError(t, err) 111 | assert.Equal(t, j.Id, saved.Id) 112 | } else { 113 | assert.IsType(t, ErrJobNotFound(""), err) 114 | assert.Equal(t, (*Job)(nil), saved) 115 | } 116 | 117 | t.Run("disable", func(t *testing.T) { 118 | j.Disable(cache) 119 | ret, err := db.Get(j.Id) 120 | if shouldPersist { 121 | assert.NoError(t, err) 122 | assert.Equal(t, true, ret.Disabled) 123 | } else { 124 | assert.IsType(t, ErrJobNotFound(""), err) 125 | assert.Equal(t, (*Job)(nil), ret) 126 | } 127 | 128 | }) 129 | 130 | t.Run("enable", func(t *testing.T) { 131 | j.Enable(cache) 132 | ret, err := db.Get(j.Id) 133 | if shouldPersist { 134 | assert.NoError(t, err) 135 | assert.Equal(t, !shouldPersist, ret.Disabled) 136 | } else { 137 | assert.IsType(t, ErrJobNotFound(""), err) 138 | assert.Equal(t, (*Job)(nil), ret) 139 | } 140 | }) 141 | 142 | t.Run("delete", func(t *testing.T) { 143 | 144 | // If we haven't been persisting, persist it to the DB now 145 | // because we need it there for this test. 
146 | if !shouldPersist { 147 | assert.NoError(t, cache.Persist()) 148 | } 149 | 150 | assert.NoError(t, cache.Delete(j.Id)) 151 | ret, err := db.Get(j.Id) 152 | assert.IsType(t, ErrJobNotFound(""), err) 153 | assert.Equal(t, (*Job)(nil), ret) 154 | 155 | t.Run("errored", func(t *testing.T) { 156 | 157 | j := GetMockJob() 158 | assert.NoError(t, j.Init(cache)) 159 | if !shouldPersist { 160 | assert.NoError(t, cache.Persist()) 161 | } 162 | assert.NoError(t, db.Delete(j.Id)) 163 | 164 | assert.Error(t, cache.Delete(j.Id)) 165 | ret, err := cache.Get(j.Id) 166 | if shouldPersist { 167 | assert.NoError(t, err) 168 | assert.Equal(t, j.Id, ret.Id) 169 | } else { 170 | assert.Error(t, err) 171 | assert.Equal(t, (*Job)(nil), ret) 172 | } 173 | 174 | }) 175 | 176 | }) 177 | 178 | }) 179 | 180 | } 181 | -------------------------------------------------------------------------------- /job/clock.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "sync" 5 | 6 | // This library abstracts the time functionality of the OS so that it can be controlled during unit tests. 7 | // It was selected over thejerf/abtime because abtime is geared towards precision timing rather than scheduling. 8 | // Other libraries were reviewed and rejected due to outstanding bugs. 
9 | "github.com/mixer/clock" 10 | ) 11 | 12 | type Clock struct { 13 | clock.Clock 14 | lock sync.RWMutex 15 | } 16 | 17 | func (clk *Clock) SetClock(in clock.Clock) { 18 | clk.lock.Lock() 19 | defer clk.lock.Unlock() 20 | clk.Clock = in 21 | } 22 | 23 | func (clk *Clock) Time() clock.Clock { 24 | clk.lock.RLock() 25 | defer clk.lock.RUnlock() 26 | 27 | if clk.Clock == nil { 28 | clk.lock.RUnlock() 29 | clk.lock.Lock() 30 | clk.Clock = clock.C 31 | clk.lock.Unlock() 32 | clk.lock.RLock() 33 | } 34 | 35 | return clk.Clock 36 | } 37 | 38 | func (clk *Clock) TimeSet() (result bool) { 39 | clk.lock.RLock() 40 | result = clk.Clock != nil 41 | clk.lock.RUnlock() 42 | return 43 | } 44 | 45 | type Clocker interface { 46 | Time() clock.Clock 47 | TimeSet() bool 48 | } 49 | -------------------------------------------------------------------------------- /job/clock_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "sync" 5 | "time" 6 | 7 | "github.com/mixer/clock" 8 | ) 9 | 10 | // Special hybrid clock that allows you to make time "play" in addition to moving it around manually. 
11 | type HybridClock struct { 12 | *clock.MockClock 13 | offset time.Duration 14 | m sync.RWMutex 15 | } 16 | 17 | func NewHybridClock(start ...time.Time) *HybridClock { 18 | return &HybridClock{ 19 | MockClock: clock.NewMockClock(start...), 20 | } 21 | } 22 | 23 | func (hc *HybridClock) Play() { 24 | hc.m.Lock() 25 | defer hc.m.Unlock() 26 | hc.offset = time.Until(hc.MockClock.Now()) 27 | } 28 | 29 | func (hc *HybridClock) Pause() { 30 | hc.m.Lock() 31 | defer hc.m.Unlock() 32 | hc.offset = 0 33 | } 34 | 35 | func (hc *HybridClock) IsPlaying() (result bool) { 36 | hc.m.RLock() 37 | result = hc.offset != 0 38 | hc.m.RUnlock() 39 | return 40 | } 41 | 42 | func (hc *HybridClock) Now() time.Time { 43 | hc.m.RLock() 44 | defer hc.m.RUnlock() 45 | if hc.offset == 0 { 46 | return hc.MockClock.Now() 47 | } else { 48 | return time.Now().Add(hc.offset) 49 | } 50 | } 51 | 52 | func (hc *HybridClock) AddTime(d time.Duration) { 53 | hc.MockClock.AddTime(d) 54 | if hc.IsPlaying() { 55 | hc.Play() 56 | } 57 | } 58 | 59 | func (hc *HybridClock) SetTime(t time.Time) { 60 | hc.MockClock.SetTime(t) 61 | if hc.IsPlaying() { 62 | hc.Play() 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /job/db.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "fmt" 5 | 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | // ErrJobNotFound is raised when a Job is unable to be found within a database. 
10 | type ErrJobNotFound string 11 | 12 | func (id ErrJobNotFound) Error() string { 13 | return fmt.Sprintf("Job with id of %s not found.", string(id)) 14 | } 15 | 16 | type JobDB interface { 17 | GetAll() ([]*Job, error) 18 | Get(id string) (*Job, error) 19 | Delete(id string) error 20 | Save(job *Job) error 21 | Close() error 22 | } 23 | 24 | func (j *Job) Delete(cache JobCache) error { 25 | var err error 26 | errOne := cache.Delete(j.Id) 27 | if errOne != nil { 28 | log.Errorf("Error occurred while trying to delete job from cache: %s", errOne) 29 | err = errOne 30 | } 31 | return err 32 | } 33 | 34 | func DeleteAll(cache JobCache) error { 35 | allJobs := cache.GetAll() 36 | allJobs.Lock.RLock() 37 | // make a copy of all jobs to prevent deadlock on delete 38 | jobsCopy := make([]*Job, 0, len(allJobs.Jobs)) 39 | for _, j := range allJobs.Jobs { 40 | jobsCopy = append(jobsCopy, j) 41 | } 42 | allJobs.Lock.RUnlock() 43 | 44 | for _, j := range jobsCopy { 45 | if err := j.Delete(cache); err != nil { 46 | return err 47 | } 48 | } 49 | return nil 50 | } 51 | -------------------------------------------------------------------------------- /job/db_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestDelete(t *testing.T) { 11 | cache := NewMockCache() 12 | job := GetMockJobWithGenericSchedule(time.Now()) 13 | job.Init(cache) 14 | 15 | err := job.Delete(cache) 16 | assert.NoError(t, err) 17 | 18 | val, err := cache.Get(job.Id) 19 | assert.Error(t, err) 20 | assert.Nil(t, val) 21 | } 22 | 23 | func TestDeleteDoesNotExists(t *testing.T) { 24 | cache := NewMockCache() 25 | jobOne := GetMockJobWithGenericSchedule(time.Now()) 26 | jobOne.Init(cache) 27 | jobTwo := GetMockJobWithGenericSchedule(time.Now()) 28 | 29 | err := jobTwo.Delete(cache) 30 | assert.Error(t, err) 31 | 32 | val, err := cache.Get(jobOne.Id) 33 | 
assert.NoError(t, err) 34 | assert.Equal(t, jobOne, val) 35 | } 36 | 37 | func TestDeleteAll(t *testing.T) { 38 | cache := NewMockCache() 39 | for i := 0; i < 10; i++ { 40 | job := GetMockJobWithGenericSchedule(time.Now()) 41 | job.Init(cache) 42 | } 43 | 44 | err := DeleteAll(cache) 45 | assert.NoError(t, err) 46 | 47 | allJobs := cache.GetAll() 48 | assert.Empty(t, allJobs.Jobs) 49 | } 50 | -------------------------------------------------------------------------------- /job/job_exclude_from_race_test.go: -------------------------------------------------------------------------------- 1 | //go:build !race 2 | // +build !race 3 | 4 | package job 5 | 6 | import ( 7 | "fmt" 8 | "net/http" 9 | "net/http/httptest" 10 | "testing" 11 | "time" 12 | 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func TestRemoteJobRunner(t *testing.T) { 17 | testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 18 | fmt.Fprintln(w, "Hello, client") 19 | })) 20 | defer testServer.Close() 21 | 22 | mockRemoteJob := GetMockRemoteJob(RemoteProperties{ 23 | Url: testServer.URL, 24 | }) 25 | 26 | cache := NewMockCache() 27 | mockRemoteJob.Init(cache) 28 | cache.Start(0, 2*time.Second) // Retain 1 minute 29 | 30 | mockRemoteJob.Run(cache) 31 | 32 | mockRemoteJob.lock.RLock() 33 | assert.True(t, mockRemoteJob.Metadata.SuccessCount == 1) 34 | mockRemoteJob.lock.RUnlock() 35 | } 36 | 37 | func TestRemoteJobBadStatus(t *testing.T) { 38 | testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 39 | http.Error(w, "something failed", http.StatusInternalServerError) 40 | })) 41 | defer testServer.Close() 42 | 43 | mockRemoteJob := GetMockRemoteJob(RemoteProperties{ 44 | Url: testServer.URL, 45 | }) 46 | 47 | cache := NewMockCache() 48 | mockRemoteJob.Init(cache) 49 | cache.Start(0, 2*time.Second) // Retain 1 minute 50 | 51 | mockRemoteJob.Run(cache) 52 | assert.True(t, mockRemoteJob.Metadata.SuccessCount == 0) 
53 | } 54 | 55 | func TestRemoteJobBadStatusSuccess(t *testing.T) { 56 | testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 57 | http.Error(w, "something failed", http.StatusInternalServerError) 58 | })) 59 | defer testServer.Close() 60 | 61 | mockRemoteJob := GetMockRemoteJob(RemoteProperties{ 62 | Url: testServer.URL, 63 | ExpectedResponseCodes: []int{500}, 64 | }) 65 | 66 | cache := NewMockCache() 67 | mockRemoteJob.Init(cache) 68 | cache.Start(0, 2*time.Second) // Retain 1 minute 69 | 70 | mockRemoteJob.Run(cache) 71 | 72 | mockRemoteJob.lock.Lock() 73 | assert.True(t, mockRemoteJob.Metadata.SuccessCount == 1) 74 | mockRemoteJob.lock.Unlock() 75 | } 76 | -------------------------------------------------------------------------------- /job/job_recurring_test.go: -------------------------------------------------------------------------------- 1 | // Tests for recurring jobs firing appropriately. 2 | package job 3 | 4 | import ( 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/mixer/clock" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | var recurTableTests = []struct { 14 | Name string 15 | Location string 16 | Start string 17 | Interval string 18 | Checkpoints []string 19 | }{ 20 | { 21 | Name: "Daily", 22 | Location: "America/Los_Angeles", 23 | Start: "2020-Jan-13 14:09", 24 | Interval: "P1D", 25 | Checkpoints: []string{ 26 | "2020-Jan-14 14:09", 27 | "2020-Jan-15 14:09", 28 | "2020-Jan-16 14:09", 29 | }, 30 | }, 31 | { 32 | Name: "Daily across DST boundary", 33 | Location: "America/Los_Angeles", 34 | Start: "2020-Mar-05 14:09", 35 | Interval: "P1D", 36 | Checkpoints: []string{ 37 | "2020-Mar-06 14:09", 38 | "2020-Mar-07 14:09", 39 | "2020-Mar-08 14:09", 40 | "2020-Mar-09 14:09", 41 | }, 42 | }, 43 | { 44 | Name: "24 Hourly across DST boundary", 45 | Location: "America/Los_Angeles", 46 | Start: "2020-Mar-05 14:09", 47 | Interval: "PT24H", 48 | Checkpoints: []string{ 49 | "2020-Mar-06 14:09", 50 | 
"2020-Mar-07 14:09", 51 | "2020-Mar-08 15:09", 52 | "2020-Mar-09 15:09", 53 | }, 54 | }, 55 | { 56 | Name: "Weekly", 57 | Location: "America/Los_Angeles", 58 | Start: "2020-Jan-13 14:09", 59 | Interval: "P1W", 60 | Checkpoints: []string{ 61 | "2020-Jan-20 14:09", 62 | "2020-Jan-27 14:09", 63 | "2020-Feb-03 14:09", 64 | }, 65 | }, 66 | { 67 | Name: "Monthly", 68 | Location: "America/Los_Angeles", 69 | Start: "2020-Jan-20 14:09", 70 | Interval: "P1M", 71 | Checkpoints: []string{ 72 | "2020-Feb-20 14:09", 73 | "2020-Mar-20 14:09", 74 | "2020-Apr-20 14:09", 75 | "2020-May-20 14:09", 76 | "2020-Jun-20 14:09", 77 | "2020-Jul-20 14:09", 78 | "2020-Aug-20 14:09", 79 | "2020-Sep-20 14:09", 80 | "2020-Oct-20 14:09", 81 | "2020-Nov-20 14:09", 82 | "2020-Dec-20 14:09", 83 | "2021-Jan-20 14:09", 84 | }, 85 | }, 86 | { 87 | Name: "Monthly with Normalization", 88 | Location: "America/Los_Angeles", 89 | Start: "2020-Jul-31 14:09", 90 | Interval: "P1M", 91 | Checkpoints: []string{ 92 | "2020-Aug-31 14:09", 93 | "2020-Oct-01 14:09", 94 | "2020-Nov-01 14:09", 95 | }, 96 | }, 97 | { 98 | Name: "Yearly across Leap Year boundary", 99 | Location: "America/Los_Angeles", 100 | Start: "2020-Jan-20 14:09", 101 | Interval: "P1Y", 102 | Checkpoints: []string{ 103 | "2021-Jan-20 14:09", 104 | "2022-Jan-20 14:09", 105 | "2023-Jan-20 14:09", 106 | "2024-Jan-20 14:09", 107 | "2025-Jan-20 14:09", 108 | }, 109 | }, 110 | } 111 | 112 | // This test works by using a series of checkpoints, spaced apart. 113 | // A job is scheduled 5 seconds after the first checkpoint. 114 | // By moving the clock to each checkpoint, and then 6 seconds later, 115 | // you can verify that the job hasn't run between the two checkpoints, 116 | // and only runs at the scheduled point. 117 | // 118 | // This is useful for ensuring that durations behave correctly on a grand scale. 
119 | func TestRecur(t *testing.T) { 120 | 121 | for _, testStruct := range recurTableTests { 122 | 123 | func() { 124 | 125 | now := parseTimeInLocation(t, testStruct.Start, testStruct.Location) 126 | 127 | clk := clock.NewMockClock(now) 128 | 129 | start := now.Add(time.Second * 5) 130 | j := GetMockRecurringJobWithSchedule(start, testStruct.Interval) 131 | j.clk.SetClock(clk) 132 | j.succeedInstantly = true // Eliminate any potential variance due to the overhead of shelling out. 133 | 134 | cache := NewMockCache() 135 | j.Init(cache) 136 | j.ranChan = make(chan struct{}) 137 | 138 | checkpoints := append([]string{testStruct.Start}, testStruct.Checkpoints...) 139 | 140 | for i, chk := range checkpoints { 141 | 142 | clk.SetTime(parseTimeInLocation(t, chk, testStruct.Location)) 143 | 144 | select { 145 | case <-j.ranChan: 146 | t.Fatalf("Expected job not run on checkpoint %d of test %s.", i, testStruct.Name) 147 | case <-time.After(time.Second * 1): 148 | } 149 | 150 | j.lock.RLock() 151 | assert.Equal(t, i, int(j.Metadata.SuccessCount), fmt.Sprintf("1st Test of %s index %d", testStruct.Name, i)) 152 | j.lock.RUnlock() 153 | 154 | clk.AddTime(time.Second * 6) 155 | 156 | select { 157 | case <-j.ranChan: 158 | case <-time.After(time.Second * 5): 159 | t.Fatalf("Expected job to have run on checkpoint %d of test %s.", i, testStruct.Name) 160 | } 161 | 162 | j.lock.RLock() 163 | assert.Equal(t, i+1, int(j.Metadata.SuccessCount), fmt.Sprintf("2nd Test of %s index %d", testStruct.Name, i)) 164 | j.lock.RUnlock() 165 | 166 | time.Sleep(time.Millisecond * 500) 167 | } 168 | 169 | }() 170 | 171 | } 172 | 173 | } 174 | -------------------------------------------------------------------------------- /job/job_scheduling_test.go: -------------------------------------------------------------------------------- 1 | // Tests for scheduling code. 
2 | package job 3 | 4 | import ( 5 | "time" 6 | 7 | log "github.com/sirupsen/logrus" 8 | "github.com/stretchr/testify/assert" 9 | 10 | "testing" 11 | ) 12 | 13 | var getWaitDurationTableTests = []struct { 14 | Job *Job 15 | JobFunc func() *Job 16 | ExpectedDuration time.Duration 17 | Name string 18 | }{ 19 | { 20 | JobFunc: func() *Job { 21 | return &Job{ 22 | // Schedule time is passed 23 | Schedule: "R/2015-10-17T11:44:54.389361-07:00/PT10S", 24 | Metadata: Metadata{ 25 | LastAttemptedRun: time.Now(), 26 | }, 27 | } 28 | }, 29 | ExpectedDuration: 10 * time.Second, 30 | Name: "Passed time, 10 seconds", 31 | }, 32 | { 33 | JobFunc: func() *Job { 34 | return &Job{ 35 | // Schedule time is passed 36 | Schedule: "R/2015-10-17T11:44:54.389361-07:00/PT1M", 37 | Metadata: Metadata{ 38 | LastAttemptedRun: time.Now(), 39 | }, 40 | } 41 | }, 42 | ExpectedDuration: time.Minute, 43 | Name: "Passed time, 1 minute", 44 | }, 45 | { 46 | JobFunc: func() *Job { 47 | return &Job{ 48 | // Schedule time is passed, no repetitions 49 | Schedule: "R0/2015-10-17T11:44:54.389361-07:00/", 50 | Metadata: Metadata{ 51 | LastAttemptedRun: time.Time{}, 52 | }, 53 | } 54 | }, 55 | ExpectedDuration: 0 * time.Second, 56 | Name: "Passed time, 0 seconds", 57 | }, 58 | { 59 | JobFunc: func() *Job { 60 | return &Job{ 61 | // Schedule time is passed 62 | Schedule: "R/2015-10-17T11:44:54.389361-07:00/P1D", 63 | Metadata: Metadata{ 64 | LastAttemptedRun: time.Now(), 65 | }, 66 | } 67 | }, 68 | ExpectedDuration: 24 * time.Hour, 69 | Name: "Passed time, 1 day", 70 | }, 71 | } 72 | 73 | func TestGetWatiDuration(t *testing.T) { 74 | for _, testStruct := range getWaitDurationTableTests { 75 | testStruct.Job = testStruct.JobFunc() 76 | err := testStruct.Job.InitDelayDuration(false) 77 | assert.NoError(t, err) 78 | actualDuration := testStruct.Job.GetWaitDuration() 79 | log.Warnf("LastAttempted: %s", testStruct.Job.Metadata.LastAttemptedRun) 80 | assert.InDelta(t, float64(testStruct.ExpectedDuration), 
float64(actualDuration), float64(time.Millisecond*50), "Test of "+testStruct.Name) 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /job/runner.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os/exec" 11 | "strings" 12 | "text/template" 13 | "time" 14 | 15 | "github.com/mattn/go-shellwords" 16 | log "github.com/sirupsen/logrus" 17 | ) 18 | 19 | const HTTP_CODE_OK = 200 20 | 21 | type JobRunner struct { 22 | job *Job 23 | meta Metadata 24 | numberOfAttempts uint 25 | currentRetries uint 26 | currentStat *JobStat 27 | } 28 | 29 | var ( 30 | ErrJobDisabled = errors.New("Job cannot run, as it is disabled") 31 | ErrJobDeleted = errors.New("Job cannot run, as it is deleted") 32 | ErrCmdIsEmpty = errors.New("Job Command is empty.") 33 | ErrJobTypeInvalid = errors.New("Job Type is not valid.") 34 | ErrInvalidDelimiters = errors.New("Job has invalid templating delimiters.") 35 | ) 36 | 37 | // Run calls the appropriate run function, collects metadata around the success 38 | // or failure of the Job's execution, and schedules the next run. 
39 | func (j *JobRunner) Run(cache JobCache) (*JobStat, Metadata, error) { 40 | j.job.lock.RLock() 41 | defer j.job.lock.RUnlock() 42 | 43 | j.meta.LastAttemptedRun = j.job.clk.Time().Now() 44 | 45 | _, err := cache.Get(j.job.Id) 46 | if errors.Is(err, ErrJobDoesntExist) { 47 | log.Infof("Job %s with id %s tried to run, but exited early because it has benn deleted", j.job.Name, j.job.Id) 48 | return nil, j.meta, ErrJobDeleted 49 | } 50 | if j.job.Disabled { 51 | log.Infof("Job %s tried to run, but exited early because its disabled.", j.job.Name) 52 | return nil, j.meta, ErrJobDisabled 53 | } 54 | 55 | log.Infof("Job %s:%s started.", j.job.Name, j.job.Id) 56 | 57 | j.runSetup() 58 | 59 | var out string 60 | for { 61 | var err error 62 | switch { 63 | case j.job.succeedInstantly: 64 | out = "Job succeeded instantly for test purposes." 65 | case j.job.JobType == LocalJob: 66 | out, err = j.LocalRun() 67 | case j.job.JobType == RemoteJob: 68 | out, err = j.RemoteRun() 69 | default: 70 | err = ErrJobTypeInvalid 71 | } 72 | 73 | if err != nil { 74 | // Log Error in Metadata 75 | // TODO - Error Reporting, email error 76 | log.Errorln("Error running job:", j.currentStat.JobId) 77 | log.Errorln(err) 78 | 79 | j.meta.ErrorCount++ 80 | j.meta.LastError = j.job.clk.Time().Now() 81 | 82 | // Handle retrying 83 | if j.shouldRetry() { 84 | j.currentRetries-- 85 | continue 86 | } 87 | 88 | j.collectStats(false) 89 | j.meta.NumberOfFinishedRuns++ 90 | 91 | // TODO: Wrap error into something better. 
92 | return j.currentStat, j.meta, err 93 | } else { 94 | break 95 | } 96 | } 97 | 98 | log.Infof("Job %s:%s finished.", j.job.Name, j.job.Id) 99 | log.Debugf("Job %s:%s output: %s", j.job.Name, j.job.Id, out) 100 | j.meta.SuccessCount++ 101 | j.meta.NumberOfFinishedRuns++ 102 | j.meta.LastSuccess = j.job.clk.Time().Now() 103 | 104 | j.collectStats(true) 105 | 106 | // Run Dependent Jobs 107 | if len(j.job.DependentJobs) != 0 { 108 | for _, id := range j.job.DependentJobs { 109 | newJob, err := cache.Get(id) 110 | if err != nil { 111 | log.Errorf("Error retrieving dependent job with id of %s", id) 112 | } else { 113 | newJob.Run(cache) 114 | } 115 | } 116 | } 117 | 118 | return j.currentStat, j.meta, nil 119 | } 120 | 121 | // LocalRun executes the Job's local shell command 122 | func (j *JobRunner) LocalRun() (string, error) { 123 | return j.runCmd() 124 | } 125 | 126 | // RemoteRun sends a http request, and checks if the response is valid in time, 127 | func (j *JobRunner) RemoteRun() (string, error) { 128 | // Calculate a response timeout 129 | timeout := j.responseTimeout() 130 | 131 | ctx := context.Background() 132 | if timeout > 0 { 133 | var cncl func() 134 | ctx, cncl = context.WithTimeout(ctx, timeout) 135 | defer cncl() 136 | } 137 | 138 | // Get the actual url and body we're going to be using, 139 | // including any necessary templating. 
140 | url, err := j.tryTemplatize(j.job.RemoteProperties.Url) 141 | if err != nil { 142 | return "", fmt.Errorf("Error templatizing url: %v", err) 143 | } 144 | body, err := j.tryTemplatize(j.job.RemoteProperties.Body) 145 | if err != nil { 146 | return "", fmt.Errorf("Error templatizing body: %v", err) 147 | } 148 | 149 | // Normalize the method passed by the user 150 | method := strings.ToUpper(j.job.RemoteProperties.Method) 151 | bodyBuffer := bytes.NewBufferString(body) 152 | req, err := http.NewRequest(method, url, bodyBuffer) 153 | if err != nil { 154 | return "", err 155 | } 156 | 157 | // Set default or user's passed headers 158 | j.setHeaders(req) 159 | 160 | // Do the request 161 | res, err := http.DefaultClient.Do(req.WithContext(ctx)) 162 | if err != nil { 163 | return "", err 164 | } 165 | defer res.Body.Close() 166 | b, err := io.ReadAll(res.Body) 167 | if err != nil { 168 | return "", err 169 | } 170 | 171 | // Check if we got any of the status codes the user asked for 172 | if j.checkExpected(res.StatusCode) { 173 | return string(b), nil 174 | } else { 175 | return "", errors.New(res.Status + string(b)) 176 | } 177 | } 178 | 179 | func initShParser() *shellwords.Parser { 180 | shParser := shellwords.NewParser() 181 | shParser.ParseEnv = true 182 | shParser.ParseBacktick = true 183 | return shParser 184 | } 185 | 186 | func (j *JobRunner) runCmd() (string, error) { 187 | j.numberOfAttempts++ 188 | 189 | // Get the actual command we're going to be running, 190 | // including any necessary templating. 191 | cmdText, err := j.tryTemplatize(j.job.Command) 192 | if err != nil { 193 | return "", fmt.Errorf("Error templatizing command: %v", err) 194 | } 195 | 196 | // Execute command 197 | shParser := initShParser() 198 | args, err := shParser.Parse(cmdText) 199 | if err != nil { 200 | return "", err 201 | } 202 | if len(args) == 0 { 203 | return "", ErrCmdIsEmpty 204 | } 205 | 206 | cmd := exec.Command(args[0], args[1:]...) 
//nolint:gosec // That's the job description 207 | out, err := cmd.CombinedOutput() 208 | if err != nil { 209 | return "", fmt.Errorf("%s: %s", err, strings.TrimSpace(string(out))) 210 | } 211 | return strings.TrimSpace(string(out)), nil 212 | } 213 | 214 | func (j *JobRunner) tryTemplatize(content string) (string, error) { 215 | delims := j.job.TemplateDelimiters 216 | 217 | if delims == "" { 218 | return content, nil 219 | } 220 | 221 | split := strings.Split(delims, " ") 222 | if len(split) != 2 { //nolint:gomnd 223 | return "", ErrInvalidDelimiters 224 | } 225 | 226 | left, right := split[0], split[1] 227 | if left == "" || right == "" { 228 | return "", ErrInvalidDelimiters 229 | } 230 | 231 | t, err := template.New("tmpl").Delims(left, right).Parse(content) 232 | if err != nil { 233 | return "", fmt.Errorf("Error parsing template: %v", err) 234 | } 235 | 236 | b := bytes.NewBuffer(nil) 237 | if err := t.Execute(b, j.job); err != nil { 238 | return "", fmt.Errorf("Error executing template: %v", err) 239 | } 240 | 241 | return b.String(), nil 242 | } 243 | 244 | func (j *JobRunner) shouldRetry() bool { 245 | // Check number of retries left 246 | if j.currentRetries == 0 { 247 | return false 248 | } 249 | 250 | // Check Epsilon 251 | if j.job.Epsilon != "" && j.job.Schedule != "" { 252 | if !j.job.epsilonDuration.IsZero() { 253 | timeSinceStart := j.job.clk.Time().Now().Sub(j.job.NextRunAt) 254 | timeLeftToRetry := j.job.epsilonDuration.RelativeTo(j.job.clk.Time().Now()) - timeSinceStart 255 | if timeLeftToRetry < 0 { 256 | return false 257 | } 258 | } 259 | } 260 | 261 | return true 262 | } 263 | 264 | func (j *JobRunner) runSetup() { 265 | // Setup Job Stat 266 | j.currentStat = NewJobStat(j.job.Id) 267 | 268 | // Init retries 269 | j.currentRetries = j.job.Retries 270 | } 271 | 272 | func (j *JobRunner) collectStats(success bool) { 273 | j.currentStat.ExecutionDuration = j.job.clk.Time().Now().Sub(j.currentStat.RanAt) 274 | j.currentStat.Success = success 275 
| j.currentStat.NumberOfRetries = j.job.Retries - j.currentRetries 276 | } 277 | 278 | func (j *JobRunner) checkExpected(statusCode int) bool { 279 | // j.job.lock.Lock() 280 | // defer j.job.lock.Unlock() 281 | 282 | // If no expected response codes passed, add 200 status code as expected 283 | if len(j.job.RemoteProperties.ExpectedResponseCodes) == 0 { 284 | j.job.RemoteProperties.ExpectedResponseCodes = append(j.job.RemoteProperties.ExpectedResponseCodes, HTTP_CODE_OK) 285 | } 286 | for _, expected := range j.job.RemoteProperties.ExpectedResponseCodes { 287 | if expected == statusCode { 288 | return true 289 | } 290 | } 291 | 292 | return false 293 | } 294 | 295 | // responseTimeout sets a default timeout if none specified 296 | func (j *JobRunner) responseTimeout() time.Duration { 297 | responseTimeout := j.job.RemoteProperties.Timeout 298 | if responseTimeout == 0 { 299 | 300 | // set default to 30 seconds 301 | responseTimeout = 30 302 | } 303 | return time.Duration(responseTimeout) * time.Second 304 | } 305 | 306 | // setHeaders sets default and user specific headers to the http request 307 | func (j *JobRunner) setHeaders(req *http.Request) { 308 | // j.job.lock.Lock() 309 | // defer j.job.lock.Unlock() 310 | 311 | if j.job.RemoteProperties.Headers == nil { 312 | j.job.RemoteProperties.Headers = http.Header{} 313 | } 314 | // A valid assumption is that the user is sending something in json cause we're past 2017 315 | if j.job.RemoteProperties.Headers["Content-Type"] == nil { 316 | j.job.RemoteProperties.Headers["Content-Type"] = []string{"application/json"} 317 | } 318 | req.Header = j.job.RemoteProperties.Headers 319 | } 320 | -------------------------------------------------------------------------------- /job/runner_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 
12 | func TestTemplatize(t *testing.T) { 13 | 14 | t.Run("command", func(t *testing.T) { 15 | 16 | t.Run("raw", func(t *testing.T) { 17 | j := &Job{ 18 | Name: "mock_job", 19 | Command: "echo mr.{{$.Owner}}", 20 | Owner: "jedi@master.com", 21 | } 22 | r := JobRunner{ 23 | job: j, 24 | } 25 | out, err := r.LocalRun() 26 | assert.NoError(t, err) 27 | assert.Equal(t, "mr.{{$.Owner}}", out) 28 | }) 29 | 30 | t.Run("templated", func(t *testing.T) { 31 | j := &Job{ 32 | Name: "mock_job", 33 | Command: "echo mr.{{$.Owner}}", 34 | Owner: "jedi@master.com", 35 | TemplateDelimiters: "{{ }}", 36 | } 37 | r := JobRunner{ 38 | job: j, 39 | } 40 | out, err := r.LocalRun() 41 | assert.NoError(t, err) 42 | assert.Equal(t, "mr.jedi@master.com", out) 43 | }) 44 | 45 | }) 46 | 47 | t.Run("url", func(t *testing.T) { 48 | 49 | srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 50 | val := r.URL.Query().Get("val") 51 | _, err := w.Write([]byte(val)) 52 | if err != nil { 53 | http.Error(w, err.Error(), 400) 54 | return 55 | } 56 | w.WriteHeader(200) 57 | })) 58 | 59 | t.Run("raw", func(t *testing.T) { 60 | j := &Job{ 61 | Name: "mock_job", 62 | Owner: "jedi@master.com", 63 | RemoteProperties: RemoteProperties{ 64 | Url: "http://" + srv.Listener.Addr().String() + "/path?val=a_{{$.Name}}", 65 | }, 66 | } 67 | r := JobRunner{ 68 | job: j, 69 | } 70 | out, err := r.RemoteRun() 71 | assert.NoError(t, err) 72 | assert.Equal(t, "a_{{$.Name}}", out) 73 | }) 74 | 75 | t.Run("templated", func(t *testing.T) { 76 | j := &Job{ 77 | Name: "mock_job", 78 | Owner: "jedi@master.com", 79 | RemoteProperties: RemoteProperties{ 80 | Url: "http://" + srv.Listener.Addr().String() + "/path?val=a_{{$.Name}}", 81 | }, 82 | TemplateDelimiters: "{{ }}", 83 | } 84 | r := JobRunner{ 85 | job: j, 86 | } 87 | out, err := r.RemoteRun() 88 | assert.NoError(t, err) 89 | assert.Equal(t, "a_mock_job", out) 90 | }) 91 | 92 | }) 93 | 94 | t.Run("body", func(t *testing.T) { 95 | 96 | srv 
:= httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 97 | b, _ := io.ReadAll(r.Body) 98 | w.Write(b) 99 | w.WriteHeader(200) 100 | })) 101 | 102 | t.Run("raw", func(t *testing.T) { 103 | j := &Job{ 104 | Name: "mock_job", 105 | Owner: "jedi@master.com", 106 | RemoteProperties: RemoteProperties{ 107 | Url: "http://" + srv.Listener.Addr().String() + "/path", 108 | Body: `{"hello": "world", "foo": "young-${$.Owner}"}`, 109 | }, 110 | } 111 | r := JobRunner{ 112 | job: j, 113 | } 114 | out, err := r.RemoteRun() 115 | assert.NoError(t, err) 116 | assert.Equal(t, `{"hello": "world", "foo": "young-${$.Owner}"}`, out) 117 | }) 118 | 119 | t.Run("templated", func(t *testing.T) { 120 | j := &Job{ 121 | Name: "mock_job", 122 | Owner: "jedi@master.com", 123 | RemoteProperties: RemoteProperties{ 124 | Url: "http://" + srv.Listener.Addr().String() + "/path", 125 | Body: `{"hello": "world", "foo": "young-${$.Owner}"}`, 126 | }, 127 | TemplateDelimiters: "${ }", 128 | } 129 | r := JobRunner{ 130 | job: j, 131 | } 132 | out, err := r.RemoteRun() 133 | assert.NoError(t, err) 134 | assert.Equal(t, `{"hello": "world", "foo": "young-jedi@master.com"}`, out) 135 | }) 136 | 137 | }) 138 | 139 | } 140 | -------------------------------------------------------------------------------- /job/stats.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | // KalaStats is the struct for storing app-level metrics 8 | type KalaStats struct { 9 | ActiveJobs int `json:"active_jobs"` 10 | DisabledJobs int `json:"disabled_jobs"` 11 | Jobs int `json:"jobs"` 12 | 13 | ErrorCount uint `json:"error_count"` 14 | SuccessCount uint `json:"success_count"` 15 | 16 | NextRunAt time.Time `json:"next_run_at"` 17 | LastAttemptedRun time.Time `json:"last_attempted_run"` 18 | 19 | CreatedAt time.Time `json:"created"` 20 | } 21 | 22 | // NewKalaStats is used to easily generate a current app-level 
metrics report. 23 | func NewKalaStats(cache JobCache) *KalaStats { 24 | ks := &KalaStats{ 25 | CreatedAt: time.Now(), 26 | } 27 | jobs := cache.GetAll() 28 | jobs.Lock.RLock() 29 | defer jobs.Lock.RUnlock() 30 | 31 | ks.Jobs = len(jobs.Jobs) 32 | if ks.Jobs == 0 { 33 | return ks 34 | } 35 | 36 | nextRun := time.Time{} 37 | lastRun := time.Time{} 38 | for _, job := range jobs.Jobs { 39 | func() { 40 | job.lock.RLock() 41 | defer job.lock.RUnlock() 42 | 43 | if job.Disabled { 44 | ks.DisabledJobs += 1 45 | } else { 46 | ks.ActiveJobs += 1 47 | } 48 | 49 | if nextRun.IsZero() { 50 | nextRun = job.NextRunAt 51 | } else if (nextRun.UnixNano() - job.NextRunAt.UnixNano()) > 0 { 52 | nextRun = job.NextRunAt 53 | } 54 | 55 | if lastRun.IsZero() { 56 | if !job.Metadata.LastAttemptedRun.IsZero() { 57 | lastRun = job.Metadata.LastAttemptedRun 58 | } 59 | } else if (lastRun.UnixNano() - job.Metadata.LastAttemptedRun.UnixNano()) < 0 { 60 | lastRun = job.Metadata.LastAttemptedRun 61 | } 62 | 63 | ks.ErrorCount += job.Metadata.ErrorCount 64 | ks.SuccessCount += job.Metadata.SuccessCount 65 | }() 66 | } 67 | ks.NextRunAt = nextRun 68 | ks.LastAttemptedRun = lastRun 69 | 70 | return ks 71 | } 72 | 73 | // JobStat is used to store metrics about a specific Job .Run() 74 | type JobStat struct { 75 | JobId string `json:"job_id"` 76 | RanAt time.Time `json:"ran_at"` 77 | NumberOfRetries uint `json:"number_of_retries"` 78 | Success bool `json:"success"` 79 | ExecutionDuration time.Duration `json:"execution_duration"` 80 | } 81 | 82 | func NewJobStat(id string) *JobStat { 83 | return &JobStat{ 84 | JobId: id, 85 | RanAt: time.Now(), 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /job/stats_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestJobStats(t *testing.T) { 11 | 
cache := NewMockCache() 12 | 13 | j := GetMockJobWithGenericSchedule(time.Now()) 14 | j.Init(cache) 15 | 16 | j.Run(cache) 17 | now := time.Now() 18 | 19 | assert.NotNil(t, j.Stats[0]) 20 | assert.Equal(t, j.Stats[0].JobId, j.Id) 21 | assert.WithinDuration(t, j.Stats[0].RanAt, now, time.Second) 22 | assert.Equal(t, j.Stats[0].NumberOfRetries, uint(0)) 23 | assert.True(t, j.Stats[0].Success) 24 | assert.NotNil(t, j.Stats[0].ExecutionDuration) 25 | } 26 | 27 | func TestKalaStats(t *testing.T) { 28 | cache := NewMockCache() 29 | 30 | for i := 0; i < 5; i++ { 31 | j := GetMockJobWithGenericSchedule(time.Now()) 32 | j.Init(cache) 33 | j.Run(cache) 34 | } 35 | now := time.Now() 36 | for i := 0; i < 5; i++ { 37 | j := GetMockJobWithGenericSchedule(time.Now()) 38 | j.Init(cache) 39 | assert.NoError(t, j.Disable(cache)) 40 | } 41 | 42 | kalaStat := NewKalaStats(cache) 43 | createdAt := time.Now() 44 | nextRunAt := time.Now().Add(time.Minute * 5) 45 | 46 | assert.Equal(t, kalaStat.Jobs, 10) 47 | assert.Equal(t, kalaStat.ActiveJobs, 5) 48 | assert.Equal(t, kalaStat.DisabledJobs, 5) 49 | assert.Equal(t, kalaStat.SuccessCount, uint(5)) 50 | assert.Equal(t, kalaStat.ErrorCount, uint(0)) 51 | assert.True(t, (nextRunAt.UnixNano()-kalaStat.NextRunAt.UnixNano()) > 0) 52 | assert.WithinDuration(t, kalaStat.NextRunAt, nextRunAt, time.Second) 53 | assert.WithinDuration(t, kalaStat.LastAttemptedRun, now, time.Millisecond*100) 54 | assert.WithinDuration(t, kalaStat.CreatedAt, createdAt, time.Millisecond*100) 55 | } 56 | 57 | func TestNextRunAt(t *testing.T) { 58 | cache := NewMockCache() 59 | 60 | sched := time.Now().Add(time.Second) 61 | j := GetMockJobWithSchedule(2, sched, "P1DT10M10S") 62 | j.Init(cache) 63 | assert.NoError(t, j.Disable(cache)) 64 | 65 | sched2 := time.Now().Add(2 * time.Second) 66 | j2 := GetMockJobWithSchedule(2, sched2, "P1DT10M10S") 67 | j2.Init(cache) 68 | assert.NoError(t, j2.Disable(cache)) 69 | 70 | kalaStat := NewKalaStats(cache) 71 | j.lock.RLock() 72 | 
assert.Equal(t, j.NextRunAt.UnixNano(), kalaStat.NextRunAt.UnixNano()) 73 | assert.Equal(t, j.Metadata.LastAttemptedRun.UnixNano(), kalaStat.LastAttemptedRun.UnixNano()) 74 | assert.NotEqual(t, j2.NextRunAt.UnixNano(), kalaStat.NextRunAt.UnixNano()) 75 | j.lock.RUnlock() 76 | } 77 | -------------------------------------------------------------------------------- /job/storage/boltdb/boltdb.go: -------------------------------------------------------------------------------- 1 | package boltdb 2 | 3 | import ( 4 | "bytes" 5 | "encoding/gob" 6 | "os" 7 | "strings" 8 | "time" 9 | 10 | "github.com/ajvb/kala/job" 11 | 12 | log "github.com/sirupsen/logrus" 13 | bolt "go.etcd.io/bbolt" 14 | ) 15 | 16 | var ( 17 | jobBucket = []byte("jobs") 18 | ) 19 | 20 | func GetBoltDB(path string) *BoltJobDB { 21 | if path != "" && !strings.HasSuffix(path, "/") { 22 | path += "/" 23 | } 24 | path += "jobdb.db" 25 | var perms os.FileMode = 0o0600 26 | database, err := bolt.Open(path, perms, &bolt.Options{Timeout: time.Second * 10}) //nolint:gomnd 27 | if err != nil { 28 | log.Fatal(err) 29 | } 30 | return &BoltJobDB{ 31 | path: path, 32 | dbConn: database, 33 | } 34 | } 35 | 36 | type BoltJobDB struct { 37 | dbConn *bolt.DB 38 | path string 39 | } 40 | 41 | func (db *BoltJobDB) Close() error { 42 | return db.dbConn.Close() 43 | } 44 | 45 | func (db *BoltJobDB) GetAll() ([]*job.Job, error) { 46 | allJobs := []*job.Job{} 47 | 48 | err := db.dbConn.Update(func(tx *bolt.Tx) error { 49 | bucket, err := tx.CreateBucketIfNotExists(jobBucket) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | err = bucket.ForEach(func(k, v []byte) error { 55 | buffer := bytes.NewBuffer(v) 56 | dec := gob.NewDecoder(buffer) 57 | j := new(job.Job) 58 | err = dec.Decode(j) 59 | 60 | if err != nil { 61 | return err 62 | } 63 | 64 | err = j.InitDelayDuration(false) 65 | 66 | if err != nil { 67 | return err 68 | } 69 | 70 | allJobs = append(allJobs, j) 71 | 72 | return nil 73 | }) 74 | 75 | return err 76 | }) 77 | 
78 | return allJobs, err 79 | } 80 | 81 | func (db *BoltJobDB) Get(id string) (*job.Job, error) { 82 | j := new(job.Job) 83 | 84 | err := db.dbConn.View(func(tx *bolt.Tx) error { 85 | b := tx.Bucket(jobBucket) 86 | 87 | v := b.Get([]byte(id)) 88 | if v == nil { 89 | return job.ErrJobNotFound(id) 90 | } 91 | 92 | buf := bytes.NewBuffer(v) 93 | err := gob.NewDecoder(buf).Decode(j) 94 | 95 | return err 96 | }) 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | j.Id = id 102 | return j, nil 103 | } 104 | 105 | func (db *BoltJobDB) Delete(id string) error { 106 | err := db.dbConn.Update(func(tx *bolt.Tx) error { 107 | bucket := tx.Bucket(jobBucket) 108 | return bucket.Delete([]byte(id)) 109 | }) 110 | return err 111 | } 112 | 113 | func (db *BoltJobDB) Save(j *job.Job) error { 114 | err := db.dbConn.Update(func(tx *bolt.Tx) error { 115 | bucket, err := tx.CreateBucketIfNotExists(jobBucket) 116 | if err != nil { 117 | return err 118 | } 119 | 120 | buffer := new(bytes.Buffer) 121 | enc := gob.NewEncoder(buffer) 122 | err = enc.Encode(j) 123 | if err != nil { 124 | return err 125 | } 126 | 127 | err = bucket.Put([]byte(j.Id), buffer.Bytes()) 128 | if err != nil { 129 | return err 130 | } 131 | return nil 132 | }) 133 | return err 134 | } 135 | -------------------------------------------------------------------------------- /job/storage/boltdb/boltdb_test.go: -------------------------------------------------------------------------------- 1 | package boltdb 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/ajvb/kala/job" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | var testDbPath = "" 13 | 14 | func setupTest(t *testing.T) { 15 | db := GetBoltDB(testDbPath) 16 | defer db.Close() 17 | 18 | jobs, err := db.GetAll() 19 | assert.NoError(t, err) 20 | 21 | for _, j := range jobs { 22 | err = db.Delete(j.Id) 23 | assert.NoError(t, err) 24 | } 25 | 26 | } 27 | 28 | func TestSaveAndGetJob(t *testing.T) { 29 | setupTest(t) 30 | 31 | db := 
GetBoltDB(testDbPath) 32 | cache := job.NewLockFreeJobCache(db) 33 | defer db.Close() 34 | 35 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 36 | genericMockJob.Init(cache) 37 | db.Save(genericMockJob) 38 | 39 | j, err := db.Get(genericMockJob.Id) 40 | assert.Nil(t, err) 41 | 42 | assert.WithinDuration(t, j.NextRunAt, genericMockJob.NextRunAt, 100*time.Microsecond) 43 | assert.Equal(t, j.Name, genericMockJob.Name) 44 | assert.Equal(t, j.Id, genericMockJob.Id) 45 | assert.Equal(t, j.Command, genericMockJob.Command) 46 | assert.Equal(t, j.Schedule, genericMockJob.Schedule) 47 | assert.Equal(t, j.Owner, genericMockJob.Owner) 48 | assert.Equal(t, j.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 49 | } 50 | 51 | func TestDeleteJob(t *testing.T) { 52 | setupTest(t) 53 | 54 | db := GetBoltDB(testDbPath) 55 | cache := job.NewLockFreeJobCache(db) 56 | defer db.Close() 57 | 58 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 59 | genericMockJob.Init(cache) 60 | db.Save(genericMockJob) 61 | 62 | // Make sure its there 63 | j, err := db.Get(genericMockJob.Id) 64 | assert.Nil(t, err) 65 | assert.Equal(t, j.Name, genericMockJob.Name) 66 | retrievedJob, err := cache.Get(genericMockJob.Id) 67 | assert.NoError(t, err) 68 | assert.NotNil(t, retrievedJob) 69 | 70 | // Delete it 71 | genericMockJob.Delete(cache) 72 | 73 | k, err := db.Get(genericMockJob.Id) 74 | assert.Error(t, err) 75 | assert.Nil(t, k) 76 | retrievedJobTwo, err := cache.Get(genericMockJob.Id) 77 | assert.Error(t, err) 78 | assert.Nil(t, retrievedJobTwo) 79 | 80 | genericMockJob.Delete(cache) 81 | } 82 | 83 | func TestSaveAndGetAllJobs(t *testing.T) { 84 | setupTest(t) 85 | 86 | db := GetBoltDB(testDbPath) 87 | cache := job.NewLockFreeJobCache(db) 88 | defer db.Close() 89 | 90 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now()) 91 | genericMockJobOne.Init(cache) 92 | err := db.Save(genericMockJobOne) 93 | assert.NoError(t, err) 94 | 95 | 
genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now()) 96 | genericMockJobTwo.Init(cache) 97 | err = db.Save(genericMockJobTwo) 98 | assert.NoError(t, err) 99 | 100 | jobs, err := db.GetAll() 101 | assert.Nil(t, err) 102 | assert.Equal(t, len(jobs), 2) 103 | } 104 | -------------------------------------------------------------------------------- /job/storage/consul/consul.go: -------------------------------------------------------------------------------- 1 | package consul 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | 8 | "github.com/ajvb/kala/job" 9 | 10 | "github.com/hashicorp/consul/api" 11 | 12 | log "github.com/sirupsen/logrus" 13 | ) 14 | 15 | var ( 16 | prefix = "kala/jobs/" 17 | ) 18 | 19 | func New(address string) *ConsulJobDB { 20 | config := api.DefaultConfig() 21 | if address != "" { 22 | config.Address = address 23 | } 24 | client, err := api.NewClient(config) 25 | if err != nil { 26 | log.Fatal(err) 27 | } 28 | return &ConsulJobDB{ 29 | conn: client.KV(), 30 | } 31 | } 32 | 33 | type ConsulJobDB struct { 34 | conn *api.KV 35 | } 36 | 37 | func (db *ConsulJobDB) Close() error { 38 | return nil 39 | } 40 | 41 | func (db *ConsulJobDB) GetAll() ([]*job.Job, error) { 42 | allJobs := []*job.Job{} 43 | 44 | pairs, _, err := db.conn.List(prefix, &api.QueryOptions{RequireConsistent: true}) 45 | if err != nil { 46 | return allJobs, err 47 | } 48 | for _, pair := range pairs { 49 | 50 | buffer := bytes.NewBuffer(pair.Value) 51 | dec := json.NewDecoder(buffer) 52 | j := new(job.Job) 53 | err = dec.Decode(j) 54 | if err != nil { 55 | continue 56 | } 57 | err = j.InitDelayDuration(false) 58 | if err != nil { 59 | continue 60 | } 61 | allJobs = append(allJobs, j) 62 | } 63 | 64 | return allJobs, err 65 | } 66 | 67 | func (db *ConsulJobDB) Get(id string) (*job.Job, error) { 68 | j := new(job.Job) 69 | 70 | pair, _, err := db.conn.Get(prefix+id, &api.QueryOptions{RequireConsistent: true}) 71 | if err != nil { 72 | return nil, err 73 | } 
74 | if pair == nil { 75 | return nil, fmt.Errorf("ID %s not found", id) 76 | } 77 | buf := bytes.NewBuffer(pair.Value) 78 | err = json.NewDecoder(buf).Decode(j) 79 | if err != nil { 80 | return nil, err 81 | } 82 | return j, nil 83 | } 84 | 85 | func (db *ConsulJobDB) Delete(id string) error { 86 | _, err := db.conn.Delete(prefix+id, &api.WriteOptions{}) 87 | return err 88 | } 89 | 90 | func (db *ConsulJobDB) Save(j *job.Job) error { 91 | buffer := new(bytes.Buffer) 92 | enc := json.NewEncoder(buffer) 93 | err := enc.Encode(j) 94 | if err != nil { 95 | return err 96 | } 97 | pair := &api.KVPair{Key: prefix + j.Id, Value: buffer.Bytes()} 98 | _, err = db.conn.Put(pair, &api.WriteOptions{}) 99 | return err 100 | } 101 | -------------------------------------------------------------------------------- /job/storage/consul/consul_test.go: -------------------------------------------------------------------------------- 1 | package consul 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/ajvb/kala/job" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func setupTest(t *testing.T) { 13 | db := New("") 14 | defer db.Close() 15 | 16 | jobs, err := db.GetAll() 17 | assert.NoError(t, err) 18 | for _, j := range jobs { 19 | err = db.Delete(j.Id) 20 | assert.NoError(t, err) 21 | } 22 | 23 | } 24 | 25 | func TestSaveAndGetJob(t *testing.T) { 26 | setupTest(t) 27 | 28 | db := New("") 29 | cache := job.NewLockFreeJobCache(db) 30 | defer db.Close() 31 | 32 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 33 | genericMockJob.Init(cache) 34 | db.Save(genericMockJob) 35 | 36 | j, err := db.Get(genericMockJob.Id) 37 | assert.Nil(t, err) 38 | 39 | assert.WithinDuration(t, j.NextRunAt, genericMockJob.NextRunAt, 100*time.Microsecond) 40 | assert.Equal(t, j.Name, genericMockJob.Name) 41 | assert.Equal(t, j.Id, genericMockJob.Id) 42 | assert.Equal(t, j.Command, genericMockJob.Command) 43 | assert.Equal(t, j.Schedule, genericMockJob.Schedule) 44 | 
assert.Equal(t, j.Owner, genericMockJob.Owner) 45 | assert.Equal(t, j.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 46 | } 47 | 48 | func TestDeleteJob(t *testing.T) { 49 | setupTest(t) 50 | 51 | db := New("") 52 | cache := job.NewLockFreeJobCache(db) 53 | 54 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 55 | genericMockJob.Init(cache) 56 | db.Save(genericMockJob) 57 | 58 | // Make sure its there 59 | j, err := db.Get(genericMockJob.Id) 60 | assert.Nil(t, err) 61 | assert.Equal(t, j.Name, genericMockJob.Name) 62 | retrievedJob, err := cache.Get(genericMockJob.Id) 63 | assert.NoError(t, err) 64 | assert.NotNil(t, retrievedJob) 65 | 66 | // Delete it 67 | err = genericMockJob.Delete(cache) 68 | assert.Nil(t, err) 69 | // 70 | // fmt.Printf("%#v", genericMockJob) 71 | k, err := db.Get(genericMockJob.Id) 72 | assert.Error(t, err) 73 | assert.Nil(t, k) 74 | retrievedJobTwo, err := cache.Get(genericMockJob.Id) 75 | assert.Error(t, err) 76 | assert.Nil(t, retrievedJobTwo) 77 | 78 | genericMockJob.Delete(cache) 79 | } 80 | 81 | func TestSaveAndGetAllJobs(t *testing.T) { 82 | setupTest(t) 83 | 84 | db := New("") 85 | cache := job.NewLockFreeJobCache(db) 86 | defer db.Close() 87 | 88 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now()) 89 | genericMockJobOne.Init(cache) 90 | err := db.Save(genericMockJobOne) 91 | assert.NoError(t, err) 92 | 93 | genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now()) 94 | genericMockJobTwo.Init(cache) 95 | err = db.Save(genericMockJobTwo) 96 | assert.NoError(t, err) 97 | 98 | jobs, err := db.GetAll() 99 | assert.Nil(t, err) 100 | assert.Equal(t, 2, len(jobs)) 101 | } 102 | -------------------------------------------------------------------------------- /job/storage/mongo/mongo.go: -------------------------------------------------------------------------------- 1 | package mongo 2 | 3 | import ( 4 | "github.com/ajvb/kala/job" 5 | 6 | log "github.com/sirupsen/logrus" 7 | 
"gopkg.in/mgo.v2" 8 | "gopkg.in/mgo.v2/bson" 9 | ) 10 | 11 | var ( 12 | database = "kala" 13 | collection = "jobs" 14 | ) 15 | 16 | // DB is concrete implementation of the JobDB interface, that uses Redis for persistence. 17 | type DB struct { 18 | collection *mgo.Collection 19 | database *mgo.Database 20 | session *mgo.Session 21 | } 22 | 23 | // New instantiates a new DB. 24 | func New(addrs string, cred *mgo.Credential) *DB { 25 | session, err := mgo.Dial(addrs) 26 | if err != nil { 27 | log.Fatal(err) 28 | } 29 | if cred.Username != "" { 30 | err = session.Login(cred) 31 | if err != nil { 32 | log.Fatal(err) 33 | } 34 | } 35 | db := session.DB(database) 36 | c := db.C(collection) 37 | session.SetMode(mgo.Monotonic, true) 38 | if err := c.EnsureIndexKey("id"); err != nil { 39 | log.Fatal(err) 40 | } 41 | return &DB{ 42 | collection: c, 43 | database: db, 44 | session: session, 45 | } 46 | } 47 | 48 | // GetAll returns all persisted Jobs. 49 | func (d DB) GetAll() ([]*job.Job, error) { 50 | jobs := []*job.Job{} 51 | err := d.collection.Find(bson.M{}).All(&jobs) 52 | if err != nil { 53 | return jobs, err 54 | } 55 | return jobs, nil 56 | } 57 | 58 | // Get returns a persisted Job. 59 | func (d DB) Get(id string) (*job.Job, error) { 60 | result := job.Job{} 61 | err := d.collection.Find(bson.M{"id": id}).One(&result) 62 | if err != nil { 63 | return nil, err 64 | } 65 | return &result, nil 66 | } 67 | 68 | // Delete deletes a persisted Job. 69 | func (d DB) Delete(id string) error { 70 | err := d.collection.Remove(bson.M{"id": id}) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | return nil 76 | } 77 | 78 | // Save persists a Job. 79 | func (d DB) Save(j *job.Job) error { 80 | err := d.collection.Insert(j) 81 | if err != nil { 82 | return err 83 | } 84 | 85 | return nil 86 | } 87 | 88 | // Close closes the connection to Redis. 
89 | func (d DB) Close() error { 90 | d.session.Close() 91 | return nil 92 | } 93 | -------------------------------------------------------------------------------- /job/storage/mongo/mongo_test.go: -------------------------------------------------------------------------------- 1 | package mongo 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/ajvb/kala/job" 8 | mgo "gopkg.in/mgo.v2" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func NewTestDb(t *testing.T) *DB { 14 | collection = "test" 15 | var db = New("", &mgo.Credential{}) 16 | 17 | jobs, err := db.GetAll() 18 | assert.NoError(t, err) 19 | for _, j := range jobs { 20 | err = db.Delete(j.Id) 21 | assert.NoError(t, err) 22 | } 23 | return db 24 | } 25 | 26 | func TestSaveAndGetJob(t *testing.T) { 27 | db := NewTestDb(t) 28 | 29 | cache := job.NewLockFreeJobCache(db) 30 | defer db.Close() 31 | 32 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 33 | genericMockJob.Init(cache) 34 | err := db.Save(genericMockJob) 35 | if assert.NoError(t, err) { 36 | j, err := db.Get(genericMockJob.Id) 37 | if assert.Nil(t, err) { 38 | // TODO WIP 39 | assert.WithinDuration(t, j.NextRunAt, genericMockJob.NextRunAt, 5000*time.Microsecond) 40 | assert.Equal(t, j.Name, genericMockJob.Name) 41 | assert.Equal(t, j.Id, genericMockJob.Id) 42 | assert.Equal(t, j.Command, genericMockJob.Command) 43 | assert.Equal(t, j.Schedule, genericMockJob.Schedule) 44 | assert.Equal(t, j.Owner, genericMockJob.Owner) 45 | assert.Equal(t, j.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 46 | } 47 | } 48 | } 49 | 50 | func TestDeleteJob(t *testing.T) { 51 | db := NewTestDb(t) 52 | 53 | cache := job.NewLockFreeJobCache(db) 54 | 55 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 56 | genericMockJob.Init(cache) 57 | err := db.Save(genericMockJob) 58 | if assert.NoError(t, err) { 59 | 60 | // Make sure its there 61 | j, err := db.Get(genericMockJob.Id) 62 | assert.Nil(t, err) 63 | 
assert.Equal(t, j.Name, genericMockJob.Name) 64 | retrievedJob, err := cache.Get(genericMockJob.Id) 65 | assert.NoError(t, err) 66 | assert.NotNil(t, retrievedJob) 67 | 68 | // Delete it 69 | err = genericMockJob.Delete(cache) 70 | assert.Nil(t, err) 71 | 72 | k, err := db.Get(genericMockJob.Id) 73 | assert.Error(t, err) 74 | assert.Nil(t, k) 75 | retrievedJobTwo, err := cache.Get(genericMockJob.Id) 76 | assert.Error(t, err) 77 | assert.Nil(t, retrievedJobTwo) 78 | 79 | genericMockJob.Delete(cache) 80 | } 81 | } 82 | 83 | func TestSaveAndGetAllJobs(t *testing.T) { 84 | db := NewTestDb(t) 85 | 86 | cache := job.NewLockFreeJobCache(db) 87 | 88 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now()) 89 | genericMockJobOne.Init(cache) 90 | err := db.Save(genericMockJobOne) 91 | assert.NoError(t, err) 92 | 93 | genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now()) 94 | genericMockJobTwo.Init(cache) 95 | err = db.Save(genericMockJobTwo) 96 | assert.NoError(t, err) 97 | 98 | jobs, err := db.GetAll() 99 | assert.Nil(t, err) 100 | assert.Equal(t, 2, len(jobs)) 101 | } 102 | 103 | func TestEnd(t *testing.T) { 104 | db := NewTestDb(t) 105 | db.Close() 106 | } 107 | -------------------------------------------------------------------------------- /job/storage/mysql/mysql.go: -------------------------------------------------------------------------------- 1 | package mysql 2 | 3 | import ( 4 | "crypto/tls" 5 | "database/sql" 6 | "encoding/json" 7 | "fmt" 8 | 9 | "github.com/go-sql-driver/mysql" 10 | 11 | "github.com/jmoiron/sqlx" 12 | 13 | "github.com/ajvb/kala/job" 14 | 15 | log "github.com/sirupsen/logrus" 16 | ) 17 | 18 | var ( 19 | TableName = "jobs" 20 | ) 21 | 22 | type DB struct { 23 | conn *sqlx.DB 24 | } 25 | 26 | // New instantiates a new DB. 
27 | func New(dsn string, tlsConfig *tls.Config) *DB { 28 | if tlsConfig != nil { 29 | log.Infof("Register TLS config") 30 | err := mysql.RegisterTLSConfig("custom", tlsConfig) 31 | if err != nil { 32 | log.Fatal(err) 33 | } 34 | } 35 | connection, err := sqlx.Open("mysql", dsn) 36 | if err != nil { 37 | log.Fatal(err) 38 | } 39 | // passive attempt to create table 40 | _, _ = connection.Exec(fmt.Sprintf(`create table %s (id varchar(36), job JSON, primary key (id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;`, TableName)) 41 | return &DB{ 42 | conn: connection, 43 | } 44 | } 45 | 46 | // GetAll returns all persisted Jobs. 47 | func (d DB) GetAll() ([]*job.Job, error) { 48 | query := fmt.Sprintf(`select job from %[1]s;`, TableName) 49 | var results []sql.NullString 50 | err := d.conn.Select(&results, query) 51 | if err != nil && err != sql.ErrNoRows { 52 | return nil, err 53 | } 54 | err = nil 55 | jobs := make([]*job.Job, 0, len(results)) 56 | for _, v := range results { 57 | if v.Valid { 58 | j := job.Job{} 59 | if err = json.Unmarshal([]byte(v.String), &j); err != nil { 60 | break 61 | } 62 | if err = j.InitDelayDuration(false); err != nil { 63 | break 64 | } 65 | jobs = append(jobs, &j) 66 | } 67 | } 68 | 69 | return jobs, err 70 | } 71 | 72 | // Get returns a persisted Job. 73 | func (d DB) Get(id string) (*job.Job, error) { 74 | template := `select job from %[1]s where id = ?;` 75 | query := fmt.Sprintf(template, TableName) 76 | var r sql.NullString 77 | err := d.conn.QueryRow(query, id).Scan(&r) 78 | if err != nil { 79 | return nil, err 80 | } 81 | result := &job.Job{} 82 | if r.Valid { 83 | err = json.Unmarshal([]byte(r.String), &result) 84 | } 85 | return result, err 86 | } 87 | 88 | // Delete deletes a persisted Job. 89 | func (d DB) Delete(id string) error { 90 | query := fmt.Sprintf(`delete from %[1]s where id = ?;`, TableName) 91 | _, err := d.conn.Exec(query, id) 92 | return err 93 | } 94 | 95 | // Save persists a Job. 
96 | func (d DB) Save(j *job.Job) error { 97 | template := `replace into %[1]s (id, job) values(?, ?);` 98 | query := fmt.Sprintf(template, TableName) 99 | r, err := json.Marshal(j) 100 | if err != nil { 101 | return err 102 | } 103 | transaction, err := d.conn.Begin() 104 | if err != nil { 105 | return err 106 | } 107 | statement, err := transaction.Prepare(query) 108 | if err != nil { 109 | transaction.Rollback() //nolint:errcheck // adding insult to injury 110 | return err 111 | } 112 | defer statement.Close() 113 | _, err = statement.Exec(j.Id, string(r)) 114 | if err != nil { 115 | transaction.Rollback() //nolint:errcheck // adding insult to injury 116 | return err 117 | } 118 | return transaction.Commit() 119 | } 120 | 121 | // Close closes the connection to Postgres. 122 | func (d DB) Close() error { 123 | return d.conn.Close() 124 | } 125 | -------------------------------------------------------------------------------- /job/storage/mysql/mysql_test.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | // +build linux 3 | 4 | package mysql 5 | 6 | import ( 7 | "database/sql" 8 | "encoding/json" 9 | "os" 10 | "testing" 11 | "time" 12 | 13 | "github.com/DATA-DOG/go-sqlmock" 14 | "github.com/jmoiron/sqlx" 15 | mysqltest "github.com/lestrrat-go/test-mysqld" 16 | "github.com/stretchr/testify/assert" 17 | 18 | "github.com/ajvb/kala/job" 19 | ) 20 | 21 | func NewTestDb() (*DB, sqlmock.Sqlmock) { 22 | connection, m, _ := sqlmock.New() 23 | 24 | var db = &DB{ 25 | conn: sqlx.NewDb(connection, "sqlmock"), 26 | } 27 | return db, m 28 | } 29 | 30 | func TestSaveAndGetJob(t *testing.T) { 31 | db, m := NewTestDb() 32 | 33 | cache := job.NewLockFreeJobCache(db) 34 | defer db.Close() 35 | 36 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 37 | genericMockJob.Init(cache) 38 | 39 | j, err := json.Marshal(genericMockJob) 40 | if assert.NoError(t, err) { 41 | m.ExpectBegin() 42 | m.ExpectPrepare("replace 
.*"). 43 | ExpectExec(). 44 | WithArgs(genericMockJob.Id, string(j)). 45 | WillReturnResult(sqlmock.NewResult(1, 1)) 46 | m.ExpectCommit() 47 | err := db.Save(genericMockJob) 48 | if assert.NoError(t, err) { 49 | m.ExpectQuery("select .*"). 50 | WithArgs(genericMockJob.Id). 51 | WillReturnRows(sqlmock.NewRows([]string{"job"}).AddRow(j)) 52 | j2, err := db.Get(genericMockJob.Id) 53 | if assert.Nil(t, err) { 54 | assert.WithinDuration(t, j2.NextRunAt, genericMockJob.NextRunAt, 400*time.Microsecond) 55 | assert.Equal(t, j2.Name, genericMockJob.Name) 56 | assert.Equal(t, j2.Id, genericMockJob.Id) 57 | assert.Equal(t, j2.Command, genericMockJob.Command) 58 | assert.Equal(t, j2.Schedule, genericMockJob.Schedule) 59 | assert.Equal(t, j2.Owner, genericMockJob.Owner) 60 | assert.Equal(t, j2.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 61 | } 62 | } 63 | } 64 | } 65 | 66 | func TestDeleteJob(t *testing.T) { 67 | db, m := NewTestDb() 68 | 69 | cache := job.NewLockFreeJobCache(db) 70 | 71 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 72 | genericMockJob.Init(cache) 73 | 74 | j, err := json.Marshal(genericMockJob) 75 | if assert.NoError(t, err) { 76 | 77 | m.ExpectBegin() 78 | m.ExpectPrepare("replace .*"). 79 | ExpectExec(). 80 | WithArgs(genericMockJob.Id, string(j)). 81 | WillReturnResult(sqlmock.NewResult(1, 1)) 82 | m.ExpectCommit() 83 | 84 | err := db.Save(genericMockJob) 85 | if assert.NoError(t, err) { 86 | 87 | // Delete it 88 | m.ExpectExec("delete .*"). 89 | WithArgs(genericMockJob.Id). 90 | WillReturnResult(sqlmock.NewResult(1, 1)) 91 | 92 | err = db.Delete(genericMockJob.Id) 93 | assert.Nil(t, err) 94 | 95 | // Verify deletion 96 | m.ExpectQuery("select .*"). 97 | WithArgs(genericMockJob.Id). 
98 | WillReturnError(sql.ErrNoRows) 99 | 100 | k, err := db.Get(genericMockJob.Id) 101 | assert.Error(t, err) 102 | assert.Nil(t, k) 103 | } 104 | } 105 | 106 | } 107 | 108 | func TestSaveAndGetAllJobs(t *testing.T) { 109 | db, m := NewTestDb() 110 | 111 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now()) 112 | genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now()) 113 | 114 | jobOne, err := json.Marshal(genericMockJobOne) 115 | if assert.NoError(t, err) { 116 | jobTwo, err := json.Marshal(genericMockJobTwo) 117 | if assert.NoError(t, err) { 118 | 119 | m.ExpectQuery(".*").WillReturnRows(sqlmock.NewRows([]string{"jobs"}).AddRow(jobOne).AddRow(jobTwo)) 120 | 121 | jobs, err := db.GetAll() 122 | assert.Nil(t, err) 123 | assert.Equal(t, 2, len(jobs)) 124 | } 125 | } 126 | 127 | } 128 | 129 | func TestRealDb(t *testing.T) { 130 | 131 | dsn := os.Getenv("MYSQL_DSN") 132 | if dsn == "" { 133 | mysqld, err := mysqltest.NewMysqld(nil) 134 | if assert.NoError(t, err) { 135 | dsn = mysqld.Datasource("test", "", "", 0) 136 | defer mysqld.Stop() 137 | } else { 138 | t.FailNow() 139 | } 140 | } 141 | 142 | db := New(dsn, nil) 143 | cache := job.NewLockFreeJobCache(db) 144 | 145 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now().Add(time.Hour)) 146 | genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now().Add(time.Hour)) 147 | 148 | genericMockJobOne.Init(cache) 149 | genericMockJobTwo.Init(cache) 150 | 151 | t.Logf("Mock job one: %s", genericMockJobOne.Id) 152 | t.Logf("Mock job two: %s", genericMockJobTwo.Id) 153 | 154 | err := db.Save(genericMockJobOne) 155 | if assert.NoError(t, err) { 156 | 157 | err := db.Save(genericMockJobTwo) 158 | if assert.NoError(t, err) { 159 | 160 | jobs, err := db.GetAll() 161 | if assert.NoError(t, err) { 162 | 163 | assert.Equal(t, 2, len(jobs)) 164 | 165 | if jobs[0].Id == genericMockJobTwo.Id { 166 | jobs[0], jobs[1] = jobs[1], jobs[0] 167 | } 168 | 169 | assert.WithinDuration(t, 
jobs[0].NextRunAt, genericMockJobOne.NextRunAt, 400*time.Microsecond) 170 | assert.Equal(t, jobs[0].Name, genericMockJobOne.Name) 171 | assert.Equal(t, jobs[0].Id, genericMockJobOne.Id) 172 | assert.Equal(t, jobs[0].Command, genericMockJobOne.Command) 173 | assert.Equal(t, jobs[0].Schedule, genericMockJobOne.Schedule) 174 | assert.Equal(t, jobs[0].Owner, genericMockJobOne.Owner) 175 | assert.Equal(t, jobs[0].Metadata.SuccessCount, genericMockJobOne.Metadata.SuccessCount) 176 | 177 | assert.WithinDuration(t, jobs[1].NextRunAt, genericMockJobTwo.NextRunAt, 400*time.Microsecond) 178 | assert.Equal(t, jobs[1].Name, genericMockJobTwo.Name) 179 | assert.Equal(t, jobs[1].Id, genericMockJobTwo.Id) 180 | assert.Equal(t, jobs[1].Command, genericMockJobTwo.Command) 181 | assert.Equal(t, jobs[1].Schedule, genericMockJobTwo.Schedule) 182 | assert.Equal(t, jobs[1].Owner, genericMockJobTwo.Owner) 183 | assert.Equal(t, jobs[1].Metadata.SuccessCount, genericMockJobTwo.Metadata.SuccessCount) 184 | 185 | job2, err := db.Get(genericMockJobTwo.Id) 186 | if assert.NoError(t, err) { 187 | 188 | assert.WithinDuration(t, job2.NextRunAt, genericMockJobTwo.NextRunAt, 400*time.Microsecond) 189 | assert.Equal(t, job2.Name, genericMockJobTwo.Name) 190 | assert.Equal(t, job2.Id, genericMockJobTwo.Id) 191 | assert.Equal(t, job2.Command, genericMockJobTwo.Command) 192 | assert.Equal(t, job2.Schedule, genericMockJobTwo.Schedule) 193 | assert.Equal(t, job2.Owner, genericMockJobTwo.Owner) 194 | assert.Equal(t, job2.Metadata.SuccessCount, genericMockJobTwo.Metadata.SuccessCount) 195 | 196 | t.Logf("Deleting job with id %s", job2.Id) 197 | 198 | err := db.Delete(job2.Id) 199 | if assert.NoError(t, err) { 200 | 201 | jobs, err := db.GetAll() 202 | if assert.NoError(t, err) { 203 | 204 | assert.Equal(t, 1, len(jobs)) 205 | 206 | assert.WithinDuration(t, jobs[0].NextRunAt, genericMockJobOne.NextRunAt, 400*time.Microsecond) 207 | assert.Equal(t, jobs[0].Name, genericMockJobOne.Name) 208 | assert.Equal(t, 
jobs[0].Id, genericMockJobOne.Id) 209 | assert.Equal(t, jobs[0].Command, genericMockJobOne.Command) 210 | assert.Equal(t, jobs[0].Schedule, genericMockJobOne.Schedule) 211 | assert.Equal(t, jobs[0].Owner, genericMockJobOne.Owner) 212 | assert.Equal(t, jobs[0].Metadata.SuccessCount, genericMockJobOne.Metadata.SuccessCount) 213 | 214 | } 215 | } 216 | } 217 | } 218 | } 219 | } 220 | } 221 | 222 | func TestPersistEpsilon(t *testing.T) { 223 | 224 | dsn := os.Getenv("MYSQL_DSN") 225 | if dsn == "" { 226 | mysqld, err := mysqltest.NewMysqld(nil) 227 | if assert.NoError(t, err) { 228 | dsn = mysqld.Datasource("test", "root", "", 0) 229 | defer mysqld.Stop() 230 | } else { 231 | t.FailNow() 232 | } 233 | } 234 | 235 | db := New(dsn, nil) 236 | defer db.Close() 237 | 238 | cache := job.NewMemoryJobCache(db) 239 | 240 | mockJob := job.GetMockRecurringJobWithSchedule(time.Now().Add(time.Second*1), "PT1H") 241 | mockJob.Epsilon = "PT1H" 242 | mockJob.Command = "asdf" 243 | mockJob.Retries = 2 244 | 245 | err := mockJob.Init(cache) 246 | if assert.NoError(t, err) { 247 | 248 | err := db.Save(mockJob) 249 | if assert.NoError(t, err) { 250 | 251 | retrieved, err := db.GetAll() 252 | if assert.NoError(t, err) { 253 | 254 | retrieved[0].Run(cache) 255 | 256 | } 257 | } 258 | } 259 | } 260 | -------------------------------------------------------------------------------- /job/storage/postgres/postgres.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | "fmt" 7 | 8 | _ "github.com/lib/pq" 9 | 10 | "github.com/ajvb/kala/job" 11 | 12 | log "github.com/sirupsen/logrus" 13 | ) 14 | 15 | const TABLE_NAME = "jobs" 16 | 17 | type DB struct { 18 | conn *sql.DB 19 | } 20 | 21 | // New instantiates a new DB. 
22 | func New(dsn string) *DB { 23 | connection, err := sql.Open("postgres", dsn) 24 | if err != nil { 25 | log.Fatal(err) 26 | } 27 | // passive attempt to create table 28 | _, _ = connection.Exec(fmt.Sprintf(`create table %s (job jsonb);`, TABLE_NAME)) 29 | return &DB{ 30 | conn: connection, 31 | } 32 | } 33 | 34 | // GetAll returns all persisted Jobs. 35 | func (d DB) GetAll() ([]*job.Job, error) { 36 | query := fmt.Sprintf(`select coalesce(json_agg(j.job), '[]'::json) from (select * from %[1]s) as j;`, TABLE_NAME) 37 | 38 | var r sql.NullString 39 | err := d.conn.QueryRow(query).Scan(&r) 40 | if err != nil && err != sql.ErrNoRows { 41 | return nil, err 42 | } 43 | err = nil 44 | jobs := []*job.Job{} 45 | if r.Valid { 46 | err = json.Unmarshal([]byte(r.String), &jobs) 47 | } 48 | 49 | jobsInitiated := []*job.Job{} 50 | for _, j := range jobs { 51 | if err = j.InitDelayDuration(false); err != nil { 52 | break 53 | } 54 | jobsInitiated = append(jobsInitiated, j) 55 | } 56 | 57 | return jobsInitiated, err 58 | } 59 | 60 | // Get returns a persisted Job. 61 | func (d DB) Get(id string) (*job.Job, error) { 62 | template := `select to_jsonb(j.job) from (select * from %[1]s where job ->> 'id' = $1) as j;` 63 | query := fmt.Sprintf(template, TABLE_NAME) 64 | var r sql.NullString 65 | err := d.conn.QueryRow(query, id).Scan(&r) 66 | if err != nil { 67 | return nil, err 68 | } 69 | result := &job.Job{} 70 | if r.Valid { 71 | err = json.Unmarshal([]byte(r.String), &result) 72 | } 73 | return result, err 74 | } 75 | 76 | // Delete deletes a persisted Job. 77 | func (d DB) Delete(id string) error { 78 | query := fmt.Sprintf(`delete from %v where job ->> 'id' = $1;`, TABLE_NAME) 79 | _, err := d.conn.Exec(query, id) 80 | return err 81 | } 82 | 83 | // Save persists a Job. 
84 | func (d DB) Save(j *job.Job) error { 85 | template := `update %[1]s SET job = $1 where job ->> 'id' = '` + j.Id + `'` 86 | 87 | _, errFind := d.Get(j.Id) 88 | if errFind != nil { 89 | template = `insert into %[1]s (job) values($1);` 90 | } 91 | 92 | query := fmt.Sprintf(template, TABLE_NAME) 93 | r, err := json.Marshal(j) 94 | if err != nil { 95 | return err 96 | } 97 | transaction, err := d.conn.Begin() 98 | if err != nil { 99 | return err 100 | } 101 | statement, err := transaction.Prepare(query) 102 | if err != nil { 103 | transaction.Rollback() //nolint:errcheck // adding insult to injury 104 | return err 105 | } 106 | defer statement.Close() 107 | _, err = statement.Exec(string(r)) 108 | if err != nil { 109 | transaction.Rollback() //nolint:errcheck // adding insult to injury 110 | return err 111 | } 112 | return transaction.Commit() 113 | } 114 | 115 | // Close closes the connection to Postgres. 116 | func (d DB) Close() error { 117 | return d.conn.Close() 118 | } 119 | -------------------------------------------------------------------------------- /job/storage/postgres/postgres_int_test.go: -------------------------------------------------------------------------------- 1 | // +build integration 2 | 3 | package postgres 4 | 5 | import ( 6 | "database/sql" 7 | "encoding/json" 8 | "fmt" 9 | "os" 10 | "testing" 11 | "time" 12 | 13 | "github.com/ajvb/kala/job" 14 | "github.com/ory/dockertest/v3" 15 | "github.com/stretchr/testify/assert" 16 | 17 | _ "github.com/lib/pq" 18 | "github.com/ory/dockertest/v3/docker" 19 | log "github.com/sirupsen/logrus" 20 | ) 21 | 22 | var db *sql.DB 23 | var dbDocker *DB 24 | 25 | func TestMain(m *testing.M) { 26 | // uses a sensible default on windows (tcp/http) and linux/osx (socket) 27 | pool, err := dockertest.NewPool("") 28 | if err != nil { 29 | log.Fatalf("Could not connect to docker: %s", err) 30 | } 31 | 32 | // pulls an image, creates a container based on it and runs it 33 | resource, err := 
pool.RunWithOptions(&dockertest.RunOptions{ 34 | Repository: "postgres", 35 | Tag: "11", 36 | Env: []string{ 37 | "POSTGRES_PASSWORD=secret", 38 | "POSTGRES_USER=user_name", 39 | "POSTGRES_DB=dbname", 40 | "listen_addresses = '*'", 41 | }, 42 | }, func(config *docker.HostConfig) { 43 | // set AutoRemove to true so that stopped container goes away by itself 44 | config.AutoRemove = true 45 | config.RestartPolicy = docker.RestartPolicy{Name: "no"} 46 | }) 47 | if err != nil { 48 | log.Fatalf("Could not start resource: %s", err) 49 | } 50 | 51 | hostAndPort := resource.GetHostPort("5432/tcp") 52 | databaseUrl := fmt.Sprintf("postgres://user_name:secret@%s/dbname?sslmode=disable", hostAndPort) 53 | 54 | log.Println("Connecting to database on url: ", databaseUrl) 55 | 56 | resource.Expire(120) // Tell docker to hard kill the container in 120 seconds 57 | 58 | // exponential backoff-retry, because the application in the container might not be ready to accept connections yet 59 | pool.MaxWait = 120 * time.Second 60 | if err = pool.Retry(func() error { 61 | db, err = sql.Open("postgres", databaseUrl) 62 | if err != nil { 63 | return err 64 | } 65 | 66 | // passive attempt to create table 67 | _, _ = db.Exec(fmt.Sprintf(`create table %s (job jsonb);`, TableName)) 68 | 69 | return db.Ping() 70 | }); err != nil { 71 | log.Fatalf("Could not connect to docker: %s", err) 72 | } 73 | 74 | dbDocker = &DB{ 75 | conn: db, 76 | } 77 | 78 | //Run tests 79 | code := m.Run() 80 | 81 | // You can't defer this because os.Exit doesn't care for defer 82 | if err := pool.Purge(resource); err != nil { 83 | log.Fatalf("Could not purge resource: %s", err) 84 | } 85 | 86 | os.Exit(code) 87 | } 88 | 89 | func TestRealPostgres(t *testing.T) { 90 | cache := job.NewLockFreeJobCache(dbDocker) 91 | defer dbDocker.Close() 92 | 93 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 94 | genericMockJob.Init(cache) 95 | 96 | j, err := json.Marshal(genericMockJob) 97 | 98 | 
fmt.Println(string(j)) 99 | 100 | if assert.NoError(t, err) { 101 | err := dbDocker.Save(genericMockJob) 102 | if assert.NoError(t, err) { 103 | j2, err := dbDocker.Get(genericMockJob.Id) 104 | if assert.Nil(t, err) { 105 | assert.WithinDuration(t, j2.NextRunAt, genericMockJob.NextRunAt, 400*time.Microsecond) 106 | assert.Equal(t, j2.Name, genericMockJob.Name) 107 | assert.Equal(t, j2.Id, genericMockJob.Id) 108 | assert.Equal(t, j2.Command, genericMockJob.Command) 109 | assert.Equal(t, j2.Schedule, genericMockJob.Schedule) 110 | assert.Equal(t, j2.Owner, genericMockJob.Owner) 111 | assert.Equal(t, j2.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 112 | err := dbDocker.Delete(j2.Id) 113 | if assert.Nil(t, err) { 114 | jobs, _ := dbDocker.GetAll() 115 | assert.Equal(t, len(jobs), 0) 116 | } 117 | } 118 | } 119 | } 120 | 121 | } 122 | -------------------------------------------------------------------------------- /job/storage/postgres/postgres_test.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/json" 6 | "fmt" 7 | "testing" 8 | "time" 9 | 10 | "github.com/DATA-DOG/go-sqlmock" 11 | 12 | "github.com/ajvb/kala/job" 13 | 14 | "github.com/stretchr/testify/assert" 15 | ) 16 | 17 | func NewTestDb() (*DB, sqlmock.Sqlmock) { 18 | connection, m, _ := sqlmock.New() 19 | var db = &DB{ 20 | conn: connection, 21 | } 22 | return db, m 23 | } 24 | 25 | func TestSaveAndGetJob(t *testing.T) { 26 | db, m := NewTestDb() 27 | 28 | cache := job.NewLockFreeJobCache(db) 29 | defer db.Close() 30 | 31 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 32 | genericMockJob.Init(cache) 33 | 34 | j, err := json.Marshal(genericMockJob) 35 | if assert.NoError(t, err) { 36 | m.ExpectBegin() 37 | m.ExpectPrepare("insert .*"). 38 | ExpectExec(). 39 | WithArgs(string(j)). 
40 | WillReturnResult(sqlmock.NewResult(1, 1)) 41 | m.ExpectCommit() 42 | err := db.Save(genericMockJob) 43 | if assert.NoError(t, err) { 44 | m.ExpectQuery("select .*"). 45 | WithArgs(genericMockJob.Id). 46 | WillReturnRows(sqlmock.NewRows([]string{"job"}).AddRow(j)) 47 | j2, err := db.Get(genericMockJob.Id) 48 | if assert.Nil(t, err) { 49 | assert.WithinDuration(t, j2.NextRunAt, genericMockJob.NextRunAt, 400*time.Microsecond) 50 | assert.Equal(t, j2.Name, genericMockJob.Name) 51 | assert.Equal(t, j2.Id, genericMockJob.Id) 52 | assert.Equal(t, j2.Command, genericMockJob.Command) 53 | assert.Equal(t, j2.Schedule, genericMockJob.Schedule) 54 | assert.Equal(t, j2.Owner, genericMockJob.Owner) 55 | assert.Equal(t, j2.Metadata.SuccessCount, genericMockJob.Metadata.SuccessCount) 56 | } 57 | } 58 | } 59 | } 60 | 61 | func TestDeleteJob(t *testing.T) { 62 | db, m := NewTestDb() 63 | 64 | cache := job.NewLockFreeJobCache(db) 65 | 66 | genericMockJob := job.GetMockJobWithGenericSchedule(time.Now()) 67 | genericMockJob.Init(cache) 68 | 69 | j, err := json.Marshal(genericMockJob) 70 | if assert.NoError(t, err) { 71 | 72 | m.ExpectBegin() 73 | m.ExpectPrepare("insert .*"). 74 | ExpectExec(). 75 | WithArgs(string(j)). 76 | WillReturnResult(sqlmock.NewResult(1, 1)) 77 | m.ExpectCommit() 78 | 79 | err := db.Save(genericMockJob) 80 | if assert.NoError(t, err) { 81 | 82 | // Delete it 83 | m.ExpectExec("delete .*"). 84 | WithArgs(genericMockJob.Id). 85 | WillReturnResult(sqlmock.NewResult(1, 1)) 86 | 87 | err = db.Delete(genericMockJob.Id) 88 | assert.Nil(t, err) 89 | 90 | // Verify deletion 91 | m.ExpectQuery("select .*"). 92 | WithArgs(genericMockJob.Id). 
93 | WillReturnError(sql.ErrNoRows) 94 | 95 | k, err := db.Get(genericMockJob.Id) 96 | assert.Error(t, err) 97 | assert.Nil(t, k) 98 | } 99 | } 100 | 101 | } 102 | 103 | func TestSaveAndGetAllJobs(t *testing.T) { 104 | db, m := NewTestDb() 105 | 106 | genericMockJobOne := job.GetMockJobWithGenericSchedule(time.Now()) 107 | genericMockJobTwo := job.GetMockJobWithGenericSchedule(time.Now()) 108 | 109 | jobOne, err := json.Marshal(genericMockJobOne) 110 | if assert.NoError(t, err) { 111 | if assert.NoError(t, err) { 112 | jobTwo, err := json.Marshal(genericMockJobTwo) 113 | assert.NoError(t, err) 114 | 115 | aggregate := fmt.Sprintf(`[%s,%s]`, jobOne, jobTwo) 116 | m.ExpectQuery(".*").WillReturnRows(sqlmock.NewRows([]string{"jobs"}).AddRow(aggregate)) 117 | 118 | jobs, err := db.GetAll() 119 | assert.Nil(t, err) 120 | assert.Equal(t, 2, len(jobs)) 121 | } 122 | } 123 | 124 | } 125 | -------------------------------------------------------------------------------- /job/storage/redis/redis.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "github.com/ajvb/kala/job" 5 | 6 | "github.com/garyburd/redigo/redis" 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | var ( 11 | // HashKey is the hash key where jobs are persisted. 12 | HashKey = "kala:jobs" 13 | ) 14 | 15 | // DB is concrete implementation of the JobDB interface, that uses Redis for persistence. 16 | type DB struct { 17 | conn redis.Conn 18 | } 19 | 20 | // New instantiates a new DB. 
21 | func New(address string, password redis.DialOption, sendPassword bool) *DB { 22 | var conn redis.Conn 23 | var err error 24 | if address == "" { 25 | address = "127.0.0.1:6379" 26 | } 27 | if sendPassword { 28 | conn, err = redis.Dial("tcp", address, password) 29 | } else { 30 | conn, err = redis.Dial("tcp", address) 31 | } 32 | if err != nil { 33 | log.Fatal(err) 34 | } 35 | return &DB{ 36 | conn: conn, 37 | } 38 | } 39 | 40 | // GetAll returns all persisted Jobs. 41 | func (d DB) GetAll() ([]*job.Job, error) { 42 | jobs := []*job.Job{} 43 | 44 | vals, err := d.conn.Do("HVALS", HashKey) 45 | if err != nil { 46 | return jobs, err 47 | } 48 | 49 | for _, val := range vals.([]interface{}) { 50 | j, err := job.NewFromBytes(val.([]byte)) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | err = j.InitDelayDuration(false) 56 | if err != nil { 57 | return nil, err 58 | } 59 | 60 | jobs = append(jobs, j) 61 | } 62 | 63 | return jobs, nil 64 | } 65 | 66 | // Get returns a persisted Job. 67 | func (d DB) Get(id string) (*job.Job, error) { 68 | val, err := d.conn.Do("HGET", HashKey, id) 69 | if err != nil { 70 | return nil, err 71 | } 72 | if val == nil { 73 | return nil, job.ErrJobNotFound(id) 74 | } 75 | 76 | return job.NewFromBytes(val.([]byte)) 77 | } 78 | 79 | // Delete deletes a persisted Job. 80 | func (d DB) Delete(id string) error { 81 | _, err := d.conn.Do("HDEL", HashKey, id) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | return nil 87 | } 88 | 89 | // Save persists a Job. 90 | func (d DB) Save(j *job.Job) error { 91 | bytes, err := j.Bytes() 92 | if err != nil { 93 | return err 94 | } 95 | 96 | _, err = d.conn.Do("HSET", HashKey, j.Id, bytes) 97 | if err != nil { 98 | return err 99 | } 100 | 101 | return nil 102 | } 103 | 104 | // Close closes the connection to Redis. 
105 | func (d DB) Close() error { 106 | err := d.conn.Close() 107 | if err != nil { 108 | return err 109 | } 110 | return nil 111 | } 112 | -------------------------------------------------------------------------------- /job/storage/redis/redis_test.go: -------------------------------------------------------------------------------- 1 | package redis 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | "time" 7 | 8 | "github.com/ajvb/kala/job" 9 | "github.com/rafaeljusto/redigomock" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | type testJob struct { 14 | Job *job.Job 15 | Bytes []byte 16 | } 17 | 18 | var ( 19 | conn = redigomock.NewConn() 20 | db = mockRedisDB() 21 | cache = job.NewLockFreeJobCache(db) 22 | testJobs = initTestJobs(3) 23 | ) 24 | 25 | // mockRedisDB returns a new DB with a mock Redis connection. 26 | func mockRedisDB() DB { 27 | return DB{ 28 | conn: conn, 29 | } 30 | } 31 | 32 | // testJobs initializes n testJobs 33 | func initTestJobs(n int) []testJob { 34 | tjs := []testJob{} 35 | 36 | for i := 0; i < n; i++ { 37 | j := job.GetMockJobWithGenericSchedule(time.Now()) 38 | j.Init(cache) 39 | 40 | bytes, err := j.Bytes() 41 | if err != nil { 42 | panic(err) 43 | } 44 | t := testJob{ 45 | Job: j, 46 | Bytes: bytes, 47 | } 48 | 49 | tjs = append(tjs, t) 50 | } 51 | 52 | return tjs 53 | } 54 | 55 | func TestSaveJob(t *testing.T) { 56 | testJob := testJobs[0] 57 | 58 | // Expect a HSET operation to be performed the job hash key, job ID and encoded job 59 | conn.Command("HSET", HashKey, testJob.Job.Id, testJob.Bytes). 60 | Expect("ok") 61 | 62 | err := db.Save(testJob.Job) 63 | assert.Nil(t, err) 64 | 65 | // Test error handling 66 | conn.Command("HSET", HashKey, testJob.Job.Id, testJob.Bytes). 
67 | ExpectError(errors.New("Redis error")) 68 | 69 | err = db.Save(testJob.Job) 70 | assert.NotNil(t, err) 71 | } 72 | 73 | func TestGetJob(t *testing.T) { 74 | testJob := testJobs[0] 75 | 76 | // Expect a HGET operation to be preformed with the job hash key and job ID 77 | conn.Command("HGET", HashKey, testJob.Job.Id). 78 | Expect(testJob.Bytes). 79 | ExpectError(nil) 80 | 81 | storedJob, err := db.Get(testJob.Job.Id) 82 | assert.Nil(t, err) 83 | 84 | assert.WithinDuration(t, storedJob.NextRunAt, testJob.Job.NextRunAt, 100*time.Microsecond) 85 | assert.Equal(t, testJob.Job.Name, storedJob.Name) 86 | assert.Equal(t, testJob.Job.Id, storedJob.Id) 87 | assert.Equal(t, testJob.Job.Command, storedJob.Command) 88 | assert.Equal(t, testJob.Job.Schedule, storedJob.Schedule) 89 | assert.Equal(t, testJob.Job.Owner, storedJob.Owner) 90 | assert.Equal(t, testJob.Job.Metadata.SuccessCount, storedJob.Metadata.SuccessCount) 91 | 92 | // Test error handling 93 | conn.Command("HGET", HashKey, testJob.Job.Id). 94 | ExpectError(errors.New("Redis error")) 95 | 96 | _, err = db.Get(testJob.Job.Id) 97 | assert.NotNil(t, err) 98 | } 99 | 100 | func TestDeleteJob(t *testing.T) { 101 | testJob := testJobs[0] 102 | 103 | // Expect a HDEL operation to be preformed with the job ID 104 | conn.Command("HDEL", HashKey, testJob.Job.Id). 105 | Expect("ok"). 106 | ExpectError(nil) 107 | 108 | err := db.Delete(testJob.Job.Id) 109 | assert.Nil(t, err) 110 | 111 | // Test error handling 112 | conn.Command("HDEL", HashKey, testJob.Job.Id). 113 | ExpectError(errors.New("Redis error")) 114 | 115 | err = db.Delete(testJob.Job.Id) 116 | assert.NotNil(t, err) 117 | } 118 | 119 | func TestGetAllJobs(t *testing.T) { 120 | // Expect a HVALS operation to be preformed with the job hash key 121 | conn.Command("HVALS", HashKey). 122 | Expect([]interface{}{ 123 | testJobs[0].Bytes, 124 | testJobs[1].Bytes, 125 | testJobs[2].Bytes, 126 | }). 
127 | ExpectError(nil) 128 | 129 | jobs, err := db.GetAll() 130 | assert.Nil(t, err) 131 | 132 | for i, j := range jobs { 133 | assert.WithinDuration(t, testJobs[i].Job.NextRunAt, j.NextRunAt, 100*time.Microsecond) 134 | assert.Equal(t, testJobs[i].Job.Name, j.Name) 135 | assert.Equal(t, testJobs[i].Job.Id, j.Id) 136 | assert.Equal(t, testJobs[i].Job.Command, j.Command) 137 | assert.Equal(t, testJobs[i].Job.Schedule, j.Schedule) 138 | assert.Equal(t, testJobs[i].Job.Owner, j.Owner) 139 | assert.Equal(t, testJobs[i].Job.Metadata.SuccessCount, j.Metadata.SuccessCount) 140 | } 141 | 142 | // Test erorr handling 143 | conn.Command("HVALS", HashKey). 144 | ExpectError(errors.New("Redis error")) 145 | 146 | _, err = db.GetAll() 147 | assert.NotNil(t, err) 148 | } 149 | 150 | func TestNew(t *testing.T) { 151 | 152 | } 153 | 154 | func TestClose(t *testing.T) { 155 | closedConn := false 156 | 157 | conn.CloseMock = func() error { 158 | closedConn = true 159 | return nil 160 | } 161 | 162 | err := db.Close() 163 | 164 | assert.Nil(t, err) 165 | assert.True(t, closedConn) 166 | 167 | // Reset closedConn 168 | closedConn = false 169 | 170 | conn.CloseMock = func() error { 171 | closedConn = false 172 | return errors.New("Error closing connection") 173 | } 174 | 175 | err = db.Close() 176 | 177 | assert.NotNil(t, err) 178 | assert.False(t, closedConn) 179 | } 180 | -------------------------------------------------------------------------------- /job/test_utils.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "sync" 7 | "testing" 8 | "time" 9 | 10 | "github.com/ajvb/kala/utils/iso8601" 11 | ) 12 | 13 | type MockDBGetAll struct { 14 | MockDB 15 | response []*Job 16 | } 17 | 18 | func (d *MockDBGetAll) GetAll() ([]*Job, error) { 19 | return d.response, nil 20 | } 21 | 22 | type MockDB struct{} 23 | 24 | func (m *MockDB) GetAll() ([]*Job, error) { 25 | return nil, nil 26 | } 27 | func 
(m *MockDB) Get(id string) (*Job, error) { 28 | return nil, nil 29 | } 30 | func (m *MockDB) Delete(id string) error { 31 | return nil 32 | } 33 | func (m *MockDB) Save(job *Job) error { 34 | return nil 35 | } 36 | func (m *MockDB) Close() error { 37 | return nil 38 | } 39 | 40 | func NewMockCache() *LockFreeJobCache { 41 | return NewLockFreeJobCache(&MockDB{}) 42 | } 43 | 44 | func GetMockJob() *Job { 45 | return &Job{ 46 | Name: "mock_job", 47 | Command: "bash -c 'date'", 48 | Owner: "example@example.com", 49 | Retries: 2, 50 | } 51 | } 52 | 53 | func GetMockFailingJob() *Job { 54 | return &Job{ 55 | Name: "mock_failing_job", 56 | Command: "asdf", 57 | Owner: "example@example.com", 58 | Retries: 2, 59 | } 60 | } 61 | 62 | func GetMockRemoteJob(props RemoteProperties) *Job { 63 | return &Job{ 64 | Name: "mock_remote_job", 65 | Command: "", 66 | JobType: RemoteJob, 67 | RemoteProperties: props, 68 | } 69 | } 70 | 71 | func GetMockJobWithSchedule(repeat int, scheduleTime time.Time, delay string) *Job { 72 | genericMockJob := GetMockJob() 73 | 74 | parsedTime := scheduleTime.Format(time.RFC3339) 75 | scheduleStr := fmt.Sprintf("R%d/%s/%s", repeat, parsedTime, delay) 76 | genericMockJob.Schedule = scheduleStr 77 | 78 | return genericMockJob 79 | } 80 | 81 | func GetMockRecurringJobWithSchedule(scheduleTime time.Time, delay string) *Job { 82 | genericMockJob := GetMockJob() 83 | 84 | parsedTime := scheduleTime.Format(time.RFC3339) 85 | scheduleStr := fmt.Sprintf("R/%s/%s", parsedTime, delay) 86 | genericMockJob.Schedule = scheduleStr 87 | genericMockJob.timesToRepeat = -1 88 | 89 | parsedDuration, _ := iso8601.FromString(delay) 90 | 91 | genericMockJob.delayDuration = parsedDuration 92 | 93 | return genericMockJob 94 | } 95 | 96 | func GetMockJobStats(oldDate time.Time, count int) []*JobStat { 97 | stats := make([]*JobStat, 0) 98 | for i := 1; i <= count; i++ { 99 | el := &JobStat{ 100 | JobId: "stats-id-" + fmt.Sprint(i), 101 | NumberOfRetries: 0, 102 | 
ExecutionDuration: 10000, 103 | Success: true, 104 | RanAt: oldDate, 105 | } 106 | stats = append(stats, el) 107 | } 108 | return stats 109 | } 110 | 111 | func GetMockJobWithGenericSchedule(now time.Time) *Job { 112 | fiveMinutesFromNow := now.Add(time.Minute * 5) 113 | return GetMockJobWithSchedule(2, fiveMinutesFromNow, "P1DT10M10S") 114 | } 115 | 116 | func parseTime(t *testing.T, value string) time.Time { 117 | now, err := time.Parse("2006-Jan-02 15:04", value) 118 | if err != nil { 119 | t.Fatal(err) 120 | } 121 | return now 122 | } 123 | 124 | func parseTimeInLocation(t *testing.T, value string, location string) time.Time { 125 | loc, err := time.LoadLocation(location) 126 | if err != nil { 127 | t.Fatal(err) 128 | } 129 | now, err := time.ParseInLocation("2006-Jan-02 15:04", value, loc) 130 | if err != nil { 131 | t.Fatal(err) 132 | } 133 | return now 134 | } 135 | 136 | // Used to hand off execution briefly so that jobs can run and so on. 137 | func briefPause() { 138 | time.Sleep(time.Millisecond * 100) 139 | } 140 | 141 | func awaitJobRan(t *testing.T, j *Job, timeout time.Duration) { 142 | t.Helper() 143 | select { 144 | case <-j.ranChan: 145 | case <-time.After(timeout): 146 | t.Fatal("Job failed to run") 147 | } 148 | } 149 | 150 | var _ JobDB = (*MemoryDB)(nil) 151 | 152 | type MemoryDB struct { 153 | m map[string]*Job 154 | lock sync.RWMutex 155 | } 156 | 157 | func NewMemoryDB() *MemoryDB { 158 | return &MemoryDB{ 159 | m: map[string]*Job{}, 160 | } 161 | } 162 | 163 | func (m *MemoryDB) GetAll() (ret []*Job, _ error) { 164 | m.lock.RLock() 165 | defer m.lock.RUnlock() 166 | for _, v := range m.m { 167 | ret = append(ret, v) 168 | } 169 | return 170 | } 171 | 172 | func (m *MemoryDB) Get(id string) (*Job, error) { 173 | m.lock.RLock() 174 | defer m.lock.RUnlock() 175 | j, exist := m.m[id] 176 | if !exist { 177 | return nil, ErrJobNotFound(id) 178 | } 179 | return j, nil 180 | } 181 | 182 | func (m *MemoryDB) Delete(id string) error { 183 | 
m.lock.Lock() 184 | defer m.lock.Unlock() 185 | if _, exists := m.m[id]; !exists { 186 | return errors.New("Doesn't exist") // Used for testing 187 | } 188 | delete(m.m, id) 189 | // log.Printf("After delete: %+v", m) 190 | return nil 191 | } 192 | 193 | func (m *MemoryDB) Save(j *Job) error { 194 | m.lock.Lock() 195 | defer m.lock.Unlock() 196 | m.m[j.Id] = j 197 | return nil 198 | } 199 | 200 | func (m *MemoryDB) Close() error { 201 | return nil 202 | } 203 | -------------------------------------------------------------------------------- /load/README.md: -------------------------------------------------------------------------------- 1 | # Load Testing 2 | 3 | These files can be run with [k6](https://github.com/loadimpact/k6). They're pretty rudimentary as far as load tests go but they did help to locate a deadlock, so they are being preserved for posterity. 4 | 5 | Example command: 6 | ``` 7 | k6 run load/loadtest.js 8 | ``` 9 | -------------------------------------------------------------------------------- /load/loadtest.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check, sleep } from "k6"; 3 | import { Trend } from 'k6/metrics'; 4 | 5 | export let options = { 6 | stages: [ 7 | // Ramp-up from 1 to 5 VUs in 10s 8 | { 9 | duration: "10s", 10 | target: 50 11 | }, 12 | 13 | // Stay at rest on 5 VUs for 5s 14 | { 15 | duration: "5s", 16 | target: 50 17 | }, 18 | 19 | // Ramp-down from 5 to 0 VUs for 5s 20 | { 21 | duration: "5s", 22 | target: 0 23 | }, 24 | // Ramp-down from 5 to 0 VUs for 5s 25 | { 26 | duration: "10s", 27 | target: 1 28 | } 29 | ] 30 | }; 31 | 32 | let jobs = new Trend('jobs'); 33 | let activeJobs = new Trend('active_jobs'); 34 | let errorCount = new Trend('error_count'); 35 | let successCount = new Trend('success_count'); 36 | 37 | var count = 0; 38 | 39 | function getSchedule(now) { 40 | now.setSeconds(now.getSeconds() + 1); 41 | return 'R5/' + 
now.toISOString() + '/PT5S'; 42 | } 43 | 44 | export default function() { 45 | let payload = JSON.stringify({ 46 | name: 'job' + (count++), 47 | schedule: getSchedule(new Date()), 48 | command: "bash -c 'date'" 49 | }); 50 | let write = http.post("http://localhost:8000/api/v1/job/", payload); 51 | check(write, { 52 | "status is ok": (r) => r.status === 201 53 | }); 54 | sleep(1); 55 | let read = http.get("http://localhost:8000/api/v1/job/"); 56 | check(read, { 57 | "status is ok": (r) => r.status === 200 58 | }); 59 | sleep(1); 60 | let stats = http.get("http://localhost:8000/api/v1/stats/"); 61 | check(stats, { 62 | "status is ok": (r) => r.status === 200 63 | }); 64 | let parsed = JSON.parse(stats.body); 65 | jobs.add(parsed.Stats.jobs); 66 | activeJobs.add(parsed.Stats.active_jobs); 67 | errorCount.add(parsed.Stats.error_count); 68 | successCount.add(parsed.Stats.success_count); 69 | sleep(1); 70 | } 71 | -------------------------------------------------------------------------------- /load/spiketest.js: -------------------------------------------------------------------------------- 1 | import http from "k6/http"; 2 | import { check, sleep } from "k6"; 3 | import { Trend } from 'k6/metrics'; 4 | 5 | export let options = { 6 | stages: [ 7 | // Ramp-up from 1 to 5 VUs in 10s 8 | { 9 | duration: "10s", 10 | target: 5 11 | }, 12 | 13 | // Stay at rest on 5 VUs for 5s 14 | { 15 | duration: "5s", 16 | target: 200 17 | }, 18 | 19 | // Ramp-down from 5 to 0 VUs for 5s 20 | { 21 | duration: "5s", 22 | target: 5 23 | }, 24 | // Ramp-down from 5 to 0 VUs for 5s 25 | { 26 | duration: "10s", 27 | target: 1 28 | } 29 | ] 30 | }; 31 | 32 | let jobs = new Trend('jobs'); 33 | let activeJobs = new Trend('active_jobs'); 34 | let errorCount = new Trend('error_count'); 35 | let successCount = new Trend('success_count'); 36 | 37 | var count = 0; 38 | 39 | function getSchedule(now) { 40 | now.setSeconds(now.getSeconds() + 1); 41 | return 'R5/' + now.toISOString() + '/PT5S'; 42 | } 43 
| 44 | export default function() { 45 | let payload = JSON.stringify({ 46 | name: 'job' + (count++), 47 | schedule: getSchedule(new Date()), 48 | command: "bash -c 'date'" 49 | }); 50 | let write = http.post("http://localhost:8000/api/v1/job/", payload); 51 | check(write, { 52 | "status is ok": (r) => r.status === 201 53 | }); 54 | sleep(1); 55 | let read = http.get("http://localhost:8000/api/v1/job/"); 56 | check(read, { 57 | "status is ok": (r) => r.status === 200 58 | }); 59 | sleep(1); 60 | let stats = http.get("http://localhost:8000/api/v1/stats/"); 61 | check(stats, { 62 | "status is ok": (r) => r.status === 200 63 | }); 64 | let parsed = JSON.parse(stats.body); 65 | jobs.add(parsed.Stats.jobs); 66 | activeJobs.add(parsed.Stats.active_jobs); 67 | errorCount.add(parsed.Stats.error_count); 68 | successCount.add(parsed.Stats.success_count); 69 | sleep(1); 70 | } 71 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/ajvb/kala/cmd" 5 | 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | func init() { 10 | log.SetLevel(log.InfoLevel) 11 | } 12 | 13 | func main() { 14 | if err := cmd.RootCmd.Execute(); err != nil { 15 | log.Fatal(err) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package main 4 | 5 | import ( 6 | _ "github.com/elazarl/go-bindata-assetfs" 7 | _ "github.com/go-bindata/go-bindata" 8 | ) 9 | -------------------------------------------------------------------------------- /utils/iso8601/iso8601.go: -------------------------------------------------------------------------------- 1 | package iso8601 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "regexp" 8 | "strconv" 9 | "text/template" 10 | "time" 11 | ) 
12 | 13 | var ( 14 | // ErrBadFormat is returned when parsing fails 15 | ErrBadFormat = errors.New("bad format string") 16 | 17 | //nolint:lll 18 | tmpl = template.Must(template.New("duration").Parse(`P{{if .Years}}{{.Years}}Y{{end}}{{if .Months}}{{.Months}}M{{end}}{{if .Weeks}}{{.Weeks}}W{{end}}{{if .Days}}{{.Days}}D{{end}}{{if .HasTimePart}}T{{end }}{{if .Hours}}{{.Hours}}H{{end}}{{if .Minutes}}{{.Minutes}}M{{end}}{{if .Seconds}}{{.Seconds}}S{{end}}`)) 19 | 20 | //nolint:lll 21 | full = regexp.MustCompile(`P(?:(?P\d+)Y)?(?:(?P\d+)M)?(?:(?P\d+)D)?(?:(?P