├── .github ├── dependabot.yml ├── release-drafter.yml └── workflows │ ├── ci.yaml │ ├── govulncheck.yml │ ├── lint.yaml │ └── release-drafter.yml ├── .gitignore ├── LICENSE ├── Makefile ├── Makefile.release ├── README.md ├── broker ├── broker.go ├── inmemory.go ├── inmemory_test.go ├── log.go ├── log_test.go ├── queues.go ├── queues_test.go ├── rabbitmq.go └── rabbitmq_test.go ├── cli ├── banner.go ├── cli.go ├── health.go ├── migrate.go └── run.go ├── cmd └── main.go ├── conf ├── conf.go └── conf_test.go ├── configs ├── config.ci.toml ├── config.compose.toml ├── config.distributed.toml └── sample.config.toml ├── datastore ├── datastore.go └── postgres │ ├── postgres.go │ ├── postgres_test.go │ └── record.go ├── db └── postgres │ └── schema.go ├── docker-compose.yaml ├── docs ├── cli_v3.jpg ├── logo.svg ├── swagger.json └── webui.png ├── engine ├── broker.go ├── broker_test.go ├── coordinator.go ├── coordinator_test.go ├── datastore.go ├── datastore_test.go ├── default.go ├── default_test.go ├── engine.go ├── engine_test.go ├── locker.go └── worker.go ├── examples ├── aws_create_master.yaml ├── aws_split_and_stitch.yaml ├── each.yaml ├── hello.yaml ├── hls.yaml ├── job_defaults.yaml ├── job_output.yaml ├── parallel.yaml ├── prepost.yaml ├── resize_image.yaml ├── retry.yaml ├── slow.yaml ├── split_and_stitch.yaml ├── subjob.yaml └── timeout.yaml ├── go.mod ├── go.sum ├── health ├── health.go └── health_test.go ├── input ├── job.go ├── task.go ├── validate.go └── validate_test.go ├── internal ├── cache │ ├── cache.go │ └── cache_test.go ├── coordinator │ ├── api │ │ ├── api.go │ │ ├── api_test.go │ │ ├── context.go │ │ └── context_test.go │ ├── coordinator.go │ ├── coordinator_test.go │ ├── handlers │ │ ├── cancel.go │ │ ├── cancel_test.go │ │ ├── completed.go │ │ ├── completed_test.go │ │ ├── error.go │ │ ├── error_test.go │ │ ├── heartbeat.go │ │ ├── heartbeat_test.go │ │ ├── job.go │ │ ├── job_test.go │ │ ├── log.go │ │ ├── log_test.go │ │ ├── pending.go │ │ ├── pending_test.go │ │ ├── progress.go │ │ ├── progress_test.go │ │ ├── schedule.go │ │ ├── started.go │ │ └── started_test.go │ └── scheduler │ │ ├── scheduler.go │ │ └── scheduler_test.go ├── eval │ ├── eval.go │ ├── eval_test.go │ ├── funcs.go │ └── funcs_test.go ├── fns │ └── fns.go ├── hash │ ├── hash.go │ └── hash_test.go ├── host │ ├── host.go │ └── host_test.go ├── httpx │ ├── httpx.go │ └── httpx_test.go ├── logging │ ├── logging.go │ └── writer.go ├── netx │ ├── netx.go │ └── netx_test.go ├── redact │ ├── redact.go │ └── redact_test.go ├── reexec │ ├── command_linux.go │ ├── command_unix.go │ ├── command_unsupported.go │ ├── reexec_test.go │ └── rexec.go ├── slices │ ├── slices.go │ └── slirces_test.go ├── syncx │ ├── map.go │ └── map_test.go ├── uuid │ ├── uuid.go │ └── uuid_test.go ├── webhook │ ├── webhook.go │ └── webhook_test.go ├── wildcard │ ├── wildcard.go │ └── wildcard_test.go └── worker │ ├── api.go │ ├── api_test.go │ ├── worker.go │ └── worker_test.go ├── job.go ├── job_test.go ├── locker ├── inmemory.go ├── inmemory_test.go ├── locker.go ├── postgres.go └── postgres_test.go ├── middleware ├── job │ ├── job.go │ ├── job_test.go │ ├── redact.go │ ├── redact_test.go │ ├── webhook.go │ └── webhook_test.go ├── node │ ├── node.go │ └── node_test.go ├── task │ ├── hostenv.go │ ├── hostenv_test.go │ ├── redact.go │ ├── redact_test.go │ ├── task.go │ ├── task_test.go │ ├── webhook.go │ └── webhook_test.go └── web │ └── web.go ├── mount.go ├── node.go ├── role.go ├── runtime ├── docker │ ├── archive.go │ ├── 
archive_test.go │ ├── auth.go │ ├── auth_test.go │ ├── bind.go │ ├── bind_test.go │ ├── config.go │ ├── docker.go │ ├── docker_test.go │ ├── reference.go │ ├── reference_test.go │ ├── tmpfs.go │ ├── tmpfs_test.go │ ├── volume.go │ └── volume_test.go ├── mount.go ├── multi.go ├── multi_test.go ├── podman │ ├── podman.go │ ├── podman_test.go │ ├── volume.go │ └── volume_test.go ├── runtime.go └── shell │ ├── setid_unix.go │ ├── setid_unsupported.go │ ├── shell.go │ └── shell_test.go ├── stats.go ├── task.go ├── task_test.go ├── user.go └── version.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | labels: 6 | - "🤖 Dependencies" 7 | schedule: 8 | interval: weekly 9 | - package-ecosystem: "github-actions" 10 | directory: "/" 11 | schedule: 12 | interval: weekly 13 | labels: 14 | - "🤖 Dependencies" 15 | -------------------------------------------------------------------------------- /.github/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name-template: 'v$RESOLVED_VERSION' 2 | tag-template: 'v$RESOLVED_VERSION' 3 | categories: 4 | - title: '🔒 Security:' 5 | labels: 6 | - '🔒 Security' 7 | - title: '❗ Breaking Changes:' 8 | labels: 9 | - '❗ Breaking Change' 10 | - title: '🚀 Features:' 11 | labels: 12 | - '✏️ Feature' 13 | - title: '🐛 Bug Fixes:' 14 | labels: 15 | - '🐛 Bug' 16 | - title: '📚 Documentation:' 17 | labels: 18 | - '📒 Documentation' 19 | - title: '🧹 Updates:' 20 | labels: 21 | - '🧹 Updates' 22 | - '🤖 Dependencies' 23 | change-template: '- $TITLE (#$NUMBER)' 24 | change-title-escapes: '\<*_&' # You can add # and @ to disable mentions, and add ` to disable code blocks. 25 | exclude-contributors: 26 | - dependabot 27 | - dependabot[bot] 28 | version-resolver: 29 | major: 30 | labels: 31 | - 'major' 32 | minor: 33 | labels: 34 | - 'minor' 35 | - '❗ Breaking Change' 36 | patch: 37 | labels: 38 | - '✏️ Feature' 39 | - '📒 Documentation' 40 | - '🐛 Bug' 41 | - '🤖 Dependencies' 42 | - '🧹 Updates' 43 | - '🔒 Security' 44 | default: patch 45 | template: | 46 | $CHANGES 47 | 48 | **Full Changelog**: https://github.com/$OWNER/$REPOSITORY/compare/$PREVIOUS_TAG...v$RESOLVED_VERSION 49 | 50 | Thanks to $CONTRIBUTORS for making this release possible. 
51 | 52 | autolabeler: 53 | - label: '📒 Documentation' 54 | files: 55 | - '*.md' 56 | title: 57 | - '/(docs|doc:|\[doc\]|typos|comment|documentation)/i' 58 | - label: '🐛 Bug' 59 | title: 60 | - '/(fix|race|bug|missing|correct)/i' 61 | - label: '🧹 Updates' 62 | title: 63 | - '/(improve|update|update|refactor|deprecated|remove|unused|test)/i' 64 | - label: '🤖 Dependencies' 65 | title: 66 | - '/(bump|dependencies)/i' 67 | - label: '✏️ Feature' 68 | title: 69 | - '/(feature|feat|create|implement|add)/i' -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - "**.md" 9 | - LICENSE 10 | - "docker-compose.yml" 11 | - ".github/ISSUE_TEMPLATE/*.yml" 12 | - ".github/dependabot.yml" 13 | - ".github/release-drafter.yml" 14 | pull_request: 15 | branches: 16 | - "*" 17 | paths-ignore: 18 | - "**.md" 19 | - LICENSE 20 | - "docker-compose.dev.yml" 21 | - ".github/ISSUE_TEMPLATE/*.yml" 22 | - ".github/dependabot.yml" 23 | - ".github/release-drafter.yml" 24 | 25 | jobs: 26 | ci: 27 | strategy: 28 | matrix: 29 | go-version: [1.21.x] 30 | postgres-version: [15, 16] 31 | runs-on: ubuntu-24.04 32 | services: 33 | postgres: 34 | image: postgres:${{ matrix.postgres-version }} 35 | env: 36 | POSTGRES_DB: tork 37 | POSTGRES_PASSWORD: tork 38 | POSTGRES_USER: tork 39 | POSTGRES_PORT: 5432 40 | ports: 41 | - 5432:5432 42 | # Set health checks to wait until postgres has started 43 | options: >- 44 | --health-cmd pg_isready 45 | --health-interval 10s 46 | --health-timeout 5s 47 | --health-retries 5 48 | rabbitmq: 49 | image: rabbitmq:3-management 50 | ports: 51 | - 5672:5672 52 | - 15672:15672 53 | steps: 54 | - name: Check out repository code 55 | uses: actions/checkout@v4 56 | 57 | - name: Set up Go 58 | uses: actions/setup-go@v5 59 | with: 60 | go-version: '${{ matrix.go-version }}' 61 | 62 | - name: Build Tork 63 | run: | 64 | go build -o tork cmd/main.go 65 | 66 | - name: Run DB migration 67 | run: | 68 | TORK_CONFIG=configs/config.ci.toml ./tork migration 69 | 70 | - name: Test Tork 71 | run: | 72 | # start tork 73 | ./tork run standalone & 74 | PID=$! 
75 | # give it a second to start 76 | sleep 1 77 | 78 | # perform a health check 79 | STATUS=$(curl -f -s http://localhost:8000/health | jq -r .status) 80 | echo "STATUS: $STATUS" 81 | if [ "$STATUS" != "UP" ]; then 82 | exit 1 83 | fi 84 | 85 | # submit a simple job 86 | JOB_ID=$(curl -s -X POST --data-binary @examples/hello.yaml \ 87 | -H "Content-type: text/yaml" http://localhost:8000/jobs | jq -r .id) 88 | for i in {1..5} 89 | do 90 | JOB_STATE=$(curl -s http://localhost:8000/jobs/$JOB_ID | jq -r .state) 91 | echo "$JOB_ID $JOB_STATE" 92 | if [ "$JOB_STATE" == "COMPLETED" ]; then 93 | break 94 | fi 95 | sleep 0.5 96 | done 97 | 98 | # submit a simple job 99 | JOB_ID=$(curl -s -X POST --data-binary @examples/hello.yaml \ 100 | -H "Content-type: text/yaml" http://localhost:8000/jobs | jq -r .id) 101 | for i in {1..10} 102 | do 103 | JOB_STATE=$(curl -s http://localhost:8000/jobs/$JOB_ID | jq -r .state) 104 | echo "$JOB_ID $JOB_STATE" 105 | if [ "$JOB_STATE" == "COMPLETED" ]; then 106 | break 107 | fi 108 | sleep 0.5 109 | done 110 | if [ "$JOB_STATE" != "COMPLETED" ]; then 111 | exit 1 112 | fi 113 | JOB_RESULT=$(curl -s http://localhost:8000/jobs/$JOB_ID | jq -r .result) 114 | if [ "$JOB_RESULT" != "hello world" ]; then 115 | echo "invalid job result" 116 | exit 1 117 | fi 118 | 119 | # terminate Tork 120 | kill -9 $PID 121 | 122 | - name: Run tests 123 | run: go test ./... -v -race 124 | -------------------------------------------------------------------------------- /.github/workflows/govulncheck.yml: -------------------------------------------------------------------------------- 1 | name: govulncheck 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths-ignore: 8 | - "**.md" 9 | - LICENSE 10 | - "docker-compose.yml" 11 | - ".github/ISSUE_TEMPLATE/*.yml" 12 | - ".github/dependabot.yml" 13 | - ".github/release-drafter.yml" 14 | pull_request: 15 | branches: 16 | - "*" 17 | paths-ignore: 18 | - "**.md" 19 | - LICENSE 20 | - "docker-compose.dev.yml" 21 | - ".github/ISSUE_TEMPLATE/*.yml" 22 | - ".github/dependabot.yml" 23 | - ".github/release-drafter.yml" 24 | 25 | jobs: 26 | govulncheck-check: 27 | runs-on: ubuntu-latest 28 | env: 29 | GO111MODULE: on 30 | steps: 31 | - name: Fetch Repository 32 | uses: actions/checkout@v4 33 | 34 | - name: Install Go 35 | uses: actions/setup-go@v5 36 | with: 37 | go-version: "stable" 38 | check-latest: true 39 | cache: false 40 | 41 | - name: Install Govulncheck 42 | run: go install golang.org/x/vuln/cmd/govulncheck@latest 43 | 44 | - name: Run Govulncheck 45 | run: govulncheck ./... 46 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: lint 2 | on: 3 | push: 4 | branches: 5 | - main 6 | paths-ignore: 7 | - "**.md" 8 | - LICENSE 9 | - "docker-compose.yml" 10 | - ".github/ISSUE_TEMPLATE/*.yml" 11 | - ".github/dependabot.yml" 12 | - ".github/release-drafter.yml" 13 | pull_request: 14 | branches: 15 | - "*" 16 | paths-ignore: 17 | - "**.md" 18 | - LICENSE 19 | - "docker-compose.dev.yml" 20 | - ".github/ISSUE_TEMPLATE/*.yml" 21 | - ".github/dependabot.yml" 22 | - ".github/release-drafter.yml" 23 | 24 | permissions: 25 | contents: read 26 | # Optional: allow read access to pull request. Use with `only-new-issues` option. 
27 | # pull-requests: read 28 | 29 | jobs: 30 | lint: 31 | name: lint 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v4 35 | 36 | - uses: actions/setup-go@v5 37 | with: 38 | go-version: '1.21.x' 39 | cache: false 40 | 41 | - name: golangci-lint 42 | uses: golangci/golangci-lint-action@v7 43 | with: 44 | version: v2.0 45 | -------------------------------------------------------------------------------- /.github/workflows/release-drafter.yml: -------------------------------------------------------------------------------- 1 | name: Release Drafter 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request_target: 8 | types: [opened, reopened, synchronize] 9 | 10 | permissions: 11 | contents: read 12 | 13 | jobs: 14 | update_release_draft: 15 | permissions: 16 | contents: write 17 | pull-requests: write 18 | runs-on: ubuntu-latest 19 | steps: 20 | - uses: release-drafter/release-drafter@v5 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # General 2 | .DS_Store 3 | 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | 23 | # build and files 24 | .build/ 25 | .release/ 26 | 27 | # local config 28 | config.local.toml 29 | 30 | # Binaries 31 | tork 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Arik Cohen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for building Tork 2 | GITCOMMIT:=$(shell git describe --dirty --always) 3 | BINARY:=tork 4 | SYSTEM:= 5 | CHECKS:=check 6 | BUILDOPTS:=-v 7 | GOPATH?=$(HOME)/go 8 | MAKEPWD:=$(dir $(realpath $(firstword $(MAKEFILE_LIST)))) 9 | CGO_ENABLED?=0 10 | 11 | .PHONY: all 12 | all: tork 13 | 14 | tork: *.go go.* $(wildcard */**/*.go) 15 | CGO_ENABLED=$(CGO_ENABLED) $(SYSTEM) go build $(BUILDOPTS) -ldflags="-s -w -X github.com/runabol/tork.GitCommit=$(GITCOMMIT)" -o $(BINARY) cmd/main.go 16 | 17 | .PHONY: clean 18 | clean: 19 | go clean 20 | rm -f tork 21 | 22 | .PHONY: generate-swagger 23 | generate-swagger: docs/swagger.json 24 | 25 | docs/swagger.json: *.go go.* $(wildcard */**/*.go) 26 | # Note: this command comes from https://github.com/swaggo/swag 27 | swag init --parseDependency -g internal/coordinator/api/api.go --output docs 28 | rm docs/docs.go 29 | rm docs/swagger.yaml 30 | -------------------------------------------------------------------------------- /Makefile.release: -------------------------------------------------------------------------------- 1 | # Makefile for releasing Tork 2 | # 3 | # The release is controlled from version.go. The version found there is 4 | # used to tag the git repo and to build the assets that are uploaded to GitHub. 5 | # 6 | # Steps to release: 7 | # 1. Update version.go. 8 | # 2. make -f Makefile.release release 9 | # 3. make -f Makefile.release github-push 10 | 11 | ifeq (, $(shell which curl)) 12 | $(error "No curl in $$PATH, please install") 13 | endif 14 | 15 | NAME:=tork 16 | VERSION:=$(shell grep 'Version' version.go | awk '{ print $$3 }' | head -n1 | tr -d '"') 17 | GITHUB:=runabol 18 | LINUX_ARCH:=amd64 arm64 19 | 20 | all: 21 | @echo Use the 'release' target to build a release 22 | 23 | release: build tar 24 | 25 | .PHONY: build 26 | build: 27 | @go version 28 | @echo Cleaning old builds 29 | @rm -rf .build && mkdir .build 30 | @echo Building: darwin/amd64 - $(VERSION) 31 | mkdir -p .build/darwin/amd64 && $(MAKE) tork BINARY=.build/darwin/amd64/$(NAME) SYSTEM="GOOS=darwin GOARCH=amd64" BUILDOPTS="" 32 | @echo Building: darwin/arm64 - $(VERSION) 33 | mkdir -p .build/darwin/arm64 && $(MAKE) tork BINARY=.build/darwin/arm64/$(NAME) SYSTEM="GOOS=darwin GOARCH=arm64" BUILDOPTS="" 34 | @echo Building: windows/amd64 - $(VERSION) 35 | mkdir -p .build/windows/amd64 && $(MAKE) tork BINARY=.build/windows/amd64/$(NAME).exe SYSTEM="GOOS=windows GOARCH=amd64" BUILDOPTS="" 36 | @echo Building: linux/$(LINUX_ARCH) - $(VERSION) ;\ 37 | for arch in $(LINUX_ARCH); do \ 38 | mkdir -p .build/linux/$$arch && $(MAKE) tork BINARY=.build/linux/$$arch/$(NAME) SYSTEM="GOOS=linux GOARCH=$$arch" BUILDOPTS="" ;\ 39 | done 40 | 41 | .PHONY: tar 42 | tar: 43 | @echo Cleaning old releases 44 | @rm -rf .release && mkdir .release 45 | gtar -zcf .release/$(NAME)_$(VERSION)_darwin_amd64.tgz -C .build/darwin/amd64 $(NAME) 46 | gtar -zcf .release/$(NAME)_$(VERSION)_darwin_arm64.tgz -C .build/darwin/arm64 $(NAME) 47 | gtar -zcf .release/$(NAME)_$(VERSION)_windows_amd64.tgz -C .build/windows/amd64 $(NAME).exe 48 | for arch in $(LINUX_ARCH); do \ 49 | gtar -zcf .release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C .build/linux/$$arch $(NAME) ;\ 50 | done 51 | 52 | .PHONY: github-push 53 | github-push: 54 | ifeq ($(GITHUB_ACCESS_TOKEN),) 55 | $(error "Please set the GITHUB_ACCESS_TOKEN environment 
variable") 56 | else 57 | @echo Releasing: $(VERSION) 58 | @$(eval RELEASE:=$(shell curl -s -H "Authorization: token ${GITHUB_ACCESS_TOKEN}" "https://api.github.com/repos/$(GITHUB)/$(NAME)/releases" | jq '.[0]' | grep -m 1 '"id"' | tr -cd '[[:digit:]]')) 59 | @echo ReleaseID: $(RELEASE) 60 | @( cd .release; for asset in `ls -A *tgz`; do \ 61 | echo $$asset; \ 62 | curl -o /dev/null -X POST \ 63 | -H "Content-Type: application/gzip" \ 64 | -H "Authorization: token ${GITHUB_ACCESS_TOKEN}" \ 65 | --data-binary "@$$asset" \ 66 | "https://uploads.github.com/repos/$(GITHUB)/$(NAME)/releases/$(RELEASE)/assets?name=$${asset}" ; \ 67 | done ) 68 | @( cd .release; for asset in `ls -A *tgz`; do \ 69 | sha256sum $$asset > $$asset.sha256; \ 70 | done ) 71 | @( cd .release; for asset in `ls -A *sha256`; do \ 72 | echo $$asset; \ 73 | curl -o /dev/null -X POST \ 74 | -H "Content-Type: text/plain" \ 75 | -H "Authorization: token ${GITHUB_ACCESS_TOKEN}" \ 76 | --data-binary "@$$asset" \ 77 | "https://uploads.github.com/repos/$(GITHUB)/$(NAME)/releases/$(RELEASE)/assets?name=$${asset}" ; \ 78 | done ) 79 | endif 80 | 81 | .PHONY: version 82 | version: 83 | @echo $(VERSION) 84 | 85 | .PHONY: clean 86 | clean: 87 | rm -rf release 88 | rm -rf build -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

2 | [tork logo]
3 |
20 | Features • Installation • Documentation • Quick Start • REST API • Web UI
26 |
27 | 28 | Tork is a highly scalable, general-purpose workflow engine. 29 | 30 | ## Features 31 | 32 |

33 | [image: "tork"]
35 |

36 | 37 | - [REST API](https://www.tork.run/rest) 38 | - [Highly extensible](https://www.tork.run/extend) 39 | - Horizontally scalable 40 | - Task isolation - tasks are executed within a container to provide isolation, idempotency, and in order to enforce resource [limits](https://www.tork.run/tasks#limits) 41 | - Automatic recovery of tasks in the event of a worker crash 42 | - Supports both stand-alone and [distributed](https://www.tork.run/installation#running-in-a-distributed-mode) setup 43 | - [Retry failed tasks](https://www.tork.run/tasks#retry) 44 | - [Middleware](https://www.tork.run/extend#middleware) 45 | - Support for [Docker](https://www.tork.run/runtime#docker), [Podman](https://www.tork.run/runtime#podman) and [Shell](https://www.tork.run/runtime#shell) runtimes. 46 | - [Webhooks](https://www.tork.run/jobs#webhooks) 47 | - [Pre/Post tasks](https://www.tork.run/tasks#pre-post-tasks) 48 | - No single point of failure 49 | - Task timeout 50 | - [Full-text search](https://www.tork.run/rest#list-jobs) 51 | - [Expression Language](https://www.tork.run/tasks#expressions) 52 | - [Conditional Tasks](https://www.tork.run/tasks#expressions) 53 | - [Parallel Tasks](https://www.tork.run/tasks#parallel-task) 54 | - [For-Each Task](https://www.tork.run/tasks#each-task) 55 | - [Subjob Task](https://www.tork.run/tasks#sub-job-task) 56 | - [Task Priority](https://www.tork.run/tasks#priority) 57 | - [Secrets](https://www.tork.run/tasks#secrets) 58 | - [Scheduled Jobs](https://tork.run/jobs#scheduled-jobs) 59 | - [Web UI](https://www.tork.run/web-ui) 60 | 61 | ## Documentation 62 | 63 | See [tork.run](https://tork.run) for the full documentation. 64 | 65 | ## Quick Start 66 | 67 | See the [Quick Start Guide](https://www.tork.run/quick-start) for detailed instructions on how to get up and running with Tork. 68 | 69 | ## REST API 70 | 71 | See the [REST API](https://www.tork.run/rest) documentation. 72 | 73 | ## Web UI 74 | 75 | [Tork Web](https://www.tork.run/web-ui) is a web based tool for interacting with Tork. 76 | 77 | ![Web UI](docs/webui.png "Web UI") 78 | 79 | ## License 80 | 81 | Copyright (c) 2023-present Arik Cohen. Tork is free and open-source software licensed under the MIT License. 82 | -------------------------------------------------------------------------------- /broker/broker.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | type Provider func() (Broker, error) 10 | 11 | const ( 12 | BROKER_INMEMORY = "inmemory" 13 | BROKER_RABBITMQ = "rabbitmq" 14 | TOPIC_JOB = "job.*" 15 | TOPIC_JOB_COMPLETED = "job.completed" 16 | TOPIC_JOB_FAILED = "job.failed" 17 | TOPIC_SCHEDULED_JOB = "scheduled.job" 18 | ) 19 | 20 | // Broker is the message-queue, pub/sub mechanism used for delivering tasks. 
21 | type Broker interface { 22 | PublishTask(ctx context.Context, qname string, t *tork.Task) error 23 | SubscribeForTasks(qname string, handler func(t *tork.Task) error) error 24 | 25 | PublishTaskProgress(ctx context.Context, t *tork.Task) error 26 | SubscribeForTaskProgress(handler func(t *tork.Task) error) error 27 | 28 | PublishHeartbeat(ctx context.Context, n *tork.Node) error 29 | SubscribeForHeartbeats(handler func(n *tork.Node) error) error 30 | 31 | PublishJob(ctx context.Context, j *tork.Job) error 32 | SubscribeForJobs(handler func(j *tork.Job) error) error 33 | 34 | PublishEvent(ctx context.Context, topic string, event any) error 35 | SubscribeForEvents(ctx context.Context, pattern string, handler func(event any)) error 36 | 37 | PublishTaskLogPart(ctx context.Context, p *tork.TaskLogPart) error 38 | SubscribeForTaskLogPart(handler func(p *tork.TaskLogPart)) error 39 | 40 | Queues(ctx context.Context) ([]QueueInfo, error) 41 | HealthCheck(ctx context.Context) error 42 | Shutdown(ctx context.Context) error 43 | } 44 | -------------------------------------------------------------------------------- /broker/log.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | ) 10 | 11 | type LogShipper struct { 12 | Broker Broker 13 | TaskID string 14 | part int 15 | q chan []byte 16 | } 17 | 18 | func NewLogShipper(broker Broker, taskID string) *LogShipper { 19 | f := &LogShipper{ 20 | Broker: broker, 21 | TaskID: taskID, 22 | q: make(chan []byte, 1000), 23 | } 24 | go f.startFlushTimer() 25 | return f 26 | } 27 | 28 | func (r *LogShipper) Write(p []byte) (int, error) { 29 | pc := make([]byte, len(p)) 30 | copy(pc, p) 31 | r.q <- pc 32 | return len(p), nil 33 | } 34 | 35 | func (r *LogShipper) startFlushTimer() { 36 | ticker := time.NewTicker(time.Second) 37 | buffer := make([]byte, 0) 38 | for { 39 | select { 40 | case p := <-r.q: 41 | buffer = append(buffer, p...) 
42 | case <-ticker.C: 43 | if len(buffer) > 0 { 44 | r.part = r.part + 1 45 | if err := r.Broker.PublishTaskLogPart(context.Background(), &tork.TaskLogPart{ 46 | Number: r.part, 47 | TaskID: r.TaskID, 48 | Contents: string(buffer), 49 | }); err != nil { 50 | log.Error().Err(err).Msgf("error forwarding task log part") 51 | } 52 | buffer = buffer[:0] // clear buffer 53 | } 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /broker/log_test.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestForwardTimeout(t *testing.T) { 13 | b := NewInMemoryBroker() 14 | 15 | processed := make(chan any) 16 | err := b.SubscribeForTaskLogPart(func(p *tork.TaskLogPart) { 17 | assert.Equal(t, "hello\n", p.Contents) 18 | processed <- 1 19 | }) 20 | assert.NoError(t, err) 21 | 22 | fwd := NewLogShipper(b, "some-task-id") 23 | for i := 0; i < 1; i++ { 24 | _, err = fwd.Write([]byte("hello\n")) 25 | assert.NoError(t, err) 26 | <-time.After(time.Millisecond * 1100) 27 | } 28 | 29 | <-processed 30 | } 31 | 32 | func TestForwardBatch(t *testing.T) { 33 | b := NewInMemoryBroker() 34 | 35 | processed := make(chan any) 36 | err := b.SubscribeForTaskLogPart(func(p *tork.TaskLogPart) { 37 | assert.Equal(t, "hello 0\nhello 1\nhello 2\nhello 3\nhello 4\n", p.Contents) 38 | close(processed) 39 | }) 40 | assert.NoError(t, err) 41 | 42 | fwd := NewLogShipper(b, "some-task-id") 43 | 44 | for i := 0; i < 5; i++ { 45 | _, err = fmt.Fprintf(fwd, "hello %d\n", i) 46 | assert.NoError(t, err) 47 | } 48 | 49 | <-processed 50 | } 51 | 52 | func TestLogShipperWriteBufferFull(t *testing.T) { 53 | b := NewInMemoryBroker() 54 | err := b.SubscribeForTaskLogPart(func(p *tork.TaskLogPart) { 55 | 56 | }) 57 | assert.NoError(t, err) 58 | fwd := NewLogShipper(b, "some-task-id") 59 | for i := 0; i < 10_000; i++ { 60 | _, err := fwd.Write([]byte("some log message\n")) 61 | assert.NoError(t, err) 62 | } 63 | } 64 | -------------------------------------------------------------------------------- /broker/queues.go: -------------------------------------------------------------------------------- 1 | package broker 2 | 3 | import ( 4 | "strings" 5 | 6 | "golang.org/x/exp/slices" 7 | ) 8 | 9 | const ( 10 | // The queue used by the API to insert new tasks into 11 | QUEUE_PENDING = "pending" 12 | // The queue used by workers to notify the coordinator 13 | // that a task has began processing 14 | QUEUE_STARTED = "started" 15 | // The queue used by workers to send tasks to when 16 | // a task completes successfully 17 | QUEUE_COMPLETED = "completed" 18 | // The queue used by workers to send tasks to when an error 19 | // occurs in processing 20 | QUEUE_ERROR = "error" 21 | // The default queue for tasks 22 | QUEUE_DEFAULT = "default" 23 | // The queue used by workers to periodically 24 | // notify the coordinator about their aliveness 25 | QUEUE_HEARTBEAT = "heartbeat" 26 | // The queue used by the Coordinator for job creation 27 | // and job-related state changes (e.g. 
cancellation) 28 | QUEUE_JOBS = "jobs" 29 | // The queue used by workers to send task 30 | // logs to the Coordinator 31 | QUEUE_LOGS = "logs" 32 | // The queue used by workers to send task 33 | // progress to the Coordinator 34 | QUEUE_PROGRESS = "progress" 35 | // The prefix used for queues that 36 | // are exclusive 37 | QUEUE_EXCLUSIVE_PREFIX = "x-" 38 | ) 39 | 40 | type QueueInfo struct { 41 | Name string `json:"name"` 42 | Size int `json:"size"` 43 | Subscribers int `json:"subscribers"` 44 | Unacked int `json:"unacked"` 45 | } 46 | 47 | func IsCoordinatorQueue(qname string) bool { 48 | coordQueues := []string{ 49 | QUEUE_PENDING, 50 | QUEUE_STARTED, 51 | QUEUE_COMPLETED, 52 | QUEUE_ERROR, 53 | QUEUE_HEARTBEAT, 54 | QUEUE_JOBS, 55 | QUEUE_LOGS, 56 | QUEUE_PROGRESS, 57 | } 58 | return slices.Contains(coordQueues, qname) 59 | } 60 | 61 | func IsWorkerQueue(qname string) bool { 62 | return !IsCoordinatorQueue(qname) 63 | } 64 | 65 | func IsTaskQueue(qname string) bool { 66 | return !IsCoordinatorQueue(qname) && !strings.HasPrefix(qname, QUEUE_EXCLUSIVE_PREFIX) 67 | } 68 | -------------------------------------------------------------------------------- /broker/queues_test.go: -------------------------------------------------------------------------------- 1 | package broker_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork/broker" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestIsWorkerQueue(t *testing.T) { 12 | assert.Equal(t, true, broker.IsWorkerQueue("some-special-work-queues")) 13 | assert.Equal(t, true, broker.IsWorkerQueue(broker.QUEUE_DEFAULT)) 14 | assert.Equal(t, false, broker.IsWorkerQueue(broker.QUEUE_COMPLETED)) 15 | assert.Equal(t, false, broker.IsWorkerQueue(broker.QUEUE_ERROR)) 16 | assert.Equal(t, false, broker.IsWorkerQueue(broker.QUEUE_STARTED)) 17 | assert.Equal(t, false, broker.IsWorkerQueue(broker.QUEUE_PENDING)) 18 | } 19 | 20 | func TestIsCoordinatorQueue(t *testing.T) { 21 | assert.Equal(t, false, broker.IsCoordinatorQueue("some-special-work-queues")) 22 | assert.Equal(t, false, broker.IsCoordinatorQueue(broker.QUEUE_DEFAULT)) 23 | assert.Equal(t, true, broker.IsCoordinatorQueue(broker.QUEUE_COMPLETED)) 24 | assert.Equal(t, true, broker.IsCoordinatorQueue(broker.QUEUE_ERROR)) 25 | assert.Equal(t, true, broker.IsCoordinatorQueue(broker.QUEUE_STARTED)) 26 | assert.Equal(t, true, broker.IsCoordinatorQueue(broker.QUEUE_PENDING)) 27 | } 28 | -------------------------------------------------------------------------------- /cli/banner.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/fatih/color" 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/conf" 10 | ) 11 | 12 | func displayBanner() { 13 | mode := conf.StringDefault("cli.banner.mode", "console") 14 | if mode == "off" { 15 | return 16 | } 17 | banner := color.WhiteString(fmt.Sprintf(` 18 | _______ _______ ______ ___ _ 19 | | || || _ | | | | | 20 | |_ _|| _ || | || | |_| | 21 | | | | | | || |_||_ | _| 22 | | | | |_| || __ || |_ 23 | | | | || | | || _ | 24 | |___| |_______||___| |_||___| |_| 25 | 26 | %s (%s) 27 | `, tork.Version, tork.GitCommit)) 28 | 29 | if mode == "console" { 30 | fmt.Println(banner) 31 | } else { 32 | log.Info().Msg(banner) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /cli/cli.go: -------------------------------------------------------------------------------- 1 | 
package cli 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/runabol/tork/internal/logging" 7 | "github.com/runabol/tork/internal/reexec" 8 | ucli "github.com/urfave/cli/v2" 9 | ) 10 | 11 | type CLI struct { 12 | app *ucli.App 13 | } 14 | 15 | func New() *CLI { 16 | app := &ucli.App{ 17 | Name: "tork", 18 | Usage: "a distributed workflow engine", 19 | } 20 | c := &CLI{ 21 | app: app, 22 | } 23 | app.Before = c.before 24 | app.Commands = c.commands() 25 | return c 26 | } 27 | 28 | func (c *CLI) Run() error { 29 | if reexec.Init() { 30 | return nil 31 | } 32 | return c.app.Run(os.Args) 33 | } 34 | 35 | func (c *CLI) before(ctx *ucli.Context) error { 36 | displayBanner() 37 | 38 | if err := logging.SetupLogging(); err != nil { 39 | return err 40 | } 41 | return nil 42 | } 43 | 44 | func (c *CLI) commands() []*ucli.Command { 45 | return []*ucli.Command{ 46 | c.runCmd(), 47 | c.migrationCmd(), 48 | c.healthCmd(), 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /cli/health.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/runabol/tork/conf" 11 | ucli "github.com/urfave/cli/v2" 12 | ) 13 | 14 | func (c *CLI) healthCmd() *ucli.Command { 15 | return &ucli.Command{ 16 | Name: "health", 17 | Usage: "Perform a health check", 18 | Action: health, 19 | } 20 | } 21 | 22 | func health(_ *ucli.Context) error { 23 | chk, err := http.Get(fmt.Sprintf("%s/health", conf.StringDefault("endpoint", "http://localhost:8000"))) 24 | if err != nil { 25 | return err 26 | } 27 | if chk.StatusCode != http.StatusOK { 28 | return errors.Errorf("Health check failed. Status Code: %d", chk.StatusCode) 29 | } 30 | body, err := io.ReadAll(chk.Body) 31 | if err != nil { 32 | return errors.Wrapf(err, "error reading body") 33 | } 34 | 35 | type resp struct { 36 | Status string `json:"status"` 37 | } 38 | r := resp{} 39 | 40 | if err := json.Unmarshal(body, &r); err != nil { 41 | return errors.Wrapf(err, "error unmarshalling body") 42 | } 43 | 44 | fmt.Printf("Status: %s\n", r.Status) 45 | 46 | return nil 47 | } 48 | -------------------------------------------------------------------------------- /cli/migrate.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "github.com/rs/zerolog/log" 6 | "github.com/runabol/tork/conf" 7 | "github.com/runabol/tork/datastore" 8 | "github.com/runabol/tork/datastore/postgres" 9 | schema "github.com/runabol/tork/db/postgres" 10 | ucli "github.com/urfave/cli/v2" 11 | ) 12 | 13 | func (c *CLI) migrationCmd() *ucli.Command { 14 | return &ucli.Command{ 15 | Name: "migration", 16 | Usage: "Run the db migration script", 17 | Action: migration, 18 | } 19 | } 20 | 21 | func migration(ctx *ucli.Context) error { 22 | dstype := conf.StringDefault("datastore.type", datastore.DATASTORE_POSTGRES) 23 | switch dstype { 24 | case datastore.DATASTORE_POSTGRES: 25 | dsn := conf.StringDefault( 26 | "datastore.postgres.dsn", 27 | "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable", 28 | ) 29 | pg, err := postgres.NewPostgresDataStore(dsn) 30 | if err != nil { 31 | return err 32 | } 33 | if err := pg.ExecScript(schema.SCHEMA); err != nil { 34 | return errors.Wrapf(err, "error when trying to create db schema") 35 | } 36 | default: 37 | return errors.Errorf("can't perform db migration 
on: %s", dstype) 38 | } 39 | log.Info().Msg("migration completed!") 40 | return nil 41 | } 42 | -------------------------------------------------------------------------------- /cli/run.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/runabol/tork/engine" 8 | ucli "github.com/urfave/cli/v2" 9 | ) 10 | 11 | func (c *CLI) runCmd() *ucli.Command { 12 | return &ucli.Command{ 13 | Name: "run", 14 | Usage: "Run Tork", 15 | UsageText: "tork run mode (standalone|coordinator|worker)", 16 | Action: c.run, 17 | } 18 | } 19 | func (c *CLI) run(ctx *ucli.Context) error { 20 | mode := ctx.Args().First() 21 | if mode == "" { 22 | if err := ucli.ShowSubcommandHelp(ctx); err != nil { 23 | return err 24 | } 25 | fmt.Println("missing required argument: mode") 26 | os.Exit(1) 27 | } 28 | engine.SetMode(engine.Mode(mode)) 29 | if err := engine.Run(); err != nil { 30 | return err 31 | } 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/runabol/tork/cli" 8 | "github.com/runabol/tork/conf" 9 | ) 10 | 11 | func main() { 12 | if err := conf.LoadConfig(); err != nil { 13 | fmt.Println(err) 14 | os.Exit(1) 15 | } 16 | 17 | app := cli.New() 18 | 19 | if err := app.Run(); err != nil { 20 | fmt.Println(err) 21 | os.Exit(1) 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /conf/conf.go: -------------------------------------------------------------------------------- 1 | package conf 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | "time" 8 | 9 | "github.com/knadh/koanf/parsers/toml" 10 | "github.com/knadh/koanf/providers/env" 11 | "github.com/knadh/koanf/providers/file" 12 | "github.com/knadh/koanf/v2" 13 | "github.com/pkg/errors" 14 | "github.com/rs/zerolog" 15 | "github.com/rs/zerolog/log" 16 | ) 17 | 18 | var konf = koanf.New(".") 19 | var logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) 20 | 21 | var defaultConfigPaths = []string{ 22 | "config.local.toml", 23 | "config.toml", 24 | "~/tork/config.toml", 25 | "/etc/tork/config.toml", 26 | } 27 | 28 | func LoadConfig() error { 29 | var paths []string 30 | userConfig := os.Getenv("TORK_CONFIG") 31 | if userConfig != "" { 32 | paths = []string{userConfig} 33 | } else { 34 | paths = defaultConfigPaths 35 | } 36 | // load configs from file paths 37 | var loaded bool 38 | for _, f := range paths { 39 | err := konf.Load(file.Provider(f), toml.Parser()) 40 | if errors.Is(err, os.ErrNotExist) { 41 | continue 42 | } 43 | if err != nil { 44 | return errors.Wrapf(err, "error loading config from %s", f) 45 | } 46 | logger.Info().Msgf("Config loaded from %s", f) 47 | loaded = true 48 | break 49 | } 50 | if !loaded && userConfig != "" { 51 | return errors.Errorf(fmt.Sprintf("could not find config file in: %s", userConfig)) 52 | } 53 | // load configs from env vars 54 | if err := konf.Load(env.Provider("TORK_", ".", func(s string) string { 55 | return strings.ReplaceAll(strings.ToLower(strings.TrimPrefix(s, "TORK_")), "_", ".") 56 | }), nil); err != nil { 57 | return errors.Wrapf(err, "error loading config from env") 58 | } 59 | return nil 60 | } 61 | 62 | func IntMap(key string) map[string]int { 63 | return konf.IntMap(key) 64 | } 65 | 66 | func Unmarshal(key string, o any) error { 67 | 
return konf.Unmarshal(key, o) 68 | } 69 | 70 | func BoolMap(key string) map[string]bool { 71 | return konf.BoolMap(key) 72 | } 73 | 74 | func StringMap(key string) map[string]string { 75 | return konf.StringMap(key) 76 | } 77 | 78 | func Strings(key string) []string { 79 | strs := konf.Strings(key) 80 | if len(strs) > 0 { 81 | return strs 82 | } 83 | str := konf.String(key) 84 | if str == "" { 85 | return []string{} 86 | } 87 | return strings.Split(str, ",") 88 | 89 | } 90 | 91 | func DurationDefault(key string, dv time.Duration) time.Duration { 92 | v := konf.Get(key) 93 | if v == nil { 94 | return dv 95 | } 96 | return konf.Duration(key) 97 | } 98 | 99 | func StringsDefault(key string, dv []string) []string { 100 | v := konf.Get(key) 101 | if v == nil { 102 | return dv 103 | } 104 | return Strings(key) 105 | } 106 | 107 | func IntDefault(key string, dv int) int { 108 | v := konf.Get(key) 109 | if v == nil { 110 | return dv 111 | } 112 | return konf.Int(key) 113 | } 114 | 115 | func String(key string) string { 116 | return konf.String(key) 117 | } 118 | 119 | func StringDefault(key, dv string) string { 120 | v := String(key) 121 | if v != "" { 122 | return v 123 | } 124 | return dv 125 | } 126 | 127 | func Bool(key string) bool { 128 | return konf.Bool(key) 129 | } 130 | 131 | func BoolDefault(key string, dv bool) bool { 132 | v := konf.Get(key) 133 | if v == nil { 134 | return dv 135 | } 136 | return Bool(key) 137 | } 138 | -------------------------------------------------------------------------------- /configs/config.ci.toml: -------------------------------------------------------------------------------- 1 | [datastore] 2 | type = "postgres" 3 | 4 | [datastore.postgres] 5 | dsn = "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable" 6 | -------------------------------------------------------------------------------- /configs/config.compose.toml: -------------------------------------------------------------------------------- 1 | [datastore] 2 | type = "postgres" 3 | 4 | [datastore.postgres] 5 | dsn = "host=postgres user=tork password=tork dbname=tork port=5432 sslmode=disable" 6 | -------------------------------------------------------------------------------- /configs/config.distributed.toml: -------------------------------------------------------------------------------- 1 | [broker] 2 | type = "rabbitmq" 3 | 4 | [broker.rabbitmq] 5 | url = "amqp://guest:guest@localhost:5672/" 6 | 7 | [datastore] 8 | type = "postgres" 9 | 10 | [datastore.postgres] 11 | dsn = "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable" 12 | -------------------------------------------------------------------------------- /configs/sample.config.toml: -------------------------------------------------------------------------------- 1 | [cli] 2 | banner.mode = "console" # off | console | log 3 | 4 | [client] 5 | endpoint = "http://localhost:8000" 6 | 7 | [logging] 8 | level = "debug" # debug | info | warn | error 9 | format = "pretty" # pretty | json 10 | 11 | [broker] 12 | type = "inmemory" # inmemory | rabbitmq 13 | 14 | [broker.rabbitmq] 15 | url = "amqp://guest:guest@localhost:5672/" 16 | consumer.timeout = "30m" 17 | management.url = "" # default: http://{rabbit_host}:15672/ 18 | durable.queues = false 19 | 20 | [datastore] 21 | type = "postgres" 22 | 23 | [datastore.retention] 24 | logs.duration = "168h" # 1 week 25 | jobs.duration = "8760h" # 1 year 26 | 27 | [datastore.postgres] 28 | dsn = "host=localhost user=tork password=tork dbname=tork port=5432 
sslmode=disable" 29 | 30 | [coordinator] 31 | address = "localhost:8000" 32 | name = "Coordinator" 33 | 34 | [coordinator.api] 35 | endpoints.health = true # turn on|off the /health endpoint 36 | endpoints.jobs = true # turn on|off the /jobs endpoints 37 | endpoints.tasks = true # turn on|off the /tasks endpoints 38 | endpoints.nodes = true # turn on|off the /nodes endpoint 39 | endpoints.queues = true # turn on|off the /queues endpoint 40 | endpoints.metrics = true # turn on|off the /metrics endpoint 41 | endpoints.users = true # turn on|off the /users endpoints 42 | 43 | [coordinator.queues] 44 | completed = 1 # completed queue consumers 45 | error = 1 # error queue consumers 46 | pending = 1 # pending queue consumers 47 | started = 1 # started queue consumers 48 | heartbeat = 1 # heartbeat queue consumers 49 | jobs = 1 # jobs queue consumers 50 | 51 | # cors middleware 52 | [middleware.web.cors] 53 | enabled = false 54 | origins = "*" 55 | methods = "*" 56 | credentials = false 57 | headers = "*" 58 | 59 | # basic auth middleware 60 | [middleware.web.basicauth] 61 | enabled = false 62 | 63 | [middleware.web.keyauth] 64 | enabled = false 65 | key = "" # if left blank, it will auto-generate a key and print it to the logs on startup 66 | 67 | [middleware.web] 68 | bodylimit = "500K" # Limit can be specified as 4x, where x is one of the multiple from K, M, G, T or P. 69 | 70 | 71 | # rate limiter middleware 72 | [middleware.web.ratelimit] 73 | enabled = false 74 | rps = 20 # requests per second per IP 75 | 76 | # request logging 77 | [middleware.web.logger] 78 | enabled = true 79 | level = "DEBUG" # TRACE|DEBUG|INFO|WARN|ERROR 80 | skip = ["GET /health"] # supports wildcards (*) 81 | 82 | [middleware.job.redact] 83 | enabled = false 84 | 85 | [middleware.task.hostenv] 86 | vars = [ 87 | ] # list of host env vars to inject into tasks, supports aliases (e.g. SOME_HOST_VAR:OTHER_VAR) 88 | 89 | 90 | [worker] 91 | address = "localhost:8001" 92 | name = "Worker" 93 | 94 | [worker.queues] 95 | default = 1 # numbers of concurrent subscribers 96 | 97 | # default task limits 98 | [worker.limits] 99 | cpus = "" # supports fractions 100 | memory = "" # e.g. 100m 101 | timeout = "" # e.g. 3h 102 | 103 | 104 | [mounts.bind] 105 | allowed = false 106 | sources = [ 107 | ] # a list of paths that are allowed as mount sources. if empty all sources are allowed. 
108 | 109 | [mounts.temp] 110 | dir = "/tmp" 111 | 112 | [runtime] 113 | type = "docker" # docker | shell 114 | 115 | [runtime.shell] 116 | cmd = ["bash", "-c"] # the shell command used to execute the run script 117 | uid = "" # set the uid for the the task process (recommended) 118 | gid = "" # set the gid for the the task process (recommended) 119 | 120 | [runtime.docker] 121 | config = "" 122 | privileged = false # run containers in privileged mode (not recommended) 123 | image.ttl = "24h" # Time-to-live for cached images since their last use 124 | 125 | [runtime.podman] 126 | privileged = false # run containers in privileged mode (not recommended) -------------------------------------------------------------------------------- /datastore/datastore.go: -------------------------------------------------------------------------------- 1 | package datastore 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/runabol/tork" 8 | ) 9 | 10 | type Provider func() (Datastore, error) 11 | 12 | var ( 13 | ErrTaskNotFound = errors.New("task not found") 14 | ErrNodeNotFound = errors.New("node not found") 15 | ErrJobNotFound = errors.New("job not found") 16 | ErrScheduledJobNotFound = errors.New("scheduled job not found") 17 | ErrUserNotFound = errors.New("user not found") 18 | ErrRoleNotFound = errors.New("role not found") 19 | ErrContextNotFound = errors.New("context not found") 20 | ) 21 | 22 | const ( 23 | DATASTORE_POSTGRES = "postgres" 24 | ) 25 | 26 | type Datastore interface { 27 | CreateTask(ctx context.Context, t *tork.Task) error 28 | UpdateTask(ctx context.Context, id string, modify func(u *tork.Task) error) error 29 | GetTaskByID(ctx context.Context, id string) (*tork.Task, error) 30 | GetActiveTasks(ctx context.Context, jobID string) ([]*tork.Task, error) 31 | GetNextTask(ctx context.Context, parentTaskID string) (*tork.Task, error) 32 | CreateTaskLogPart(ctx context.Context, p *tork.TaskLogPart) error 33 | GetTaskLogParts(ctx context.Context, taskID, q string, page, size int) (*Page[*tork.TaskLogPart], error) 34 | 35 | CreateNode(ctx context.Context, n *tork.Node) error 36 | UpdateNode(ctx context.Context, id string, modify func(u *tork.Node) error) error 37 | GetNodeByID(ctx context.Context, id string) (*tork.Node, error) 38 | GetActiveNodes(ctx context.Context) ([]*tork.Node, error) 39 | 40 | CreateJob(ctx context.Context, j *tork.Job) error 41 | UpdateJob(ctx context.Context, id string, modify func(u *tork.Job) error) error 42 | GetJobByID(ctx context.Context, id string) (*tork.Job, error) 43 | GetJobLogParts(ctx context.Context, jobID, q string, page, size int) (*Page[*tork.TaskLogPart], error) 44 | GetJobs(ctx context.Context, currentUser, q string, page, size int) (*Page[*tork.JobSummary], error) 45 | 46 | CreateScheduledJob(ctx context.Context, s *tork.ScheduledJob) error 47 | GetActiveScheduledJobs(ctx context.Context) ([]*tork.ScheduledJob, error) 48 | GetScheduledJobs(ctx context.Context, currentUser string, page, size int) (*Page[*tork.ScheduledJobSummary], error) 49 | GetScheduledJobByID(ctx context.Context, id string) (*tork.ScheduledJob, error) 50 | UpdateScheduledJob(ctx context.Context, id string, modify func(u *tork.ScheduledJob) error) error 51 | DeleteScheduledJob(ctx context.Context, id string) error 52 | 53 | CreateUser(ctx context.Context, u *tork.User) error 54 | GetUser(ctx context.Context, username string) (*tork.User, error) 55 | 56 | CreateRole(ctx context.Context, r *tork.Role) error 57 | GetRole(ctx context.Context, id string) 
(*tork.Role, error) 58 | GetRoles(ctx context.Context) ([]*tork.Role, error) 59 | GetUserRoles(ctx context.Context, userID string) ([]*tork.Role, error) 60 | AssignRole(ctx context.Context, userID, roleID string) error 61 | UnassignRole(ctx context.Context, userID, roleID string) error 62 | 63 | GetMetrics(ctx context.Context) (*tork.Metrics, error) 64 | 65 | WithTx(ctx context.Context, f func(tx Datastore) error) error 66 | 67 | HealthCheck(ctx context.Context) error 68 | } 69 | 70 | type Page[T any] struct { 71 | Items []T `json:"items"` 72 | Number int `json:"number"` 73 | Size int `json:"size"` 74 | TotalPages int `json:"totalPages"` 75 | TotalItems int `json:"totalItems"` 76 | } 77 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.1" 2 | services: 3 | postgres: 4 | image: postgres:15.3 5 | restart: always 6 | ports: 7 | - 5432:5432 8 | environment: 9 | POSTGRES_PASSWORD: tork 10 | POSTGRES_USER: tork 11 | POSTGRES_DB: tork 12 | rabbitmq: 13 | image: rabbitmq:3-management 14 | restart: always 15 | ports: 16 | - 5672:5672 17 | - 15672:15672 18 | swagger: 19 | image: swaggerapi/swagger-ui 20 | restart: always 21 | ports: 22 | - 8200:8080 23 | environment: 24 | SWAGGER_JSON: /code/docs/swagger.json 25 | volumes: 26 | - .:/code 27 | registry: 28 | image: registry:2 29 | restart: always 30 | ports: 31 | - 5001:5000 32 | web: 33 | image: runabol/tork-web 34 | platform: linux/amd64 35 | restart: always 36 | ports: 37 | - 8100:8100 38 | extra_hosts: 39 | - "host.docker.internal:host-gateway" 40 | environment: 41 | BACKEND_URL: "http://host.docker.internal:8000" 42 | migration: 43 | image: golang:1.23 44 | working_dir: /code 45 | environment: 46 | - TORK_CONFIG=configs/config.compose.toml 47 | command: | 48 | go run cmd/main.go migration 49 | volumes: 50 | - .:/code 51 | depends_on: 52 | - postgres 53 | -------------------------------------------------------------------------------- /docs/cli_v3.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/runabol/tork/1c5299c29b2f233f02c03cb6320382ded23cf6f1/docs/cli_v3.jpg -------------------------------------------------------------------------------- /docs/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/webui.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/runabol/tork/1c5299c29b2f233f02c03cb6320382ded23cf6f1/docs/webui.png -------------------------------------------------------------------------------- /engine/broker_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork/broker" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func Test_createBroker(t *testing.T) { 11 | eng := New(Config{Mode: ModeStandalone}) 12 | assert.Equal(t, StateIdle, eng.state) 13 | b, err := eng.createBroker(broker.BROKER_INMEMORY) 14 | assert.NoError(t, err) 15 | assert.IsType(t, &broker.InMemoryBroker{}, b) 16 | } 17 | 18 | func Test_createBrokerProvider(t *testing.T) { 19 | eng := New(Config{Mode: ModeStandalone}) 20 | assert.Equal(t, StateIdle, eng.state) 21 | eng.RegisterBrokerProvider("inmem2", func() (broker.Broker, error) { 
22 | return broker.NewInMemoryBroker(), nil 23 | }) 24 | br, err := eng.createBroker("inmem2") 25 | assert.NoError(t, err) 26 | assert.IsType(t, &broker.InMemoryBroker{}, br) 27 | } 28 | -------------------------------------------------------------------------------- /engine/coordinator_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "bytes" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/labstack/echo/v4" 10 | "github.com/rs/zerolog/log" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestCORS(t *testing.T) { 15 | mw := cors() 16 | req, err := http.NewRequest("GET", "/health", nil) 17 | assert.NoError(t, err) 18 | w := httptest.NewRecorder() 19 | ctx := echo.New().NewContext(req, w) 20 | h := func(c echo.Context) error { 21 | return nil 22 | } 23 | x := mw(h) 24 | err = x(ctx) 25 | assert.NoError(t, err) 26 | assert.Equal(t, "Origin", w.Header().Get("Vary")) 27 | } 28 | 29 | func TestLogger(t *testing.T) { 30 | oldLogger := log.Logger 31 | var buf bytes.Buffer 32 | log.Logger = log.Logger.Output(&buf) 33 | defer func() { 34 | log.Logger = oldLogger 35 | }() 36 | mw := logger() 37 | req, err := http.NewRequest("GET", "/jobs", nil) 38 | assert.NoError(t, err) 39 | w := httptest.NewRecorder() 40 | ctx := echo.New().NewContext(req, w) 41 | h := func(c echo.Context) error { 42 | return nil 43 | } 44 | x := mw(h) 45 | err = x(ctx) 46 | assert.NoError(t, err) 47 | assert.NotEmpty(t, buf.String()) 48 | 49 | buf = bytes.Buffer{} 50 | log.Logger = log.Logger.Output(&buf) 51 | 52 | req, err = http.NewRequest("GET", "/health", nil) 53 | assert.NoError(t, err) 54 | w = httptest.NewRecorder() 55 | ctx = echo.New().NewContext(req, w) 56 | 57 | x = mw(h) 58 | err = x(ctx) 59 | assert.NoError(t, err) 60 | assert.Empty(t, buf.String()) 61 | } 62 | -------------------------------------------------------------------------------- /engine/datastore_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork/datastore" 7 | "github.com/runabol/tork/datastore/postgres" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func Test_createDatastore(t *testing.T) { 12 | eng := New(Config{Mode: ModeStandalone}) 13 | assert.Equal(t, StateIdle, eng.state) 14 | ds, err := eng.createDatastore(datastore.DATASTORE_POSTGRES) 15 | assert.NoError(t, err) 16 | assert.IsType(t, &postgres.PostgresDatastore{}, ds) 17 | dsp, ok := ds.(*postgres.PostgresDatastore) 18 | assert.True(t, ok) 19 | assert.NoError(t, dsp.Close()) 20 | } 21 | 22 | func Test_createDatastoreProvider(t *testing.T) { 23 | eng := New(Config{Mode: ModeStandalone}) 24 | assert.Equal(t, StateIdle, eng.state) 25 | 26 | ds, err := postgres.NewTestDatastore() 27 | assert.NoError(t, err) 28 | 29 | eng.RegisterDatastoreProvider("inmem2", func() (datastore.Datastore, error) { 30 | return ds, nil 31 | }) 32 | 33 | ds2, err := eng.createDatastore("inmem2") 34 | assert.NoError(t, err) 35 | assert.IsType(t, &postgres.PostgresDatastore{}, ds2) 36 | assert.NoError(t, ds.Close()) 37 | } 38 | -------------------------------------------------------------------------------- /engine/default.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/runabol/tork/broker" 8 | "github.com/runabol/tork/datastore" 9 | 
"github.com/runabol/tork/input" 10 | "github.com/runabol/tork/middleware/job" 11 | "github.com/runabol/tork/middleware/node" 12 | "github.com/runabol/tork/middleware/task" 13 | "github.com/runabol/tork/middleware/web" 14 | "github.com/runabol/tork/runtime" 15 | ) 16 | 17 | var defaultEngine *Engine = New(Config{}) 18 | 19 | func RegisterWebMiddleware(mw web.MiddlewareFunc) { 20 | defaultEngine.RegisterWebMiddleware(mw) 21 | } 22 | 23 | func RegisterTaskMiddleware(mw task.MiddlewareFunc) { 24 | defaultEngine.RegisterTaskMiddleware(mw) 25 | } 26 | 27 | func RegisterJobMiddleware(mw job.MiddlewareFunc) { 28 | defaultEngine.RegisterJobMiddleware(mw) 29 | } 30 | 31 | func RegisterNodeMiddleware(mw node.MiddlewareFunc) { 32 | defaultEngine.RegisterNodeMiddleware(mw) 33 | } 34 | 35 | func RegisterMounter(runtime, name string, mounter runtime.Mounter) { 36 | defaultEngine.RegisterMounter(runtime, name, mounter) 37 | } 38 | 39 | func RegisterRuntime(rt runtime.Runtime) { 40 | defaultEngine.RegisterRuntime(rt) 41 | } 42 | 43 | func RegisterDatastoreProvider(name string, provider datastore.Provider) { 44 | defaultEngine.RegisterDatastoreProvider(name, provider) 45 | } 46 | 47 | func RegisterBrokerProvider(name string, provider broker.Provider) { 48 | defaultEngine.RegisterBrokerProvider(name, provider) 49 | } 50 | 51 | func RegisterEndpoint(method, path string, handler web.HandlerFunc) { 52 | defaultEngine.RegisterEndpoint(method, path, handler) 53 | } 54 | 55 | func SubmitJob(ctx context.Context, ij *input.Job, listeners ...JobListener) (*tork.Job, error) { 56 | return defaultEngine.SubmitJob(ctx, ij, listeners...) 57 | } 58 | 59 | func Broker() broker.Broker { 60 | return defaultEngine.Broker() 61 | } 62 | 63 | func Datastore() datastore.Datastore { 64 | return defaultEngine.Datastore() 65 | } 66 | 67 | func Start() error { 68 | return defaultEngine.Start() 69 | } 70 | 71 | func Terminate() error { 72 | return defaultEngine.Terminate() 73 | } 74 | 75 | func SetMode(mode Mode) { 76 | defaultEngine.SetMode(mode) 77 | } 78 | 79 | func Run() error { 80 | return defaultEngine.Run() 81 | } 82 | -------------------------------------------------------------------------------- /engine/default_test.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestDefaultRunStandalone(t *testing.T) { 10 | SetMode(ModeStandalone) 11 | 12 | assert.Equal(t, StateIdle, defaultEngine.state) 13 | err := Start() 14 | 15 | assert.NoError(t, err) 16 | assert.Equal(t, StateRunning, defaultEngine.state) 17 | 18 | err = Terminate() 19 | assert.NoError(t, err) 20 | assert.Equal(t, StateTerminated, defaultEngine.state) 21 | } 22 | -------------------------------------------------------------------------------- /engine/locker.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "github.com/runabol/tork/conf" 6 | "github.com/runabol/tork/locker" 7 | ) 8 | 9 | func (e *Engine) initLocker() error { 10 | ltype := conf.StringDefault("locker.type", conf.StringDefault("datastore.type", locker.LOCKER_INMEMORY)) 11 | locker, err := e.createLocker(ltype) 12 | if err != nil { 13 | return err 14 | } 15 | e.locker = locker 16 | return nil 17 | } 18 | 19 | func (e *Engine) createLocker(ltype string) (locker.Locker, error) { 20 | switch ltype { 21 | case locker.LOCKER_INMEMORY: 22 | return 
locker.NewInMemoryLocker(), nil 23 | case locker.LOCKER_POSTGRES: 24 | dsn := conf.StringDefault( 25 | "locker.postgres.dsn", 26 | conf.StringDefault("datastore.postgres.dsn", "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable"), 27 | ) 28 | return locker.NewPostgresLocker(dsn) 29 | default: 30 | return nil, errors.Errorf("unknown locker type: %s", ltype) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /engine/worker.go: -------------------------------------------------------------------------------- 1 | package engine 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | "github.com/runabol/tork/conf" 6 | "github.com/runabol/tork/internal/worker" 7 | "github.com/runabol/tork/middleware/task" 8 | 9 | "github.com/runabol/tork/runtime" 10 | "github.com/runabol/tork/runtime/docker" 11 | "github.com/runabol/tork/runtime/podman" 12 | "github.com/runabol/tork/runtime/shell" 13 | ) 14 | 15 | func (e *Engine) initWorker() error { 16 | // init the runtime 17 | rt, err := e.initRuntime() 18 | if err != nil { 19 | return err 20 | } 21 | // register host env middleware 22 | hostenv, err := task.NewHostEnv(conf.Strings("middleware.task.hostenv.vars")...) 23 | if err != nil { 24 | return err 25 | } 26 | e.cfg.Middleware.Task = append(e.cfg.Middleware.Task, hostenv.Execute) 27 | w, err := worker.NewWorker(worker.Config{ 28 | Name: conf.StringDefault("worker.name", "Worker"), 29 | Broker: e.brokerRef, 30 | Runtime: rt, 31 | Queues: conf.IntMap("worker.queues"), 32 | Limits: worker.Limits{ 33 | DefaultCPUsLimit: conf.String("worker.limits.cpus"), 34 | DefaultMemoryLimit: conf.String("worker.limits.memory"), 35 | DefaultTimeout: conf.String("worker.limits.timeout"), 36 | }, 37 | Address: conf.String("worker.address"), 38 | Middleware: e.cfg.Middleware.Task, 39 | }) 40 | if err != nil { 41 | return errors.Wrapf(err, "error creating worker") 42 | } 43 | if err := w.Start(); err != nil { 44 | return err 45 | } 46 | e.worker = w 47 | return nil 48 | } 49 | 50 | func (e *Engine) initRuntime() (runtime.Runtime, error) { 51 | if e.runtime != nil { 52 | return e.runtime, nil 53 | } 54 | runtimeType := conf.StringDefault("runtime.type", runtime.Docker) 55 | switch runtimeType { 56 | case runtime.Docker: 57 | mounter, ok := e.mounters[runtime.Docker] 58 | if !ok { 59 | mounter = runtime.NewMultiMounter() 60 | } 61 | // register bind mounter 62 | bm := docker.NewBindMounter(docker.BindConfig{ 63 | Allowed: conf.Bool("mounts.bind.allowed"), 64 | Sources: conf.Strings("mounts.bind.sources"), 65 | }) 66 | mounter.RegisterMounter("bind", bm) 67 | // register volume mounter 68 | vm, err := docker.NewVolumeMounter() 69 | if err != nil { 70 | return nil, err 71 | } 72 | mounter.RegisterMounter("volume", vm) 73 | // register tmpfs mounter 74 | mounter.RegisterMounter("tmpfs", docker.NewTmpfsMounter()) 75 | return docker.NewDockerRuntime( 76 | docker.WithMounter(mounter), 77 | docker.WithConfig(conf.String("runtime.docker.config")), 78 | docker.WithBroker(e.brokerRef), 79 | docker.WithPrivileged(conf.Bool("runtime.docker.privileged")), 80 | docker.WithImageTTL(conf.DurationDefault("runtime.docker.image.ttl", docker.DefaultImageTTL)), 81 | ) 82 | case runtime.Shell: 83 | return shell.NewShellRuntime(shell.Config{ 84 | CMD: conf.Strings("runtime.shell.cmd"), 85 | UID: conf.StringDefault("runtime.shell.uid", shell.DEFAULT_UID), 86 | GID: conf.StringDefault("runtime.shell.gid", shell.DEFAULT_GID), 87 | Broker: e.brokerRef, 88 | }), nil 89 | case 
runtime.Podman: 90 | mounter, ok := e.mounters[runtime.Podman] 91 | if !ok { 92 | mounter = runtime.NewMultiMounter() 93 | } 94 | // register bind mounter 95 | bm := docker.NewBindMounter(docker.BindConfig{ 96 | Allowed: conf.Bool("mounts.bind.allowed"), 97 | Sources: conf.Strings("mounts.bind.sources"), 98 | }) 99 | mounter.RegisterMounter("bind", bm) 100 | mounter.RegisterMounter("volume", podman.NewVolumeMounter()) 101 | return podman.NewPodmanRuntime( 102 | podman.WithBroker(e.brokerRef), 103 | podman.WithMounter(mounter), 104 | podman.WithPrivileged(conf.Bool("runtime.podman.privileged")), 105 | ), nil 106 | default: 107 | return nil, errors.Errorf("unknown runtime type: %s", runtimeType) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /examples/aws_create_master.yaml: -------------------------------------------------------------------------------- 1 | name: create test content master 2 | tasks: 3 | - name: stitch the chunks into a single video 4 | run: | 5 | mkdir /tmp/chunks 6 | wget "http://ftp.nluug.nl/pub/graphics/blender/demo/movies/ToS/tears_of_steel_1080p.mov" -O /tmp/chunks/chunk0.mov 7 | for i in 1 2 3 4 5; do 8 | cp /tmp/chunks/chunk0.mov "/tmp/chunks/chunk$i.mov" 9 | done 10 | for filename in /tmp/chunks/*.mov; do 11 | echo "file $filename" >> /tmp/chunks/chunks.txt 12 | done 13 | ffmpeg -f concat -safe 0 -i /tmp/chunks/chunks.txt -c:v copy -c:a copy /tmp/master.mov 14 | image: jrottenberg/ffmpeg:3.4-alpine 15 | post: 16 | - name: upload the final video to minio 17 | run: aws s3 cp /tmp/master.mov s3://$BUCKET_NAME/master.mov 18 | image: amazon/aws-cli:2.13.10 19 | env: 20 | BUCKET_NAME: my-bucket 21 | mounts: 22 | - type: volume 23 | target: /tmp 24 | -------------------------------------------------------------------------------- /examples/each.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: sample each job 3 | tasks: 4 | - name: hello task 5 | image: ubuntu:mantic 6 | run: echo start of job 7 | 8 | - name: sample each task 9 | each: 10 | list: "{{ sequence(1,5) }}" 11 | task: 12 | name: output task item 13 | var: eachTask{{item.index}} 14 | image: ubuntu:mantic 15 | env: 16 | ITEM: "{{item.value}}" 17 | run: echo -n $ITEM > $TORK_OUTPUT 18 | 19 | - name: sample each task with custom var 20 | each: 21 | list: "{{ sequence(1,5) }}" 22 | var: "myitem" 23 | task: 24 | name: output task item 25 | var: eachTask{{myitem.index}} 26 | image: ubuntu:mantic 27 | env: 28 | ITEM: "{{myitem.value}}" 29 | run: echo -n $ITEM > $TORK_OUTPUT 30 | 31 | - name: bye task 32 | image: ubuntu:mantic 33 | run: echo end of job 34 | -------------------------------------------------------------------------------- /examples/hello.yaml: -------------------------------------------------------------------------------- 1 | name: hello world 2 | output: "{{ tasks.hello }}" 3 | tasks: 4 | - var: hello 5 | name: simple task 6 | image: ubuntu:mantic 7 | run: echo -n hello world > $TORK_OUTPUT -------------------------------------------------------------------------------- /examples/job_defaults.yaml: -------------------------------------------------------------------------------- 1 | name: job defaults example 2 | 3 | defaults: 4 | limits: 5 | cpu: .5 6 | 7 | tasks: 8 | 9 | - name: sleep a little 10 | image: alpine:3.18.3 11 | run: sleep 0.1 12 | 13 | - name: sleep too much 14 | image: alpine:3.18.3 15 | run: sleep 2 -------------------------------------------------------------------------------- 
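Note: the example jobs in this directory are plain YAML definitions that are normally submitted through the Tork CLI or HTTP API. The following is a minimal, illustrative sketch, not a file in this repository, of submitting one of them programmatically through the package-level API in engine/default.go. It assumes that input.Job carries yaml tags matching the keys used in these examples (name, tasks, image, run) and that a standalone engine can start with whatever datastore and runtime the local configuration provides.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/runabol/tork/engine"
	"github.com/runabol/tork/input"
	"gopkg.in/yaml.v3"
)

func main() {
	// Read one of the example job definitions (path is an assumption).
	data, err := os.ReadFile("examples/hello.yaml")
	if err != nil {
		panic(err)
	}
	// Assumption: input.Job is tagged so the example YAML keys unmarshal into it.
	ij := &input.Job{}
	if err := yaml.Unmarshal(data, ij); err != nil {
		panic(err)
	}
	// Start the default engine in standalone mode (coordinator + worker in-process).
	engine.SetMode(engine.ModeStandalone)
	if err := engine.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = engine.Terminate() }()
	// Submit the job and print its ID.
	j, err := engine.SubmitJob(context.Background(), ij)
	if err != nil {
		panic(err)
	}
	fmt.Println("submitted job:", j.ID)
}

The exact shape of input.Job and the engine's startup requirements are assumptions here; input/job.go and the configs/ samples are the authoritative references.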
/examples/job_output.yaml: -------------------------------------------------------------------------------- 1 | name: job output example 2 | output: "hello {{ tasks.randomNumber }}" 3 | tasks: 4 | - var: randomNumber 5 | name: generate a random number 6 | image: ubuntu:mantic 7 | run: echo -n "$(shuf -i 1-10000 -n1)" > $TORK_OUTPUT -------------------------------------------------------------------------------- /examples/parallel.yaml: -------------------------------------------------------------------------------- 1 | name: sample parallel job 2 | tasks: 3 | - name: hello task 4 | image: ubuntu:mantic 5 | run: echo start of job 6 | 7 | - name: a parallel task 8 | parallel: 9 | tasks: 10 | - name: sleep for .1 seconds 11 | image: ubuntu:mantic 12 | run: sleep 0.1 13 | - name: sleep for .5 second 14 | image: ubuntu:mantic 15 | run: sleep 0.5 16 | - name: sleep for .15 seconds 17 | image: ubuntu:mantic 18 | run: sleep 0.15 19 | - name: fast task 1 20 | image: ubuntu:mantic 21 | run: echo fast 22 | - name: fast task 2 23 | image: ubuntu:mantic 24 | run: echo fast 25 | - name: fast task 3 26 | image: ubuntu:mantic 27 | run: echo fast 28 | 29 | - name: bye task 30 | image: ubuntu:mantic 31 | run: echo end of job 32 | -------------------------------------------------------------------------------- /examples/prepost.yaml: -------------------------------------------------------------------------------- 1 | name: pre- and post-task examples 2 | tasks: 3 | - name: simple task 4 | run: cat /xyz/pre > $TORK_OUTPUT 5 | image: ubuntu:mantic 6 | pre: 7 | - name: simple task 8 | run: echo im a pre task > /xyz/pre 9 | image: ubuntu:mantic 10 | post: 11 | - name: simple task 12 | run: echo im a post task > /xyz/post 13 | image: ubuntu:mantic 14 | mounts: 15 | - type: volume 16 | target: /xyz 17 | -------------------------------------------------------------------------------- /examples/resize_image.yaml: -------------------------------------------------------------------------------- 1 | name: Resizing image demo 2 | description: | 3 | This job takes an input source image and resizes it to multiple outputs. 4 | 5 | This demo assumes a locally running instance of Minio (an AWS S3-like service) which will be used to 6 | store the outputs.
7 | 8 | You can get a running instance of minio using the following commands: 9 | 10 | docker network create minio 11 | 12 | docker run --name=minio --network=minio -d -p 9000:9000 -p 9001:9001 minio/minio server /data --console-address ":9001" 13 | 14 | You'll likely have to change the endpointURL below to the IP address of your minio server 15 | 16 | The default credentials for a Minio Server are minioadmin/minioadmin 17 | inputs: 18 | accessKeyID: minioadmin # the default minio username 19 | secretKeyID: minioadmin # the default minio password 20 | endpointURL: http://minio:9000 21 | source: https://upload.wikimedia.org/wikipedia/commons/c/ca/Bbb-splash.png # or some other image 22 | target: s3://images 23 | tasks: 24 | - name: Extract the filename extension of the source 25 | var: fileExt 26 | image: alpine:3.18.3 27 | env: 28 | SOURCE: '{{ inputs.source }}' 29 | run: | 30 | FILENAME=$(basename -- "$SOURCE") 31 | EXT="${FILENAME##*.}" 32 | echo -n $EXT > $TORK_OUTPUT 33 | 34 | - name: Convert the image to various resolutions 35 | each: 36 | list: "{{ ['1920x1080','1366x768','1280x720','768x1024','100x100','200x200'] }}" 37 | task: 38 | name: 'Scale the image to {{ item.value }}' 39 | mounts: 40 | - type: volume 41 | target: /workdir 42 | networks: 43 | - minio 44 | image: dpokidov/imagemagick 45 | env: 46 | EXT: '{{ tasks.fileExt }}' 47 | SIZE: '{{ item.value }}' 48 | run: | 49 | mkdir /workdir/targets 50 | convert "/workdir/source.$EXT" -resize $SIZE "/workdir/targets/$SIZE.jpg" 51 | pre: 52 | - name: download the remote file 53 | image: alpine:3.18.3 54 | env: 55 | SOURCE: '{{ inputs.source }}' 56 | EXT: '{{ tasks.fileExt }}' 57 | run: | 58 | wget $SOURCE -O "/workdir/source.$EXT" 59 | post: 60 | - name: upload the converted image to minio 61 | run: aws --endpoint-url $ENDPOINT_URL s3 cp /workdir/targets/$SIZE.jpg $TARGET/$SIZE.jpg 62 | image: amazon/aws-cli:2.13.10 63 | env: 64 | AWS_ACCESS_KEY_ID: '{{inputs.accessKeyID}}' 65 | AWS_SECRET_ACCESS_KEY: '{{inputs.secretKeyID}}' 66 | TARGET: '{{inputs.target}}' 67 | ENDPOINT_URL: '{{inputs.endpointURL}}' 68 | SIZE: '{{ item.value }}' 69 | -------------------------------------------------------------------------------- /examples/retry.yaml: -------------------------------------------------------------------------------- 1 | name: sample retry job 2 | tasks: 3 | - name: a task that simulates a coin flip and fails unless the result is heads 4 | image: ubuntu:mantic 5 | entrypoint: ["bash","-c"] 6 | run: | 7 | COINFLIP=$((( RANDOM % 2 )) && echo "heads" || echo "tails") 8 | echo -n "$COINFLIP" 9 | echo -n "$COINFLIP" > $TORK_OUTPUT 10 | if [ "$COINFLIP" = "heads" ]; then 11 | exit 0 12 | else 13 | exit 1 14 | fi 15 | retry: 16 | limit: 2 -------------------------------------------------------------------------------- /examples/slow.yaml: -------------------------------------------------------------------------------- 1 | name: example of a "slow" job 2 | tasks: 3 | - name: say hello 4 | image: ubuntu:mantic 5 | run: echo hello 6 | 7 | - name: each 8 | each: 9 | list: "{{sequence(1,5)}}" 10 | task: 11 | name: sleep a little 12 | image: ubuntu:mantic 13 | env: 14 | SECONDS: "{{item.value}}" 15 | run: sleep $SECONDS 16 | 17 | - name: say bye 18 | image: ubuntu:mantic 19 | run: echo bye -------------------------------------------------------------------------------- /examples/subjob.yaml: -------------------------------------------------------------------------------- 1 | name: sample job with sub jobs 2 | tasks: 3 | - name: hello task 4 | image:
ubuntu:mantic 5 | run: echo start of job 6 | 7 | - var: subJobOutput 8 | name: sub job with output 9 | subjob: 10 | name: my sub job with output 11 | output: "{{ tasks.dataStuff }}" 12 | tasks: 13 | - var: dataStuff 14 | name: hello sub task job 1 15 | image: ubuntu:mantic 16 | run: echo -n data > $TORK_OUTPUT 17 | 18 | - name: running 2 jobs in parallel 19 | parallel: 20 | tasks: 21 | - name: sample job 1 22 | subjob: 23 | name: my sub job 1 24 | tasks: 25 | - name: hello sub task job 1 26 | image: ubuntu:mantic 27 | run: echo start of sub-job 28 | - name: bye sub task job 1 29 | image: ubuntu:mantic 30 | run: echo end of sub-job 31 | 32 | - name: sample job 2 33 | subjob: 34 | name: my sub job 2 35 | tasks: 36 | - name: hello sub task job 2 37 | image: ubuntu:mantic 38 | run: echo start of sub-job 39 | - name: bye sub task job 2 40 | image: ubuntu:mantic 41 | run: echo end of sub-job 42 | 43 | - name: bye task 44 | image: ubuntu:mantic 45 | run: echo end of job -------------------------------------------------------------------------------- /examples/timeout.yaml: -------------------------------------------------------------------------------- 1 | name: sample timeout job 2 | tasks: 3 | - name: a task that simulates a long running process that timeout 4 | image: ubuntu:mantic 5 | run: sleep 10 6 | timeout: 5s -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/runabol/tork 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.24.2 6 | 7 | retract v0.1.0 8 | 9 | require ( 10 | github.com/docker/cli v26.1.5+incompatible 11 | github.com/docker/docker v26.1.5+incompatible 12 | github.com/docker/go-units v0.5.0 13 | github.com/expr-lang/expr v1.17.2 14 | github.com/fatih/color v1.17.0 15 | github.com/go-co-op/gocron/v2 v2.13.0 16 | github.com/go-playground/validator/v10 v10.26.0 17 | github.com/google/uuid v1.6.0 18 | github.com/jmoiron/sqlx v1.3.5 19 | github.com/knadh/koanf/parsers/toml v0.1.0 20 | github.com/knadh/koanf/providers/env v0.1.0 21 | github.com/knadh/koanf/providers/file v1.2.0 22 | github.com/knadh/koanf/v2 v2.1.1 23 | github.com/labstack/echo/v4 v4.13.3 24 | github.com/lib/pq v1.10.9 25 | github.com/lithammer/shortuuid/v4 v4.2.0 26 | github.com/pkg/errors v0.9.1 27 | github.com/rabbitmq/amqp091-go v1.10.0 28 | github.com/robfig/cron/v3 v3.0.1 29 | github.com/rs/zerolog v1.33.0 30 | github.com/shirou/gopsutil/v3 v3.24.3 31 | github.com/stretchr/testify v1.10.0 32 | github.com/urfave/cli/v2 v2.27.2 33 | golang.org/x/crypto v0.36.0 34 | golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 35 | golang.org/x/sys v0.32.0 36 | golang.org/x/time v0.8.0 37 | gopkg.in/yaml.v3 v3.0.1 38 | gotest.tools/v3 v3.5.1 39 | ) 40 | 41 | require ( 42 | github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect 43 | github.com/Microsoft/go-winio v0.6.2 // indirect 44 | github.com/containerd/log v0.1.0 // indirect 45 | github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect 46 | github.com/davecgh/go-spew v1.1.1 // indirect 47 | github.com/distribution/reference v0.6.0 // indirect 48 | github.com/docker/go-connections v0.4.1-0.20231031175723-0b8c1f4e07a0 // indirect 49 | github.com/felixge/httpsnoop v1.0.4 // indirect 50 | github.com/fsnotify/fsnotify v1.9.0 // indirect 51 | github.com/gabriel-vasile/mimetype v1.4.8 // indirect 52 | github.com/go-logr/logr v1.4.1 // indirect 53 | github.com/go-logr/stdr v1.2.2 // indirect 54 | github.com/go-ole/go-ole v1.2.6 // 
indirect 55 | github.com/go-playground/locales v0.14.1 // indirect 56 | github.com/go-playground/universal-translator v0.18.1 // indirect 57 | github.com/go-sql-driver/mysql v1.7.0 // indirect 58 | github.com/go-viper/mapstructure/v2 v2.0.0-alpha.1 // indirect 59 | github.com/gogo/protobuf v1.3.2 // indirect 60 | github.com/google/go-cmp v0.6.0 // indirect 61 | github.com/jonboulle/clockwork v0.4.0 // indirect 62 | github.com/knadh/koanf/maps v0.1.1 // indirect 63 | github.com/kr/pretty v0.3.0 // indirect 64 | github.com/labstack/gommon v0.4.2 // indirect 65 | github.com/leodido/go-urn v1.4.0 // indirect 66 | github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect 67 | github.com/mattn/go-colorable v0.1.13 // indirect 68 | github.com/mattn/go-isatty v0.0.20 // indirect 69 | github.com/mattn/go-sqlite3 v1.14.18 // indirect 70 | github.com/mitchellh/copystructure v1.2.0 // indirect 71 | github.com/mitchellh/reflectwalk v1.0.2 // indirect 72 | github.com/moby/docker-image-spec v1.3.1 // indirect 73 | github.com/moby/term v0.5.0 // indirect 74 | github.com/morikuni/aec v1.0.0 // indirect 75 | github.com/opencontainers/go-digest v1.0.0 // indirect 76 | github.com/opencontainers/image-spec v1.1.0 // indirect 77 | github.com/pelletier/go-toml v1.9.5 // indirect 78 | github.com/pmezard/go-difflib v1.0.0 // indirect 79 | github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect 80 | github.com/rogpeppe/go-internal v1.12.0 // indirect 81 | github.com/russross/blackfriday/v2 v2.1.0 // indirect 82 | github.com/shoenig/go-m1cpu v0.1.6 // indirect 83 | github.com/sirupsen/logrus v1.9.3 // indirect 84 | github.com/tklauser/go-sysconf v0.3.12 // indirect 85 | github.com/tklauser/numcpus v0.6.1 // indirect 86 | github.com/valyala/bytebufferpool v1.0.0 // indirect 87 | github.com/valyala/fasttemplate v1.2.2 // indirect 88 | github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect 89 | github.com/yusufpapurcu/wmi v1.2.4 // indirect 90 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.51.0 // indirect 91 | go.opentelemetry.io/otel v1.26.0 // indirect 92 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 // indirect 93 | go.opentelemetry.io/otel/metric v1.26.0 // indirect 94 | go.opentelemetry.io/otel/sdk v1.26.0 // indirect 95 | go.opentelemetry.io/otel/trace v1.26.0 // indirect 96 | golang.org/x/net v0.38.0 // indirect 97 | golang.org/x/text v0.23.0 // indirect 98 | ) 99 | -------------------------------------------------------------------------------- /health/health.go: -------------------------------------------------------------------------------- 1 | package health 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | "github.com/rs/zerolog/log" 9 | "github.com/runabol/tork" 10 | ) 11 | 12 | const ( 13 | StatusUp = "UP" 14 | StatusDown = "DOWN" 15 | 16 | ServiceDatastore = "datastore" 17 | ServiceBroker = "broker" 18 | ServiceRuntime = "runtime" 19 | ) 20 | 21 | type HealthIndicator func(ctx context.Context) error 22 | 23 | type HealthCheckResult struct { 24 | Status string `json:"status"` 25 | Version string `json:"version"` 26 | } 27 | 28 | type HealthCheck struct { 29 | indicators map[string]HealthIndicator 30 | } 31 | 32 | func (b *HealthCheck) WithIndicator(name string, ind HealthIndicator) *HealthCheck { 33 | name = strings.TrimSpace(name) 34 | if name == "" { 35 | panic("health indicator name must not be empty") 36 | } 37 | if _, ok := b.indicators[name]; ok { 38 | panic(fmt.Sprintf("health 
indicator with name %s already exists", name)) 39 | } 40 | b.indicators[name] = ind 41 | return b 42 | } 43 | 44 | func NewHealthCheck() *HealthCheck { 45 | return &HealthCheck{ 46 | indicators: make(map[string]HealthIndicator), 47 | } 48 | } 49 | 50 | func (b *HealthCheck) Do(ctx context.Context) HealthCheckResult { 51 | for name, ind := range b.indicators { 52 | if err := ind(ctx); err != nil { 53 | log.Error().Err(err).Msgf("failed %s healthcheck", name) 54 | return HealthCheckResult{ 55 | Status: StatusDown, 56 | Version: tork.Version, 57 | } 58 | } 59 | } 60 | return HealthCheckResult{ 61 | Status: StatusUp, 62 | Version: tork.Version, 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /health/health_test.go: -------------------------------------------------------------------------------- 1 | package health_test 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/runabol/tork/health" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestHealthCheckOK(t *testing.T) { 13 | ctx := context.Background() 14 | ind := func(ctx context.Context) error { 15 | return nil 16 | } 17 | res := health.NewHealthCheck().WithIndicator("test", ind).Do(ctx) 18 | assert.Equal(t, health.StatusUp, res.Status) 19 | } 20 | 21 | func TestHealthCheckFailed(t *testing.T) { 22 | ctx := context.Background() 23 | ind := func(ctx context.Context) error { 24 | return errors.New("something happened") 25 | } 26 | res := health.NewHealthCheck().WithIndicator("test", ind).Do(ctx) 27 | assert.Equal(t, health.StatusDown, res.Status) 28 | } 29 | -------------------------------------------------------------------------------- /internal/coordinator/api/context.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | 7 | "github.com/labstack/echo/v4" 8 | ) 9 | 10 | type Context struct { 11 | ctx echo.Context 12 | api *API 13 | err error 14 | code int 15 | } 16 | 17 | func (c *Context) Request() *http.Request { 18 | return c.ctx.Request() 19 | } 20 | 21 | func (c *Context) Response() http.ResponseWriter { 22 | return c.ctx.Response().Unwrap() 23 | } 24 | 25 | func (c *Context) Get(key any) any { 26 | return c.ctx.Request().Context().Value(key) 27 | } 28 | 29 | func (c *Context) Set(key any, val any) { 30 | c.ctx.SetRequest(c.ctx.Request().WithContext(context.WithValue(c.ctx.Request().Context(), key, val))) 31 | } 32 | 33 | func (c *Context) NoContent(code int) error { 34 | return c.ctx.NoContent(code) 35 | } 36 | 37 | func (c *Context) Bind(i any) error { 38 | return c.ctx.Bind(i) 39 | } 40 | 41 | func (c *Context) String(code int, s string) error { 42 | return c.ctx.String(code, s) 43 | } 44 | 45 | func (c *Context) JSON(code int, data any) error { 46 | return c.ctx.JSON(code, data) 47 | } 48 | 49 | func (c *Context) Error(code int, err error) { 50 | c.err = err 51 | c.code = code 52 | } 53 | 54 | func (c *Context) Done() <-chan any { 55 | ch := make(chan any) 56 | go func() { 57 | select { 58 | case <-c.api.terminate: 59 | case <-c.Request().Context().Done(): 60 | } 61 | ch <- 1 62 | }() 63 | return ch 64 | } 65 | -------------------------------------------------------------------------------- /internal/coordinator/api/context_test.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "testing" 7 | 8 | "github.com/labstack/echo/v4" 9 | 10 | 
"github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestSetGetContext(t *testing.T) { 14 | req, err := http.NewRequest("GET", "/health", nil) 15 | assert.NoError(t, err) 16 | w := httptest.NewRecorder() 17 | ectx := echo.New().NewContext(req, w) 18 | ctx := Context{ 19 | ctx: ectx, 20 | } 21 | assert.Nil(t, ctx.Get("some-key")) 22 | ctx.Set("some-key", "some value") 23 | assert.Equal(t, "some value", ctx.Get("some-key")) 24 | } 25 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/cancel.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore" 11 | "github.com/runabol/tork/middleware/job" 12 | ) 13 | 14 | type cancelHandler struct { 15 | ds datastore.Datastore 16 | broker broker.Broker 17 | } 18 | 19 | func NewCancelHandler(ds datastore.Datastore, b broker.Broker) job.HandlerFunc { 20 | h := &cancelHandler{ 21 | ds: ds, 22 | broker: b, 23 | } 24 | return h.handle 25 | } 26 | 27 | func (h *cancelHandler) handle(ctx context.Context, _ job.EventType, j *tork.Job) error { 28 | // mark the job as cancelled 29 | if err := h.ds.UpdateJob(ctx, j.ID, func(u *tork.Job) error { 30 | if u.State != tork.JobStateRunning && u.State != tork.JobStateScheduled { 31 | // job is not running -- nothing to cancel 32 | return nil 33 | } 34 | u.State = tork.JobStateCancelled 35 | return nil 36 | }); err != nil { 37 | return err 38 | } 39 | // if there's a parent task notify the parent job to cancel as well 40 | if j.ParentID != "" { 41 | pt, err := h.ds.GetTaskByID(ctx, j.ParentID) 42 | if err != nil { 43 | return errors.Wrapf(err, "error fetching parent task: %s", pt.ID) 44 | } 45 | pj, err := h.ds.GetJobByID(ctx, pt.JobID) 46 | if err != nil { 47 | return errors.Wrapf(err, "error fetching parent job: %s", pj.ID) 48 | } 49 | pj.State = tork.JobStateCancelled 50 | if err := h.broker.PublishJob(ctx, pj); err != nil { 51 | log.Error().Err(err).Msgf("error cancelling sub-job: %s", pj.ID) 52 | } 53 | } 54 | // cancel all running tasks 55 | if err := cancelActiveTasks(ctx, h.ds, h.broker, j.ID); err != nil { 56 | return err 57 | } 58 | 59 | return nil 60 | } 61 | 62 | func cancelActiveTasks(ctx context.Context, ds datastore.Datastore, b broker.Broker, jobID string) error { 63 | // get a list of active tasks for the job 64 | tasks, err := ds.GetActiveTasks(ctx, jobID) 65 | if err != nil { 66 | return errors.Wrapf(err, "error getting active tasks for job: %s", jobID) 67 | } 68 | for _, t := range tasks { 69 | t.State = tork.TaskStateCancelled 70 | // mark tasks as cancelled 71 | if err := ds.UpdateTask(ctx, t.ID, func(u *tork.Task) error { 72 | u.State = tork.TaskStateCancelled 73 | return nil 74 | }); err != nil { 75 | return errors.Wrapf(err, "error cancelling task: %s", t.ID) 76 | } 77 | // if this task is a sub-job, notify the sub-job to cancel 78 | if t.SubJob != nil { 79 | // cancel the sub-job 80 | sj, err := ds.GetJobByID(ctx, t.SubJob.ID) 81 | if err != nil { 82 | return err 83 | } 84 | sj.State = tork.JobStateCancelled 85 | if err := b.PublishJob(ctx, sj); err != nil { 86 | return errors.Wrapf(err, "error publishing cancelllation for sub-job %s", sj.ID) 87 | } 88 | } else if t.NodeID != "" { 89 | // notify the node currently running the task 90 | // to cancel it 91 | node, err := 
ds.GetNodeByID(ctx, t.NodeID) 92 | if err != nil { 93 | return err 94 | } 95 | if err := b.PublishTask(ctx, node.Queue, t); err != nil { 96 | return err 97 | } 98 | } 99 | } 100 | return nil 101 | } 102 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/cancel_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore/postgres" 11 | "github.com/runabol/tork/internal/uuid" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func Test_cancelActiveTasks(t *testing.T) { 16 | ctx := context.Background() 17 | 18 | ds, err := postgres.NewTestDatastore() 19 | assert.NoError(t, err) 20 | b := broker.NewInMemoryBroker() 21 | 22 | j1 := &tork.Job{ 23 | ID: uuid.NewUUID(), 24 | State: tork.JobStatePending, 25 | Tasks: []*tork.Task{ 26 | { 27 | Name: "task-1", 28 | }, 29 | }, 30 | } 31 | 32 | err = ds.CreateJob(ctx, j1) 33 | assert.NoError(t, err) 34 | 35 | now := time.Now().UTC() 36 | 37 | err = ds.CreateTask(ctx, &tork.Task{ 38 | ID: uuid.NewUUID(), 39 | JobID: j1.ID, 40 | State: tork.TaskStateRunning, 41 | Position: 1, 42 | CreatedAt: &now, 43 | }) 44 | assert.NoError(t, err) 45 | 46 | actives, err := ds.GetActiveTasks(ctx, j1.ID) 47 | assert.NoError(t, err) 48 | assert.Len(t, actives, 1) 49 | 50 | err = cancelActiveTasks(ctx, ds, b, j1.ID) 51 | assert.NoError(t, err) 52 | 53 | actives, err = ds.GetActiveTasks(ctx, j1.ID) 54 | assert.NoError(t, err) 55 | assert.Len(t, actives, 0) 56 | assert.NoError(t, ds.Close()) 57 | } 58 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/error.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/rs/zerolog/log" 9 | "github.com/runabol/tork" 10 | "github.com/runabol/tork/broker" 11 | "github.com/runabol/tork/datastore" 12 | "github.com/runabol/tork/internal/eval" 13 | "github.com/runabol/tork/internal/uuid" 14 | "github.com/runabol/tork/middleware/job" 15 | "github.com/runabol/tork/middleware/task" 16 | ) 17 | 18 | type errorHandler struct { 19 | ds datastore.Datastore 20 | broker broker.Broker 21 | onJob job.HandlerFunc 22 | } 23 | 24 | func NewErrorHandler(ds datastore.Datastore, b broker.Broker, mw ...job.MiddlewareFunc) task.HandlerFunc { 25 | h := &errorHandler{ 26 | ds: ds, 27 | broker: b, 28 | onJob: job.ApplyMiddleware(NewJobHandler(ds, b), mw), 29 | } 30 | return h.handle 31 | } 32 | 33 | func (h *errorHandler) handle(ctx context.Context, et task.EventType, t *tork.Task) error { 34 | j, err := h.ds.GetJobByID(ctx, t.JobID) 35 | if err != nil { 36 | return errors.Wrapf(err, "unknown job: %s", t.JobID) 37 | } 38 | log.Debug(). 39 | Str("task-id", t.ID). 40 | Str("task-error", t.Error). 41 | Str("task-state", string(t.State)). 
42 | Msg("received task failure") 43 | 44 | now := time.Now().UTC() 45 | t.FailedAt = &now 46 | 47 | // mark the task as FAILED 48 | if err := h.ds.UpdateTask(ctx, t.ID, func(u *tork.Task) error { 49 | if u.IsActive() { 50 | u.State = tork.TaskStateFailed 51 | u.FailedAt = t.FailedAt 52 | u.Error = t.Error 53 | } 54 | return nil 55 | }); err != nil { 56 | return errors.Wrapf(err, "error marking task %s as FAILED", t.ID) 57 | } 58 | // eligible for retry? 59 | if (j.State == tork.JobStateRunning || j.State == tork.JobStateScheduled) && 60 | t.Retry != nil && 61 | t.Retry.Attempts < t.Retry.Limit { 62 | // create a new retry task 63 | now := time.Now().UTC() 64 | rt := t.Clone() 65 | rt.ID = uuid.NewUUID() 66 | rt.CreatedAt = &now 67 | rt.Retry.Attempts = rt.Retry.Attempts + 1 68 | rt.State = tork.TaskStatePending 69 | rt.Error = "" 70 | rt.FailedAt = nil 71 | if err := eval.EvaluateTask(rt, j.Context.AsMap()); err != nil { 72 | return errors.Wrapf(err, "error evaluating task") 73 | } 74 | if err := h.ds.CreateTask(ctx, rt); err != nil { 75 | return errors.Wrapf(err, "error creating a retry task") 76 | } 77 | if err := h.broker.PublishTask(ctx, broker.QUEUE_PENDING, rt); err != nil { 78 | log.Error().Err(err).Msg("error publishing retry task") 79 | } 80 | } else { 81 | j.State = tork.JobStateFailed 82 | j.FailedAt = t.FailedAt 83 | return h.onJob(ctx, job.StateChange, j) 84 | } 85 | return nil 86 | } 87 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/error_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore/postgres" 11 | "github.com/runabol/tork/internal/uuid" 12 | "github.com/runabol/tork/middleware/task" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func Test_handleFailedTask(t *testing.T) { 17 | ctx := context.Background() 18 | b := broker.NewInMemoryBroker() 19 | 20 | events := make(chan any) 21 | err := b.SubscribeForEvents(ctx, broker.TOPIC_JOB_FAILED, func(event any) { 22 | j, ok := event.(*tork.Job) 23 | assert.True(t, ok) 24 | assert.Equal(t, tork.JobStateFailed, j.State) 25 | close(events) 26 | }) 27 | assert.NoError(t, err) 28 | 29 | ds, err := postgres.NewTestDatastore() 30 | assert.NoError(t, err) 31 | 32 | handler := NewErrorHandler(ds, b) 33 | assert.NotNil(t, handler) 34 | 35 | now := time.Now().UTC() 36 | 37 | node := &tork.Node{ 38 | ID: uuid.NewUUID(), 39 | Queue: uuid.NewUUID(), 40 | } 41 | err = ds.CreateNode(ctx, node) 42 | assert.NoError(t, err) 43 | 44 | j1 := &tork.Job{ 45 | ID: uuid.NewUUID(), 46 | State: tork.JobStateRunning, 47 | CreatedAt: now, 48 | Position: 1, 49 | TaskCount: 1, 50 | Tasks: []*tork.Task{ 51 | { 52 | Name: "task-1", 53 | }, 54 | }, 55 | } 56 | err = ds.CreateJob(ctx, j1) 57 | assert.NoError(t, err) 58 | 59 | t1 := &tork.Task{ 60 | ID: uuid.NewUUID(), 61 | State: tork.TaskStateRunning, 62 | StartedAt: &now, 63 | CompletedAt: &now, 64 | NodeID: node.ID, 65 | JobID: j1.ID, 66 | Position: 1, 67 | CreatedAt: &now, 68 | } 69 | 70 | t2 := &tork.Task{ 71 | ID: uuid.NewUUID(), 72 | State: tork.TaskStateRunning, 73 | StartedAt: &now, 74 | CompletedAt: &now, 75 | NodeID: node.ID, 76 | JobID: j1.ID, 77 | Position: 1, 78 | CreatedAt: &now, 79 | } 80 | 81 | err = ds.CreateTask(ctx, t1) 82 | assert.NoError(t, err) 83 | 84 | err = ds.CreateTask(ctx, t2) 85 | 
assert.NoError(t, err) 86 | 87 | actives, err := ds.GetActiveTasks(ctx, j1.ID) 88 | assert.NoError(t, err) 89 | assert.Len(t, actives, 2) 90 | 91 | err = handler(ctx, task.StateChange, t1) 92 | assert.NoError(t, err) 93 | 94 | <-events 95 | 96 | t11, err := ds.GetTaskByID(ctx, t1.ID) 97 | assert.NoError(t, err) 98 | assert.Equal(t, tork.TaskStateFailed, t11.State) 99 | assert.Equal(t, t1.CompletedAt.Unix(), t11.CompletedAt.Unix()) 100 | 101 | // verify that the job was 102 | // marked as FAILED 103 | j2, err := ds.GetJobByID(ctx, j1.ID) 104 | assert.NoError(t, err) 105 | assert.Equal(t, j1.ID, j2.ID) 106 | assert.Equal(t, tork.JobStateFailed, j2.State) 107 | 108 | actives, err = ds.GetActiveTasks(ctx, j1.ID) 109 | assert.NoError(t, err) 110 | assert.Len(t, actives, 0) 111 | assert.True(t, j2.FailedAt.After(j1.CreatedAt)) 112 | assert.NoError(t, ds.Close()) 113 | } 114 | 115 | func Test_handleFailedTaskRetry(t *testing.T) { 116 | ctx := context.Background() 117 | b := broker.NewInMemoryBroker() 118 | 119 | processed := make(chan any) 120 | err := b.SubscribeForTasks(broker.QUEUE_PENDING, func(tk *tork.Task) error { 121 | assert.Nil(t, tk.FailedAt) 122 | close(processed) 123 | return nil 124 | }) 125 | assert.NoError(t, err) 126 | 127 | ds, err := postgres.NewTestDatastore() 128 | assert.NoError(t, err) 129 | 130 | handler := NewErrorHandler(ds, b) 131 | assert.NotNil(t, handler) 132 | 133 | now := time.Now().UTC() 134 | 135 | j1 := &tork.Job{ 136 | ID: uuid.NewUUID(), 137 | State: tork.JobStateRunning, 138 | Position: 1, 139 | TaskCount: 1, 140 | Tasks: []*tork.Task{ 141 | { 142 | Name: "task-1", 143 | }, 144 | }, 145 | } 146 | err = ds.CreateJob(ctx, j1) 147 | assert.NoError(t, err) 148 | 149 | t1 := &tork.Task{ 150 | ID: uuid.NewUUID(), 151 | State: tork.TaskStateRunning, 152 | StartedAt: &now, 153 | CompletedAt: &now, 154 | NodeID: uuid.NewUUID(), 155 | JobID: j1.ID, 156 | Position: 1, 157 | Retry: &tork.TaskRetry{ 158 | Limit: 1, 159 | }, 160 | CreatedAt: &now, 161 | } 162 | 163 | err = ds.CreateTask(ctx, t1) 164 | assert.NoError(t, err) 165 | 166 | err = handler(ctx, task.StateChange, t1) 167 | assert.NoError(t, err) 168 | 169 | <-processed 170 | 171 | t2, err := ds.GetTaskByID(ctx, t1.ID) 172 | assert.NoError(t, err) 173 | assert.Equal(t, tork.TaskStateFailed, t2.State) 174 | assert.Equal(t, t1.CompletedAt.Unix(), t2.CompletedAt.Unix()) 175 | 176 | // verify that the job was 177 | // NOT marked as FAILED 178 | j2, err := ds.GetJobByID(ctx, j1.ID) 179 | assert.NoError(t, err) 180 | assert.Equal(t, j1.ID, j2.ID) 181 | assert.Equal(t, tork.JobStateRunning, j2.State) 182 | assert.NoError(t, ds.Close()) 183 | } 184 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/heartbeat.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rs/zerolog/log" 7 | "github.com/runabol/tork" 8 | "github.com/runabol/tork/datastore" 9 | "github.com/runabol/tork/middleware/node" 10 | ) 11 | 12 | type heartbeatHandler struct { 13 | ds datastore.Datastore 14 | } 15 | 16 | func NewHeartbeatHandler(ds datastore.Datastore) node.HandlerFunc { 17 | h := &heartbeatHandler{ 18 | ds: ds, 19 | } 20 | return h.handle 21 | } 22 | 23 | func (h *heartbeatHandler) handle(ctx context.Context, n *tork.Node) error { 24 | _, err := h.ds.GetNodeByID(ctx, n.ID) 25 | if err == datastore.ErrNodeNotFound { 26 | log.Info(). 27 | Str("node-id", n.ID). 
28 | Str("hostname", n.Hostname). 29 | Msg("received first heartbeat") 30 | return h.ds.CreateNode(ctx, n) 31 | } 32 | return h.ds.UpdateNode(ctx, n.ID, func(u *tork.Node) error { 33 | // ignore "old" heartbeats 34 | if u.LastHeartbeatAt.After(n.LastHeartbeatAt) { 35 | return nil 36 | } 37 | u.LastHeartbeatAt = n.LastHeartbeatAt 38 | u.CPUPercent = n.CPUPercent 39 | u.Status = n.Status 40 | u.TaskCount = n.TaskCount 41 | return nil 42 | }) 43 | } 44 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/heartbeat_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/datastore/postgres" 10 | "github.com/runabol/tork/internal/uuid" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func Test_handleHeartbeat(t *testing.T) { 15 | ctx := context.Background() 16 | 17 | ds, err := postgres.NewTestDatastore() 18 | assert.NoError(t, err) 19 | handler := NewHeartbeatHandler(ds) 20 | assert.NotNil(t, handler) 21 | 22 | n1 := tork.Node{ 23 | ID: uuid.NewUUID(), 24 | LastHeartbeatAt: time.Now().UTC().Add(-time.Minute * 5), 25 | CPUPercent: 75, 26 | Hostname: "host-1", 27 | Status: tork.NodeStatusUP, 28 | } 29 | 30 | err = handler(ctx, &n1) 31 | assert.NoError(t, err) 32 | 33 | n11, err := ds.GetNodeByID(ctx, n1.ID) 34 | assert.NoError(t, err) 35 | assert.Equal(t, n1.LastHeartbeatAt.Unix(), n11.LastHeartbeatAt.Unix()) 36 | assert.Equal(t, n1.CPUPercent, n11.CPUPercent) 37 | assert.Equal(t, tork.NodeStatusOffline, n11.Status) 38 | assert.Equal(t, n1.TaskCount, n11.TaskCount) 39 | 40 | n2 := tork.Node{ 41 | ID: n1.ID, 42 | LastHeartbeatAt: time.Now().UTC().Add(-time.Minute * 2), 43 | CPUPercent: 75, 44 | Status: tork.NodeStatusDown, 45 | TaskCount: 3, 46 | } 47 | 48 | err = handler(ctx, &n2) 49 | assert.NoError(t, err) 50 | 51 | n22, err := ds.GetNodeByID(ctx, n1.ID) 52 | assert.NoError(t, err) 53 | assert.Equal(t, n2.LastHeartbeatAt.Unix(), n22.LastHeartbeatAt.Unix()) 54 | assert.Equal(t, n2.CPUPercent, n22.CPUPercent) 55 | assert.Equal(t, n2.Status, n22.Status) 56 | assert.Equal(t, n2.TaskCount, n22.TaskCount) 57 | 58 | n3 := tork.Node{ 59 | ID: n1.ID, 60 | LastHeartbeatAt: time.Now().UTC().Add(-time.Minute * 7), 61 | CPUPercent: 75, 62 | } 63 | 64 | err = handler(ctx, &n3) 65 | assert.NoError(t, err) 66 | 67 | n33, err := ds.GetNodeByID(ctx, n1.ID) 68 | assert.NoError(t, err) 69 | assert.Equal(t, n2.LastHeartbeatAt.Unix(), n33.LastHeartbeatAt.Unix()) // should keep the latest 70 | assert.Equal(t, n3.CPUPercent, n33.CPUPercent) 71 | assert.NoError(t, ds.Close()) 72 | } 73 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/log.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rs/zerolog/log" 7 | "github.com/runabol/tork" 8 | "github.com/runabol/tork/datastore" 9 | ) 10 | 11 | type logHandler struct { 12 | ds datastore.Datastore 13 | } 14 | 15 | func NewLogHandler(ds datastore.Datastore) func(p *tork.TaskLogPart) { 16 | h := &logHandler{ 17 | ds: ds, 18 | } 19 | return h.handle 20 | } 21 | 22 | func (h *logHandler) handle(p *tork.TaskLogPart) { 23 | ctx := context.Background() 24 | if err := h.ds.CreateTaskLogPart(ctx, p); err != nil { 25 | log.Error().Err(err).Msgf("error writing task log: %s", 
err.Error()) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/log_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/datastore/postgres" 10 | "github.com/runabol/tork/internal/uuid" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func Test_handleLog(t *testing.T) { 15 | ctx := context.Background() 16 | 17 | ds, err := postgres.NewTestDatastore() 18 | assert.NoError(t, err) 19 | handler := NewLogHandler(ds) 20 | assert.NotNil(t, handler) 21 | 22 | j1 := &tork.Job{ 23 | ID: uuid.NewUUID(), 24 | Name: "test job", 25 | } 26 | err = ds.CreateJob(ctx, j1) 27 | assert.NoError(t, err) 28 | 29 | now := time.Now().UTC() 30 | 31 | tk := &tork.Task{ 32 | ID: uuid.NewUUID(), 33 | Queue: "test-queue", 34 | JobID: j1.ID, 35 | CreatedAt: &now, 36 | } 37 | 38 | err = ds.CreateTask(ctx, tk) 39 | assert.NoError(t, err) 40 | 41 | p1 := tork.TaskLogPart{ 42 | TaskID: tk.ID, 43 | Number: 1, 44 | Contents: "line 1", 45 | } 46 | 47 | handler(&p1) 48 | 49 | n11, err := ds.GetTaskLogParts(ctx, p1.TaskID, "", 1, 10) 50 | assert.NoError(t, err) 51 | assert.Equal(t, 1, n11.TotalItems) 52 | assert.Equal(t, "line 1", n11.Items[0].Contents) 53 | assert.NoError(t, ds.Close()) 54 | } 55 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/pending.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "time" 7 | 8 | "github.com/pkg/errors" 9 | "github.com/rs/zerolog/log" 10 | "github.com/runabol/tork" 11 | "github.com/runabol/tork/broker" 12 | "github.com/runabol/tork/datastore" 13 | "github.com/runabol/tork/internal/coordinator/scheduler" 14 | "github.com/runabol/tork/middleware/task" 15 | ) 16 | 17 | type pendingHandler struct { 18 | sched scheduler.Scheduler 19 | ds datastore.Datastore 20 | broker broker.Broker 21 | } 22 | 23 | func NewPendingHandler(ds datastore.Datastore, b broker.Broker) task.HandlerFunc { 24 | h := &pendingHandler{ 25 | ds: ds, 26 | broker: b, 27 | sched: *scheduler.NewScheduler(ds, b), 28 | } 29 | return h.handle 30 | } 31 | 32 | func (h *pendingHandler) handle(ctx context.Context, et task.EventType, t *tork.Task) error { 33 | log.Debug(). 34 | Str("task-id", t.ID). 
35 | Msg("handling pending task") 36 | if strings.TrimSpace(t.If) == "false" { 37 | return h.skipTask(ctx, t) 38 | } else { 39 | return h.sched.ScheduleTask(ctx, t) 40 | } 41 | } 42 | 43 | func (h *pendingHandler) skipTask(ctx context.Context, t *tork.Task) error { 44 | now := time.Now().UTC() 45 | t.State = tork.TaskStateSkipped 46 | t.ScheduledAt = &now 47 | t.StartedAt = &now 48 | t.CompletedAt = &now 49 | if err := h.ds.UpdateTask(ctx, t.ID, func(u *tork.Task) error { 50 | u.State = t.State 51 | u.ScheduledAt = t.ScheduledAt 52 | u.StartedAt = t.StartedAt 53 | return nil 54 | }); err != nil { 55 | return errors.Wrapf(err, "error updating task in datastore") 56 | } 57 | return h.broker.PublishTask(ctx, broker.QUEUE_COMPLETED, t) 58 | } 59 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/pending_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore/postgres" 11 | "github.com/runabol/tork/internal/uuid" 12 | "github.com/runabol/tork/middleware/task" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func Test_handlePendingTask(t *testing.T) { 17 | ctx := context.Background() 18 | b := broker.NewInMemoryBroker() 19 | 20 | processed := make(chan any) 21 | err := b.SubscribeForTasks("test-queue", func(t *tork.Task) error { 22 | close(processed) 23 | return nil 24 | }) 25 | assert.NoError(t, err) 26 | 27 | ds, err := postgres.NewTestDatastore() 28 | assert.NoError(t, err) 29 | handler := NewPendingHandler(ds, b) 30 | assert.NotNil(t, handler) 31 | 32 | j1 := &tork.Job{ 33 | ID: uuid.NewUUID(), 34 | Name: "test job", 35 | } 36 | err = ds.CreateJob(ctx, j1) 37 | assert.NoError(t, err) 38 | 39 | now := time.Now().UTC() 40 | 41 | tk := &tork.Task{ 42 | ID: uuid.NewUUID(), 43 | Queue: "test-queue", 44 | JobID: j1.ID, 45 | CreatedAt: &now, 46 | } 47 | 48 | err = ds.CreateTask(ctx, tk) 49 | assert.NoError(t, err) 50 | 51 | err = handler(ctx, task.StateChange, tk) 52 | assert.NoError(t, err) 53 | 54 | // wait for the task to get processed 55 | <-processed 56 | 57 | tk, err = ds.GetTaskByID(ctx, tk.ID) 58 | assert.NoError(t, err) 59 | assert.Equal(t, tork.TaskStateScheduled, tk.State) 60 | assert.NoError(t, ds.Close()) 61 | } 62 | 63 | func Test_handleConditionalTask(t *testing.T) { 64 | ctx := context.Background() 65 | b := broker.NewInMemoryBroker() 66 | 67 | completed := make(chan any) 68 | err := b.SubscribeForTasks(broker.QUEUE_COMPLETED, func(t *tork.Task) error { 69 | close(completed) 70 | return nil 71 | }) 72 | assert.NoError(t, err) 73 | 74 | ds, err := postgres.NewTestDatastore() 75 | assert.NoError(t, err) 76 | handler := NewPendingHandler(ds, b) 77 | assert.NotNil(t, handler) 78 | 79 | j1 := &tork.Job{ 80 | ID: uuid.NewUUID(), 81 | Name: "test job", 82 | } 83 | err = ds.CreateJob(ctx, j1) 84 | assert.NoError(t, err) 85 | 86 | now := time.Now().UTC() 87 | 88 | tk := &tork.Task{ 89 | ID: uuid.NewUUID(), 90 | Queue: "test-queue", 91 | If: "false", 92 | CreatedAt: &now, 93 | JobID: j1.ID, 94 | } 95 | 96 | err = ds.CreateTask(ctx, tk) 97 | assert.NoError(t, err) 98 | 99 | err = handler(ctx, task.StateChange, tk) 100 | assert.NoError(t, err) 101 | 102 | // wait for the task to get processed 103 | <-completed 104 | 105 | tk, err = ds.GetTaskByID(ctx, tk.ID) 106 | assert.NoError(t, err) 107 | 
assert.Equal(t, tork.TaskStateSkipped, tk.State) 108 | assert.NoError(t, ds.Close()) 109 | } 110 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/progress.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "math" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/rs/zerolog/log" 9 | "github.com/runabol/tork" 10 | "github.com/runabol/tork/datastore" 11 | "github.com/runabol/tork/middleware/job" 12 | "github.com/runabol/tork/middleware/task" 13 | ) 14 | 15 | type progressHandler struct { 16 | ds datastore.Datastore 17 | onJob job.HandlerFunc 18 | } 19 | 20 | func NewProgressHandler(ds datastore.Datastore, onJob job.HandlerFunc) task.HandlerFunc { 21 | h := &progressHandler{ 22 | ds: ds, 23 | onJob: onJob, 24 | } 25 | return h.handle 26 | } 27 | 28 | func (h *progressHandler) handle(ctx context.Context, et task.EventType, t *tork.Task) error { 29 | log.Debug().Msgf("[Task][%s] %.2f", t.ID, t.Progress) 30 | if t.Progress < 0 { 31 | t.Progress = 0 32 | } else if t.Progress > 100 { 33 | t.Progress = 100 34 | } 35 | if err := h.ds.UpdateTask(ctx, t.ID, func(u *tork.Task) error { 36 | u.Progress = t.Progress 37 | return nil 38 | }); err != nil { 39 | return errors.Wrapf(err, "error updating task progress: %s", err.Error()) 40 | } 41 | // calculate the overall job progress 42 | j, err := h.ds.GetJobByID(ctx, t.JobID) 43 | if err != nil { 44 | return err 45 | } 46 | if t.Progress == 0 { 47 | j.Progress = (float64(j.Position - 1)) / float64(j.TaskCount) * 100 48 | } else { 49 | j.Progress = (float64(j.Position-1) + (t.Progress / 100)) / float64(j.TaskCount) * 100 50 | } 51 | // Round progress to two decimal points 52 | j.Progress = math.Round(j.Progress*100) / 100 53 | if err := h.ds.UpdateJob(ctx, t.JobID, func(u *tork.Job) error { 54 | u.Progress = j.Progress 55 | return nil 56 | }); err != nil { 57 | return errors.Wrapf(err, "error updating job progress: %s", err.Error()) 58 | } 59 | return h.onJob(ctx, job.Progress, j) 60 | } 61 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/started.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore" 11 | "github.com/runabol/tork/middleware/job" 12 | "github.com/runabol/tork/middleware/task" 13 | ) 14 | 15 | type startedHandler struct { 16 | ds datastore.Datastore 17 | broker broker.Broker 18 | onJob job.HandlerFunc 19 | } 20 | 21 | func NewStartedHandler(ds datastore.Datastore, b broker.Broker, mw ...job.MiddlewareFunc) task.HandlerFunc { 22 | h := &startedHandler{ 23 | ds: ds, 24 | broker: b, 25 | onJob: job.ApplyMiddleware(NewJobHandler(ds, b), mw), 26 | } 27 | return h.handle 28 | } 29 | 30 | func (h *startedHandler) handle(ctx context.Context, et task.EventType, t *tork.Task) error { 31 | log.Debug(). 32 | Str("task-id", t.ID). 
33 | Msg("received task start") 34 | // verify that the job is still running 35 | j, err := h.ds.GetJobByID(ctx, t.JobID) 36 | if err != nil { 37 | return err 38 | } 39 | // if the job isn't running anymore we need 40 | // to cancel the task 41 | if j.State != tork.JobStateRunning && j.State != tork.JobStateScheduled { 42 | t.State = tork.TaskStateCancelled 43 | node, err := h.ds.GetNodeByID(ctx, t.NodeID) 44 | if err != nil { 45 | return err 46 | } 47 | return h.broker.PublishTask(ctx, node.Queue, t) 48 | } 49 | // if this is the first task that started 50 | // we want to switch the state of the job 51 | // from SCHEDULED to RUNNING 52 | if j.State == tork.JobStateScheduled { 53 | j.State = tork.JobStateRunning 54 | if err := h.onJob(ctx, job.StateChange, j); err != nil { 55 | return err 56 | } 57 | } 58 | return h.ds.UpdateTask(ctx, t.ID, func(u *tork.Task) error { 59 | // we don't want to mark the task as RUNNING 60 | // if an out-of-order task completion/failure 61 | // arrived earlier 62 | if u.State == tork.TaskStateScheduled { 63 | now := time.Now().UTC() 64 | t.StartedAt = &now 65 | u.State = tork.TaskStateRunning 66 | u.StartedAt = &now 67 | } 68 | // if the worker crashed, the task 69 | // would automatically be returned 70 | // the queue and another worker 71 | // would pick it up. So we always 72 | // want to keep track of the latest 73 | // node that picked up the task. 74 | u.NodeID = t.NodeID 75 | return nil 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /internal/coordinator/handlers/started_test.go: -------------------------------------------------------------------------------- 1 | package handlers 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/datastore/postgres" 11 | "github.com/runabol/tork/internal/uuid" 12 | "github.com/runabol/tork/middleware/task" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | func Test_handleStartedTask(t *testing.T) { 17 | ctx := context.Background() 18 | b := broker.NewInMemoryBroker() 19 | 20 | ds, err := postgres.NewTestDatastore() 21 | assert.NoError(t, err) 22 | handler := NewStartedHandler(ds, b) 23 | assert.NotNil(t, handler) 24 | 25 | now := time.Now().UTC() 26 | 27 | j1 := &tork.Job{ 28 | ID: uuid.NewUUID(), 29 | State: tork.JobStateScheduled, 30 | } 31 | err = ds.CreateJob(ctx, j1) 32 | assert.NoError(t, err) 33 | 34 | t1 := &tork.Task{ 35 | ID: uuid.NewUUID(), 36 | State: tork.TaskStateScheduled, 37 | StartedAt: &now, 38 | NodeID: uuid.NewUUID(), 39 | JobID: j1.ID, 40 | CreatedAt: &now, 41 | } 42 | 43 | err = ds.CreateTask(ctx, t1) 44 | assert.NoError(t, err) 45 | 46 | err = handler(ctx, task.StateChange, t1) 47 | assert.NoError(t, err) 48 | 49 | t2, err := ds.GetTaskByID(ctx, t1.ID) 50 | assert.NoError(t, err) 51 | assert.Equal(t, tork.TaskStateRunning, t2.State) 52 | assert.Equal(t, t1.StartedAt.Unix(), t2.StartedAt.Unix()) 53 | assert.Equal(t, t1.NodeID, t2.NodeID) 54 | 55 | j2, err := ds.GetJobByID(ctx, j1.ID) 56 | assert.NoError(t, err) 57 | 58 | assert.Equal(t, tork.JobStateRunning, j2.State) 59 | assert.NoError(t, ds.Close()) 60 | } 61 | 62 | func Test_handleStartedTaskOfFailedJob(t *testing.T) { 63 | ctx := context.Background() 64 | b := broker.NewInMemoryBroker() 65 | 66 | qname := uuid.NewUUID() 67 | 68 | cancellations := make(chan any) 69 | err := b.SubscribeForTasks(qname, func(t *tork.Task) error { 70 | close(cancellations) 71 | return nil 72 | }) 73 
| assert.NoError(t, err) 74 | 75 | ds, err := postgres.NewTestDatastore() 76 | assert.NoError(t, err) 77 | handler := NewStartedHandler(ds, b) 78 | assert.NotNil(t, handler) 79 | 80 | now := time.Now().UTC() 81 | 82 | j1 := &tork.Job{ 83 | ID: uuid.NewUUID(), 84 | State: tork.JobStateFailed, 85 | } 86 | err = ds.CreateJob(ctx, j1) 87 | assert.NoError(t, err) 88 | 89 | n1 := &tork.Node{ 90 | ID: uuid.NewUUID(), 91 | Queue: qname, 92 | } 93 | err = ds.CreateNode(ctx, n1) 94 | assert.NoError(t, err) 95 | 96 | t1 := &tork.Task{ 97 | ID: uuid.NewUUID(), 98 | State: tork.TaskStateScheduled, 99 | StartedAt: &now, 100 | JobID: j1.ID, 101 | NodeID: n1.ID, 102 | CreatedAt: &now, 103 | } 104 | 105 | err = ds.CreateTask(ctx, t1) 106 | assert.NoError(t, err) 107 | 108 | err = handler(ctx, task.StateChange, t1) 109 | assert.NoError(t, err) 110 | 111 | <-cancellations 112 | 113 | t2, err := ds.GetTaskByID(ctx, t1.ID) 114 | assert.NoError(t, err) 115 | assert.Equal(t, tork.TaskStateScheduled, t2.State) 116 | assert.Equal(t, t1.StartedAt.Unix(), t2.StartedAt.Unix()) 117 | assert.Equal(t, t1.NodeID, t2.NodeID) 118 | assert.NoError(t, ds.Close()) 119 | } 120 | -------------------------------------------------------------------------------- /internal/eval/eval.go: -------------------------------------------------------------------------------- 1 | package eval 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "regexp" 7 | 8 | "github.com/expr-lang/expr" 9 | "github.com/pkg/errors" 10 | "github.com/runabol/tork" 11 | ) 12 | 13 | var exprMatcher = regexp.MustCompile(`{{\s*(.+?)\s*}}`) 14 | 15 | func EvaluateTask(t *tork.Task, c map[string]any) error { 16 | // evaluate name 17 | name, err := EvaluateTemplate(t.Name, c) 18 | if err != nil { 19 | return err 20 | } 21 | t.Name = name 22 | // evaluate var 23 | var_, err := EvaluateTemplate(t.Var, c) 24 | if err != nil { 25 | return err 26 | } 27 | t.Var = var_ 28 | // evaluate image 29 | img, err := EvaluateTemplate(t.Image, c) 30 | if err != nil { 31 | return err 32 | } 33 | t.Image = img 34 | // evaluate queue 35 | q, err := EvaluateTemplate(t.Queue, c) 36 | if err != nil { 37 | return err 38 | } 39 | t.Queue = q 40 | // evaluate the env vars 41 | env := t.Env 42 | for k, v := range env { 43 | result, err := EvaluateTemplate(v, c) 44 | if err != nil { 45 | return err 46 | } 47 | env[k] = result 48 | } 49 | t.Env = env 50 | // evaluate if expr 51 | ifExpr, err := EvaluateTemplate(t.If, c) 52 | if err != nil { 53 | return err 54 | } 55 | t.If = ifExpr 56 | // evaluate pre-tasks 57 | pres := make([]*tork.Task, len(t.Pre)) 58 | for i, pre := range t.Pre { 59 | if err := EvaluateTask(pre, c); err != nil { 60 | return err 61 | } 62 | pres[i] = pre 63 | } 64 | t.Pre = pres 65 | // evaluate post-tasks 66 | posts := make([]*tork.Task, len(t.Post)) 67 | for i, post := range t.Post { 68 | if err := EvaluateTask(post, c); err != nil { 69 | return err 70 | } 71 | posts[i] = post 72 | } 73 | t.Post = posts 74 | // evaluate parallel tasks 75 | if t.Parallel != nil { 76 | parallel := make([]*tork.Task, len(t.Parallel.Tasks)) 77 | for i, par := range t.Parallel.Tasks { 78 | if err := EvaluateTask(par, c); err != nil { 79 | return err 80 | } 81 | parallel[i] = par 82 | } 83 | t.Parallel.Tasks = parallel 84 | } 85 | // evaluate cmd 86 | cmd := t.CMD 87 | for i, v := range cmd { 88 | result, err := EvaluateTemplate(v, c) 89 | if err != nil { 90 | return err 91 | } 92 | cmd[i] = result 93 | } 94 | // evaluate sub-job 95 | if t.SubJob != nil { 96 | name, err := EvaluateTemplate(t.SubJob.Name, 
c) 97 | if err != nil { 98 | return err 99 | } 100 | t.SubJob.Name = name 101 | if t.SubJob.Inputs == nil { 102 | t.SubJob.Inputs = make(map[string]string) 103 | } 104 | for k, v := range t.SubJob.Inputs { 105 | result, err := EvaluateTemplate(v, c) 106 | if err != nil { 107 | return err 108 | } 109 | t.SubJob.Inputs[k] = result 110 | } 111 | for k, v := range t.SubJob.Secrets { 112 | result, err := EvaluateTemplate(v, c) 113 | if err != nil { 114 | return err 115 | } 116 | t.SubJob.Secrets[k] = result 117 | } 118 | for _, wh := range t.SubJob.Webhooks { 119 | url, err := EvaluateTemplate(wh.URL, c) 120 | if err != nil { 121 | return err 122 | } 123 | wh.URL = url 124 | if wh.Headers == nil { 125 | wh.Headers = make(map[string]string) 126 | } 127 | for k, v := range wh.Headers { 128 | result, err := EvaluateTemplate(v, c) 129 | if err != nil { 130 | return err 131 | } 132 | wh.Headers[k] = result 133 | } 134 | } 135 | } 136 | return nil 137 | } 138 | 139 | func EvaluateTemplate(ex string, c map[string]any) (string, error) { 140 | if ex == "" { 141 | return "", nil 142 | } 143 | loc := 0 144 | var buf bytes.Buffer 145 | for _, match := range exprMatcher.FindAllStringSubmatchIndex(ex, -1) { 146 | startTag := match[0] 147 | endTag := match[1] 148 | startExpr := match[2] 149 | endExpr := match[3] 150 | buf.WriteString(ex[loc:startTag]) 151 | ev, err := EvaluateExpr(ex[startExpr:endExpr], c) 152 | if err != nil { 153 | return "", err 154 | } 155 | buf.WriteString(fmt.Sprintf("%v", ev)) 156 | loc = endTag 157 | } 158 | buf.WriteString(ex[loc:]) 159 | return buf.String(), nil 160 | } 161 | 162 | func ValidExpr(ex string) bool { 163 | ex = sanitizeExpr(ex) 164 | _, err := expr.Compile(ex) 165 | return err == nil 166 | } 167 | 168 | func sanitizeExpr(ex string) string { 169 | if matches := exprMatcher.FindStringSubmatch(ex); matches != nil { 170 | return matches[1] 171 | } 172 | return ex 173 | } 174 | 175 | func EvaluateExpr(ex string, c map[string]any) (any, error) { 176 | ex = sanitizeExpr(ex) 177 | env := map[string]any{ 178 | "randomInt": randomInt, 179 | "sequence": sequence, 180 | } 181 | for k, v := range c { 182 | env[k] = v 183 | } 184 | program, err := expr.Compile(ex, expr.Env(env)) 185 | if err != nil { 186 | return "", errors.Wrapf(err, "error compiling expression: %s", ex) 187 | } 188 | output, err := expr.Run(program, env) 189 | if err != nil { 190 | return "", errors.Wrapf(err, "error evaluating expression: %s", ex) 191 | } 192 | return output, nil 193 | } 194 | -------------------------------------------------------------------------------- /internal/eval/funcs.go: -------------------------------------------------------------------------------- 1 | package eval 2 | 3 | import ( 4 | "math/rand" 5 | "reflect" 6 | 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | func randomInt(args ...any) (int, error) { 11 | if len(args) == 1 { 12 | if args[0] == nil { 13 | return 0, errors.Errorf("not expecting nil argument") 14 | } 15 | v := reflect.ValueOf(args[0]) 16 | if !v.CanInt() { 17 | return 0, errors.Errorf("invalid arg type %s", v.Type()) 18 | } 19 | return rand.Intn(int(v.Int())), nil 20 | } else if len(args) == 0 { 21 | return rand.Int(), nil 22 | } else { 23 | return 0, errors.Errorf("invalid number of arguments for trim (expected 0 or 1, got %d)", len(args)) 24 | } 25 | } 26 | 27 | func sequence(start, stop int) []int { 28 | if start > stop { 29 | return []int{} 30 | } 31 | result := make([]int, stop-start) 32 | for ix := range result { 33 | result[ix] = start 34 | start = start + 1 35 | } 
35 | 	}
36 | return result 37 | } 38 | -------------------------------------------------------------------------------- /internal/eval/funcs_test.go: -------------------------------------------------------------------------------- 1 | package eval 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestSequence(t *testing.T) { 10 | result := sequence(1, 3) 11 | assert.Equal(t, []int{1, 2}, result) 12 | 13 | result = sequence(-5, -2) 14 | assert.Equal(t, []int{-5, -4, -3}, result) 15 | 16 | result = sequence(5, 2) 17 | assert.Equal(t, []int{}, result) 18 | 19 | result = sequence(2, 2) 20 | assert.Equal(t, []int{}, result) 21 | } 22 | 23 | func TestRandomInt(t *testing.T) { 24 | for i := 0; i < 100; i++ { 25 | result, err := randomInt(5) 26 | assert.Less(t, result, 5) 27 | assert.NoError(t, err) 28 | 29 | result, err = randomInt(int64(5)) 30 | assert.Less(t, result, 5) 31 | assert.NoError(t, err) 32 | 33 | result, err = randomInt(int32(5)) 34 | assert.Less(t, result, 5) 35 | assert.NoError(t, err) 36 | 37 | _, err = randomInt(nil) 38 | assert.Error(t, err) 39 | 40 | _, err = randomInt("100") 41 | assert.Error(t, err) 42 | 43 | _, err = randomInt(1, 10) 44 | assert.Error(t, err) 45 | } 46 | ls := map[int]int{} 47 | for i := 0; i < 100; i++ { 48 | result, err := randomInt() 49 | ls[result] = result 50 | assert.NoError(t, err) 51 | } 52 | assert.Len(t, ls, 100) 53 | for i := 0; i < 100; i++ { 54 | result, err := randomInt(int32(5)) 55 | assert.Less(t, result, 5) 56 | assert.NoError(t, err) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /internal/fns/fns.go: -------------------------------------------------------------------------------- 1 | package fns 2 | 3 | import "io" 4 | 5 | // CloseIgnore closes c, ignoring any error. 6 | // Its main use is to satisfy linters. 7 | func CloseIgnore(c io.Closer) { 8 | _ = c.Close() 9 | } 10 | -------------------------------------------------------------------------------- /internal/hash/hash.go: -------------------------------------------------------------------------------- 1 | package hash 2 | 3 | import ( 4 | "golang.org/x/crypto/bcrypt" 5 | ) 6 | 7 | func Password(password string) (string, error) { 8 | bytes, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost) 9 | return string(bytes), err 10 | } 11 | 12 | func CheckPasswordHash(password, hash string) bool { 13 | err := bcrypt.CompareHashAndPassword([]byte(hash), []byte(password)) 14 | return err == nil 15 | } 16 | -------------------------------------------------------------------------------- /internal/hash/hash_test.go: -------------------------------------------------------------------------------- 1 | package hash_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork/internal/hash" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestHashPassword(t *testing.T) { 11 | hashed, err := hash.Password("1234") 12 | assert.NoError(t, err) 13 | match := hash.CheckPasswordHash("1234", hashed) 14 | assert.True(t, match) 15 | } 16 | -------------------------------------------------------------------------------- /internal/host/host.go: -------------------------------------------------------------------------------- 1 | package host 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "github.com/shirou/gopsutil/v3/cpu" 6 | ) 7 | 8 | func GetCPUPercent() float64 { 9 | perc, err := cpu.Percent(0, false) 10 | if err != nil { 11 | log.Debug(). 12 | Err(err). 
13 | Msgf("error getting CPU usage") 14 | return 0 15 | } 16 | return perc[0] 17 | } 18 | -------------------------------------------------------------------------------- /internal/host/host_test.go: -------------------------------------------------------------------------------- 1 | package host 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestGetStats(t *testing.T) { 10 | cpuPercent := GetCPUPercent() 11 | assert.GreaterOrEqual(t, cpuPercent, float64(0)) 12 | } 13 | -------------------------------------------------------------------------------- /internal/httpx/httpx.go: -------------------------------------------------------------------------------- 1 | package httpx 2 | 3 | import ( 4 | "net/http" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/runabol/tork/internal/netx" 9 | ) 10 | 11 | func StartAsync(s *http.Server) error { 12 | errChan := make(chan error) 13 | go func() { 14 | if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed { 15 | errChan <- err 16 | } 17 | }() 18 | for i := 0; i < 100; i++ { 19 | select { 20 | case err := <-errChan: 21 | return err 22 | case <-time.After(time.Millisecond * 100): 23 | } 24 | if netx.CanConnect(s.Addr) { 25 | return nil 26 | } 27 | } 28 | return errors.Errorf("unable to start API server") 29 | } 30 | -------------------------------------------------------------------------------- /internal/httpx/httpx_test.go: -------------------------------------------------------------------------------- 1 | package httpx_test 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | 10 | "github.com/runabol/tork/internal/httpx" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestStartAsync(t *testing.T) { 15 | mux := http.NewServeMux() 16 | mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 17 | _, err := io.WriteString(w, "OK") 18 | assert.NoError(t, err) 19 | }) 20 | s := &http.Server{ 21 | Addr: "localhost:7777", 22 | Handler: mux, 23 | } 24 | err := httpx.StartAsync(s) 25 | assert.NoError(t, err) 26 | defer func() { 27 | assert.NoError(t, s.Shutdown(context.Background())) 28 | }() 29 | 30 | req, err := http.NewRequest("GET", "/", nil) 31 | assert.NoError(t, err) 32 | w := httptest.NewRecorder() 33 | s.Handler.ServeHTTP(w, req) 34 | body, err := io.ReadAll(w.Body) 35 | assert.NoError(t, err) 36 | assert.Equal(t, "OK", string(body)) 37 | } 38 | -------------------------------------------------------------------------------- /internal/logging/logging.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "os" 5 | "strings" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/rs/zerolog" 9 | "github.com/rs/zerolog/log" 10 | "github.com/runabol/tork/conf" 11 | ) 12 | 13 | func SetupLogging() error { 14 | zerolog.TimeFieldFormat = zerolog.TimeFormatUnix 15 | logLevel := strings.ToLower(conf.StringDefault("logging.level", "debug")) 16 | // setup log level 17 | switch logLevel { 18 | case "debug": 19 | zerolog.SetGlobalLevel(zerolog.DebugLevel) 20 | case "info": 21 | zerolog.SetGlobalLevel(zerolog.InfoLevel) 22 | case "warn", "warning": 23 | zerolog.SetGlobalLevel(zerolog.WarnLevel) 24 | case "error": 25 | zerolog.SetGlobalLevel(zerolog.ErrorLevel) 26 | default: 27 | return errors.Errorf("invalid logging level: %s", logLevel) 28 | } 29 | // setup log format (pretty / json) 30 | logFormat := strings.ToLower(conf.StringDefault("logging.format", 
"pretty")) 31 | switch logFormat { 32 | case "pretty": 33 | log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "2006-01-02 15:04:05"}) 34 | case "json": 35 | log.Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() 36 | default: 37 | return errors.Errorf("invalid logging format: %s", logFormat) 38 | } 39 | return nil 40 | } 41 | -------------------------------------------------------------------------------- /internal/logging/writer.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "github.com/rs/zerolog" 5 | "github.com/rs/zerolog/log" 6 | ) 7 | 8 | // ZerologWriter is a writer that adapts the io.Writer interface to the zerolog.Logger. 9 | type ZerologWriter struct { 10 | taskID string 11 | level zerolog.Level 12 | } 13 | 14 | func NewZerologWriter(taskID string, level zerolog.Level) *ZerologWriter { 15 | return &ZerologWriter{ 16 | taskID: taskID, 17 | level: level, 18 | } 19 | } 20 | 21 | func (zw *ZerologWriter) Write(p []byte) (n int, err error) { 22 | logLine := string(p[:]) 23 | log.WithLevel(zw.level).Str("task-id", zw.taskID).Msg(logLine) 24 | return len(p), nil 25 | } 26 | -------------------------------------------------------------------------------- /internal/netx/netx.go: -------------------------------------------------------------------------------- 1 | package netx 2 | 3 | import ( 4 | "net" 5 | "time" 6 | 7 | "github.com/rs/zerolog/log" 8 | ) 9 | 10 | func CanConnect(address string) bool { 11 | timeout := time.Second 12 | conn, err := net.DialTimeout("tcp", address, timeout) 13 | if err != nil { 14 | return false 15 | } 16 | if conn != nil { 17 | if err := conn.Close(); err != nil { 18 | log.Error(). 19 | Err(err). 20 | Msgf("error closing connection to %s", address) 21 | } 22 | return true 23 | } 24 | return false 25 | } 26 | -------------------------------------------------------------------------------- /internal/netx/netx_test.go: -------------------------------------------------------------------------------- 1 | package netx_test 2 | 3 | import ( 4 | "net" 5 | "testing" 6 | 7 | "github.com/runabol/tork/internal/netx" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestCanConnect(t *testing.T) { 12 | ln, err := net.Listen("tcp", "localhost:9999") 13 | assert.NoError(t, err) 14 | defer func() { 15 | assert.NoError(t, ln.Close()) 16 | }() 17 | assert.True(t, netx.CanConnect("localhost:9999")) 18 | assert.False(t, netx.CanConnect("localhost:8888")) 19 | } 20 | -------------------------------------------------------------------------------- /internal/redact/redact.go: -------------------------------------------------------------------------------- 1 | package redact 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/datastore" 10 | "github.com/runabol/tork/internal/wildcard" 11 | ) 12 | 13 | const ( 14 | redactedStr = "[REDACTED]" 15 | ) 16 | 17 | type Redacter struct { 18 | matchers []Matcher 19 | ds datastore.Datastore 20 | } 21 | 22 | func NewRedacter(ds datastore.Datastore, matchers ...Matcher) *Redacter { 23 | if len(matchers) == 0 { 24 | matchers = defaultMatchers 25 | } 26 | return &Redacter{ 27 | matchers: matchers, 28 | ds: ds, 29 | } 30 | } 31 | 32 | type Matcher func(string) bool 33 | 34 | var defaultMatchers = []Matcher{ 35 | Contains("SECRET"), 36 | Contains("PASSWORD"), 37 | Contains("ACCESS_KEY"), 38 | } 39 | 40 | func Contains(substr string) func(s 
string) bool { 41 | return func(s string) bool { 42 | return strings.Contains(strings.ToUpper(s), strings.ToUpper(substr)) 43 | } 44 | } 45 | 46 | func Wildcard(pattern string) func(s string) bool { 47 | return func(s string) bool { 48 | return wildcard.Match(pattern, s) 49 | } 50 | } 51 | 52 | func (r *Redacter) RedactTask(t *tork.Task) { 53 | job, err := r.ds.GetJobByID(context.Background(), t.JobID) 54 | if err != nil { 55 | log.Error().Err(err).Msgf("error getting job for task %s", t.ID) 56 | return 57 | } 58 | r.doRedactTask(t, job.Secrets) 59 | } 60 | 61 | func (r *Redacter) doRedactTask(t *tork.Task, secrets map[string]string) { 62 | redacted := t 63 | // redact env vars 64 | redacted.Env = r.redactVars(redacted.Env, secrets) 65 | // redact pre tasks 66 | for _, p := range redacted.Pre { 67 | r.doRedactTask(p, secrets) 68 | } 69 | // redact post tasks 70 | for _, p := range redacted.Post { 71 | r.doRedactTask(p, secrets) 72 | } 73 | // redact parallel tasks 74 | if redacted.Parallel != nil { 75 | for _, p := range redacted.Parallel.Tasks { 76 | r.doRedactTask(p, secrets) 77 | } 78 | } 79 | // registry creds 80 | if redacted.Registry != nil { 81 | redacted.Registry.Password = redactedStr 82 | } 83 | if redacted.SubJob != nil { 84 | for k := range redacted.SubJob.Secrets { 85 | redacted.SubJob.Secrets[k] = redactedStr 86 | } 87 | } 88 | } 89 | 90 | func (r *Redacter) RedactJob(j *tork.Job) { 91 | redacted := j 92 | // redact inputs 93 | redacted.Inputs = r.redactVars(redacted.Inputs, j.Secrets) 94 | // redact webhook headers 95 | for _, w := range j.Webhooks { 96 | if w.Headers != nil { 97 | w.Headers = r.redactVars(w.Headers, j.Secrets) 98 | } 99 | } 100 | // redact context 101 | redacted.Context.Inputs = r.redactVars(redacted.Context.Inputs, j.Secrets) 102 | redacted.Context.Secrets = r.redactVars(redacted.Context.Secrets, j.Secrets) 103 | redacted.Context.Tasks = r.redactVars(redacted.Context.Tasks, j.Secrets) 104 | // redact tasks 105 | for _, t := range redacted.Tasks { 106 | r.doRedactTask(t, j.Secrets) 107 | } 108 | // redact execution 109 | for _, t := range redacted.Execution { 110 | r.doRedactTask(t, j.Secrets) 111 | } 112 | for k := range j.Secrets { 113 | redacted.Secrets[k] = redactedStr 114 | } 115 | } 116 | 117 | func (r *Redacter) redactVars(m map[string]string, secrets map[string]string) map[string]string { 118 | redacted := make(map[string]string) 119 | for k, v := range m { 120 | for _, m := range r.matchers { 121 | if m(k) { 122 | v = redactedStr 123 | break 124 | } 125 | } 126 | for _, secret := range secrets { 127 | if secret == v { 128 | v = redactedStr 129 | } 130 | } 131 | redacted[k] = v 132 | } 133 | return redacted 134 | } 135 | -------------------------------------------------------------------------------- /internal/reexec/command_linux.go: -------------------------------------------------------------------------------- 1 | //go:build linux 2 | 3 | package reexec // import "github.com/docker/docker/pkg/reexec" 4 | 5 | import ( 6 | "os/exec" 7 | "syscall" 8 | 9 | "golang.org/x/sys/unix" 10 | ) 11 | 12 | // Self returns the path to the current process's binary. 13 | // Returns "/proc/self/exe". 14 | func Self() string { 15 | return "/proc/self/exe" 16 | } 17 | 18 | // Command returns *exec.Cmd which has Path as current binary. Also it setting 19 | // SysProcAttr.Pdeathsig to SIGTERM. 20 | // This will use the in-memory version (/proc/self/exe) of the current binary, 21 | // it is thus safe to delete or replace the on-disk binary (os.Args[0]). 
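//
// A minimal usage sketch ("my-init" is an illustrative name, not something this
// package defines): register a child-side initializer, let Init dispatch to it
// when the binary is re-exec'd, and spawn the child with the registered name as
// args[0]:
//
//	reexec.Register("my-init", func() {
//		// child-side work goes here
//	})
//	if reexec.Init() { // in the re-exec'd child this runs "my-init" and returns true
//		return
//	}
//	cmd := reexec.Command("my-init")
//	err := cmd.Run()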
22 | // 23 | // As SysProcAttr.Pdeathsig is set, the signal will be sent to the process when 24 | // the OS thread which created the process dies. It is the caller's 25 | // responsibility to ensure that the creating thread is not terminated 26 | // prematurely. See https://go.dev/issue/27505 for more details. 27 | func Command(args ...string) *exec.Cmd { 28 | return &exec.Cmd{ 29 | Path: Self(), 30 | Args: args, 31 | SysProcAttr: &syscall.SysProcAttr{ 32 | Pdeathsig: unix.SIGTERM, 33 | }, 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /internal/reexec/command_unix.go: -------------------------------------------------------------------------------- 1 | //go:build freebsd || darwin 2 | 3 | package reexec // import "github.com/docker/docker/pkg/reexec" 4 | 5 | import ( 6 | "os/exec" 7 | ) 8 | 9 | // Self returns the path to the current process's binary. 10 | // Uses os.Args[0]. 11 | func Self() string { 12 | return naiveSelf() 13 | } 14 | 15 | // Command returns *exec.Cmd which has Path as current binary. 16 | // For example if current binary is "docker" at "/usr/bin/", then cmd.Path will 17 | // be set to "/usr/bin/docker". 18 | func Command(args ...string) *exec.Cmd { 19 | return &exec.Cmd{ 20 | Path: Self(), 21 | Args: args, 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /internal/reexec/command_unsupported.go: -------------------------------------------------------------------------------- 1 | //go:build !linux && !freebsd && !darwin 2 | 3 | package reexec // import "github.com/docker/docker/pkg/reexec" 4 | 5 | import ( 6 | "os/exec" 7 | ) 8 | 9 | func Self() string { 10 | return "" 11 | } 12 | 13 | func Command(args ...string) *exec.Cmd { 14 | return nil 15 | } 16 | -------------------------------------------------------------------------------- /internal/reexec/reexec_test.go: -------------------------------------------------------------------------------- 1 | package reexec // import "github.com/docker/docker/pkg/reexec" 2 | 3 | import ( 4 | "os" 5 | "os/exec" 6 | "testing" 7 | 8 | "github.com/runabol/tork/internal/fns" 9 | "gotest.tools/v3/assert" 10 | ) 11 | 12 | func init() { 13 | Register("reexec", func() { 14 | panic("Return Error") 15 | }) 16 | Init() 17 | } 18 | 19 | func TestRegister(t *testing.T) { 20 | defer func() { 21 | if r := recover(); r != nil { 22 | assert.Equal(t, `reexec func already registered under name "reexec"`, r) 23 | } 24 | }() 25 | Register("reexec", func() {}) 26 | } 27 | 28 | func TestCommand(t *testing.T) { 29 | cmd := Command("reexec") 30 | w, err := cmd.StdinPipe() 31 | assert.NilError(t, err, "Error on pipe creation: %v", err) 32 | defer fns.CloseIgnore(w) 33 | 34 | err = cmd.Start() 35 | assert.NilError(t, err, "Error on re-exec cmd: %v", err) 36 | err = cmd.Wait() 37 | assert.Error(t, err, "exit status 2") 38 | } 39 | 40 | func TestNaiveSelf(t *testing.T) { 41 | if os.Getenv("TEST_CHECK") == "1" { 42 | os.Exit(2) 43 | } 44 | cmd := exec.Command(naiveSelf(), "-test.run=TestNaiveSelf") 45 | cmd.Env = append(os.Environ(), "TEST_CHECK=1") 46 | err := cmd.Start() 47 | assert.NilError(t, err, "Unable to start command") 48 | err = cmd.Wait() 49 | assert.Error(t, err, "exit status 2") 50 | 51 | os.Args[0] = "mkdir" 52 | assert.Check(t, naiveSelf() != os.Args[0]) 53 | } 54 | -------------------------------------------------------------------------------- /internal/reexec/rexec.go: 
-------------------------------------------------------------------------------- 1 | // Package reexec facilitates the busybox style reexec of the docker binary that 2 | // we require because of the forking limitations of using Go. Handlers can be 3 | // registered with a name and the argv 0 of the exec of the binary will be used 4 | // to find and execute custom init paths. 5 | package reexec // import "github.com/docker/docker/pkg/reexec" 6 | 7 | import ( 8 | "fmt" 9 | "os" 10 | "os/exec" 11 | "path/filepath" 12 | ) 13 | 14 | var registeredInitializers = make(map[string]func()) 15 | 16 | // Register adds an initialization func under the specified name 17 | func Register(name string, initializer func()) { 18 | if _, exists := registeredInitializers[name]; exists { 19 | panic(fmt.Sprintf("reexec func already registered under name %q", name)) 20 | } 21 | 22 | registeredInitializers[name] = initializer 23 | } 24 | 25 | // Init is called as the first part of the exec process and returns true if an 26 | // initialization function was called. 27 | func Init() bool { 28 | initializer, exists := registeredInitializers[os.Args[0]] 29 | if exists { 30 | initializer() 31 | 32 | return true 33 | } 34 | return false 35 | } 36 | 37 | func naiveSelf() string { 38 | name := os.Args[0] 39 | if filepath.Base(name) == name { 40 | if lp, err := exec.LookPath(name); err == nil { 41 | return lp 42 | } 43 | } 44 | // handle conversion of relative paths to absolute 45 | if absName, err := filepath.Abs(name); err == nil { 46 | return absName 47 | } 48 | // if we couldn't get absolute name, return original 49 | // (NOTE: Go only errors on Abs() if os.Getwd fails) 50 | return name 51 | } 52 | -------------------------------------------------------------------------------- /internal/slices/slices.go: -------------------------------------------------------------------------------- 1 | package slices 2 | 3 | func Intersect[T comparable](a []T, b []T) bool { 4 | elements := make(map[T]struct{}) 5 | 6 | for _, item := range a { 7 | elements[item] = struct{}{} 8 | } 9 | 10 | for _, item := range b { 11 | if _, found := elements[item]; found { 12 | return true 13 | } 14 | } 15 | 16 | return false 17 | } 18 | 19 | func Map[T any, U any](items []T, f func(T) U) []U { 20 | result := make([]U, len(items)) 21 | for i, v := range items { 22 | result[i] = f(v) 23 | } 24 | return result 25 | } 26 | -------------------------------------------------------------------------------- /internal/slices/slirces_test.go: -------------------------------------------------------------------------------- 1 | package slices 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestHasIntersection(t *testing.T) { 11 | tests := []struct { 12 | slice1 []int 13 | slice2 []int 14 | want bool 15 | }{ 16 | {slice1: []int{1, 2, 3, 4, 5}, slice2: []int{4, 5, 6, 7, 8}, want: true}, 17 | {slice1: []int{1, 2, 3, 4, 5}, slice2: []int{6, 7, 8, 9, 10}, want: false}, 18 | {slice1: []int{}, slice2: []int{1, 2, 3}, want: false}, 19 | {slice1: []int{1, 2, 3}, slice2: []int{}, want: false}, 20 | {slice1: []int{}, slice2: []int{}, want: false}, 21 | {slice1: []int{1, 2, 3}, slice2: []int{3, 4, 5}, want: true}, 22 | } 23 | 24 | for _, tt := range tests { 25 | got := Intersect(tt.slice1, tt.slice2) 26 | assert.Equal(t, tt.want, got) 27 | } 28 | } 29 | 30 | func TestMap(t *testing.T) { 31 | tests := []*tork.Task{{ 32 | Name: "a", 33 | }, { 34 | Name: "b", 35 | }, { 36 | Name: "c", 37 | }} 38 | 39 
| got := Map(tests, func(tk *tork.Task) string { 40 | return tk.Name 41 | }) 42 | assert.Equal(t, []string{"a", "b", "c"}, got) 43 | } 44 | -------------------------------------------------------------------------------- /internal/syncx/map.go: -------------------------------------------------------------------------------- 1 | package syncx 2 | 3 | import "sync" 4 | 5 | type Map[K comparable, V any] struct { 6 | m sync.Map 7 | } 8 | 9 | func (m *Map[K, V]) Delete(key K) { 10 | m.m.Delete(key) 11 | } 12 | 13 | func (m *Map[K, V]) Get(key K) (value V, ok bool) { 14 | v, ok := m.m.Load(key) 15 | if !ok { 16 | return value, ok 17 | } 18 | return v.(V), ok 19 | } 20 | 21 | func (m *Map[K, V]) Set(key K, value V) { 22 | m.m.Store(key, value) 23 | } 24 | 25 | func (m *Map[K, V]) Iterate(f func(key K, value V)) { 26 | m.m.Range(func(key, value any) bool { 27 | f(key.(K), value.(V)) 28 | return true 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /internal/syncx/map_test.go: -------------------------------------------------------------------------------- 1 | package syncx_test 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "testing" 7 | "time" 8 | 9 | "github.com/runabol/tork/internal/syncx" 10 | "github.com/stretchr/testify/assert" 11 | "golang.org/x/exp/slices" 12 | ) 13 | 14 | func TestGetNonExistent(t *testing.T) { 15 | m := syncx.Map[string, int]{} 16 | v, ok := m.Get("nothing") 17 | assert.False(t, ok) 18 | assert.Equal(t, 0, v) 19 | } 20 | 21 | func TestSetAndGet(t *testing.T) { 22 | m := syncx.Map[string, int]{} 23 | m.Set("somekey", 100) 24 | v, ok := m.Get("somekey") 25 | assert.True(t, ok) 26 | assert.Equal(t, 100, v) 27 | } 28 | 29 | func TestSetAndDelete(t *testing.T) { 30 | m := syncx.Map[string, int]{} 31 | m.Set("somekey", 100) 32 | v, ok := m.Get("somekey") 33 | assert.True(t, ok) 34 | assert.Equal(t, 100, v) 35 | m.Delete("somekey") 36 | v, ok = m.Get("somekey") 37 | assert.False(t, ok) 38 | assert.Equal(t, 0, v) 39 | } 40 | 41 | func TestConcurrentSetAndGet(t *testing.T) { 42 | m := syncx.Map[string, int]{} 43 | wg := sync.WaitGroup{} 44 | wg.Add(1000) 45 | for i := 1; i <= 1000; i++ { 46 | go func(ix int) { 47 | defer wg.Done() 48 | // introduce some arbitrary latency 49 | time.Sleep(time.Millisecond * time.Duration(rand.Intn(100)+1)) 50 | m.Set("somekey", ix) 51 | v, ok := m.Get("somekey") 52 | assert.True(t, ok) 53 | assert.Greater(t, v, 0) 54 | }(i) 55 | } 56 | wg.Wait() 57 | } 58 | 59 | func TestIterate(t *testing.T) { 60 | m := syncx.Map[string, int]{} 61 | m.Set("k1", 100) 62 | m.Set("k2", 200) 63 | vals := make([]int, 0) 64 | keys := make([]string, 0) 65 | m.Iterate(func(k string, v int) { 66 | vals = append(vals, v) 67 | keys = append(keys, k) 68 | }) 69 | slices.Sort(vals) 70 | slices.Sort(keys) 71 | assert.Equal(t, []int{100, 200}, vals) 72 | assert.Equal(t, []string{"k1", "k2"}, keys) 73 | } 74 | 75 | func BenchmarkSetAndGet(b *testing.B) { 76 | for i := 0; i < b.N; i++ { 77 | m := syncx.Map[string, int]{} 78 | m.Set("somekey", 100) 79 | v, ok := m.Get("somekey") 80 | assert.True(b, ok) 81 | assert.Equal(b, 100, v) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /internal/uuid/uuid.go: -------------------------------------------------------------------------------- 1 | package uuid 2 | 3 | import ( 4 | "strings" 5 | 6 | guuid "github.com/google/uuid" 7 | "github.com/lithammer/shortuuid/v4" 8 | ) 9 | 10 | // NewUUID creates a new random UUID and returns it as a 
string or panics. 11 | func NewUUID() string { 12 | return strings.ReplaceAll(guuid.NewString(), "-", "") 13 | } 14 | 15 | // NewShortUUID returns a new UUIDv4, encoded with base57 16 | func NewShortUUID() string { 17 | return shortuuid.New() 18 | } 19 | -------------------------------------------------------------------------------- /internal/uuid/uuid_test.go: -------------------------------------------------------------------------------- 1 | package uuid_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork/internal/uuid" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestNewUUID(t *testing.T) { 11 | assert.Equal(t, 32, len(uuid.NewUUID())) 12 | } 13 | 14 | func TestNewShortUUID(t *testing.T) { 15 | ids := map[string]string{} 16 | for i := 0; i < 100; i++ { 17 | uid := uuid.NewShortUUID() 18 | assert.Len(t, uid, 22) 19 | ids[uid] = uid 20 | } 21 | assert.Len(t, ids, 100) 22 | } 23 | -------------------------------------------------------------------------------- /internal/webhook/webhook.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/rs/zerolog/log" 11 | "github.com/runabol/tork" 12 | "github.com/runabol/tork/internal/fns" 13 | ) 14 | 15 | const ( 16 | webhookDefaultMaxAttempts = 5 17 | webhookDefaultTimeout = time.Second * 5 18 | ) 19 | 20 | const ( 21 | EventJobStateChange = "job.StateChange" 22 | EventJobProgress = "job.Progress" 23 | EventTaskStateChange = "task.StateChange" 24 | EventTaskProgress = "task.Progress" 25 | EventDefault = "" 26 | ) 27 | 28 | var retryableStatusCodes = map[int]bool{ 29 | http.StatusTooManyRequests: true, // 429 30 | http.StatusInternalServerError: true, // 500 31 | http.StatusBadGateway: true, // 502 32 | http.StatusServiceUnavailable: true, // 503 33 | http.StatusGatewayTimeout: true, // 504 34 | } 35 | 36 | func isRetryable(statusCode int) bool { 37 | return retryableStatusCodes[statusCode] 38 | } 39 | 40 | func Call(wh *tork.Webhook, body any) error { 41 | b, err := json.Marshal(body) 42 | if err != nil { 43 | return errors.Wrapf(err, "[Webhook] error serializing body") 44 | } 45 | attempts := 1 46 | client := http.Client{ 47 | Timeout: webhookDefaultTimeout, 48 | } 49 | for attempts <= webhookDefaultMaxAttempts { 50 | req, err := http.NewRequest("POST", wh.URL, bytes.NewReader(b)) 51 | req.Header.Set("Content-Type", "application/json; charset=UTF-8") 52 | if err != nil { 53 | return err 54 | } 55 | if wh.Headers != nil { 56 | for name, val := range wh.Headers { 57 | req.Header.Set(name, val) 58 | } 59 | } 60 | resp, err := client.Do(req) 61 | if err != nil { 62 | log.Info().Msgf("[Webhook] request to %s failed with error: %v", wh.URL, err) 63 | time.Sleep(time.Second * time.Duration(attempts*2)) 64 | attempts++ 65 | continue 66 | } 67 | defer fns.CloseIgnore(resp.Body) 68 | // Success (2xx) 69 | if resp.StatusCode >= 200 && resp.StatusCode < 300 { 70 | return nil 71 | } 72 | // Check if the status code is retryable 73 | if !isRetryable(resp.StatusCode) { 74 | return errors.Errorf("[Webhook] request to %s failed with non-retryable status %d", wh.URL, resp.StatusCode) 75 | } 76 | log.Info().Msgf("[Webhook] request to %s failed with %d", wh.URL, resp.StatusCode) 77 | // sleep a little before retrying 78 | time.Sleep(time.Second * time.Duration(attempts*2)) 79 | attempts = attempts + 1 80 | } 81 | return errors.Errorf("[Webhook] failed to call 
webhook %s. max attempts: %d)", wh.URL, webhookDefaultMaxAttempts) 82 | } 83 | -------------------------------------------------------------------------------- /internal/webhook/webhook_test.go: -------------------------------------------------------------------------------- 1 | package webhook 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCall(t *testing.T) { 13 | // Test Cases 14 | tests := []struct { 15 | name string 16 | responseCodes []int // Sequence of response codes to return 17 | numRequests int // Number of requests expected 18 | expectedError bool // Should the function return an error? 19 | }{ 20 | { 21 | name: "Successful Response", 22 | responseCodes: []int{http.StatusOK}, 23 | numRequests: 1, 24 | expectedError: false, 25 | }, 26 | { 27 | name: "Successful Response", 28 | responseCodes: []int{http.StatusNoContent}, 29 | numRequests: 1, 30 | expectedError: false, 31 | }, 32 | { 33 | name: "Retryable Response - 500 Internal Server Error", 34 | responseCodes: []int{http.StatusInternalServerError, http.StatusInternalServerError, http.StatusOK}, 35 | numRequests: 3, 36 | expectedError: false, 37 | }, 38 | { 39 | name: "Non-Retryable Response - 400 Bad Request", 40 | responseCodes: []int{http.StatusBadRequest}, 41 | numRequests: 1, 42 | expectedError: true, 43 | }, 44 | } 45 | 46 | for _, tt := range tests { 47 | t.Run(tt.name, func(t *testing.T) { 48 | // Create a test server that returns responses in sequence 49 | requestCount := 0 50 | testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 51 | if requestCount < len(tt.responseCodes) { 52 | w.WriteHeader(tt.responseCodes[requestCount]) 53 | requestCount++ 54 | } 55 | })) 56 | defer testServer.Close() 57 | 58 | // Prepare the Webhook configuration 59 | wh := &tork.Webhook{ 60 | URL: testServer.URL, 61 | } 62 | body := map[string]string{"key": "value"} 63 | 64 | // Call the function 65 | err := Call(wh, body) 66 | 67 | // Check retries and errors 68 | assert.Equal(t, tt.numRequests, requestCount, "Number of requests sent does not match expected") 69 | if tt.expectedError { 70 | assert.Error(t, err, "Expected an error but got nil") 71 | } else { 72 | assert.NoError(t, err, "Did not expect an error but got one") 73 | } 74 | }) 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /internal/wildcard/wildcard.go: -------------------------------------------------------------------------------- 1 | package wildcard 2 | 3 | // credit: https://github.com/vodkaslime/wildcard 4 | 5 | const C = '*' 6 | 7 | func isWildPattern(pattern string) bool { 8 | for i := range pattern { 9 | c := pattern[i] 10 | if c == C { 11 | return true 12 | } 13 | } 14 | 15 | return false 16 | } 17 | 18 | func Match(pattern string, s string) bool { 19 | // Edge cases. 20 | if pattern == string(C) { 21 | return true 22 | } 23 | 24 | if pattern == "" { 25 | return s == "" 26 | } 27 | 28 | // If pattern does not contain wildcard chars, just compare the strings 29 | // to avoid extra memory allocation. 30 | if !isWildPattern(pattern) { 31 | return pattern == s 32 | } 33 | 34 | // Initialize DP. 
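// dp[i][j] reports whether pattern[:i] matches s[:j]. dp[0][0] is true
// (an empty pattern matches an empty string); the first column stays true
// only while the pattern prefix consists solely of '*' (a star may match
// the empty string); the first row is false for every non-empty s.
// At a '*', the recurrence dp[i+1][j+1] = dp[i][j] || dp[i][j+1] || dp[i+1][j]
// lets the star cover exactly one character, no characters, or one more
// character than it already covers; at a literal, the characters must be
// equal and the shorter prefixes must already match (dp[i][j]).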
35 | lp := len(pattern) 36 | ls := len(s) 37 | dp := make([][]bool, lp+1) 38 | for i := 0; i < lp+1; i++ { 39 | dp[i] = make([]bool, ls+1) 40 | } 41 | 42 | dp[0][0] = true 43 | 44 | for i := 0; i < lp; i++ { 45 | if pattern[i] == C { 46 | dp[i+1][0] = dp[i][0] 47 | } else { 48 | dp[i+1][0] = false 49 | } 50 | } 51 | 52 | for j := 0; j < ls; j++ { 53 | dp[0][j+1] = false 54 | } 55 | 56 | // Start DP. 57 | for i := 0; i < lp; i++ { 58 | for j := 0; j < ls; j++ { 59 | pc := pattern[i] 60 | sc := s[j] 61 | switch pattern[i] { 62 | case C: 63 | dp[i+1][j+1] = dp[i][j] || dp[i][j+1] || dp[i+1][j] 64 | default: 65 | if pc == sc { 66 | dp[i+1][j+1] = dp[i][j] 67 | } else { 68 | dp[i+1][j+1] = false 69 | } 70 | } 71 | } 72 | } 73 | 74 | return dp[lp][ls] 75 | } 76 | -------------------------------------------------------------------------------- /internal/wildcard/wildcard_test.go: -------------------------------------------------------------------------------- 1 | package wildcard 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | type wildPatternTestCase struct { 10 | p string 11 | m bool 12 | } 13 | 14 | type matchTestCase struct { 15 | p string 16 | s string 17 | m bool 18 | } 19 | 20 | func TestIsWildPattern(t *testing.T) { 21 | testCases1 := []wildPatternTestCase{ 22 | {"*", true}, 23 | {"**", true}, 24 | {".", false}, 25 | {"a", false}, 26 | } 27 | 28 | for _, tc := range testCases1 { 29 | b := isWildPattern(tc.p) 30 | if !assert.Equal(t, b, tc.m) { 31 | println(tc.p, tc.m) 32 | } 33 | } 34 | 35 | } 36 | 37 | func TestMatch(t *testing.T) { 38 | 39 | testCases1 := []matchTestCase{ 40 | {"", "", true}, 41 | {"*", "", true}, 42 | {"", "a", false}, 43 | {"abc", "abc", true}, 44 | {"abc", "ac", false}, 45 | {"abc", "abd", false}, 46 | {"a*c", "abc", true}, 47 | {"a*c", "abcbc", true}, 48 | {"a*c", "abcbd", false}, 49 | {"a*b*c", "ajkembbcldkcedc", true}, 50 | } 51 | 52 | for _, tc := range testCases1 { 53 | m := Match(tc.p, tc.s) 54 | if !assert.Equal(t, m, tc.m) { 55 | println(tc.p, tc.s, tc.m) 56 | } 57 | 58 | } 59 | 60 | } 61 | 62 | func TestMatch2(t *testing.T) { 63 | testCases1 := []matchTestCase{ 64 | {"jobs.*", "jobs.completed", true}, 65 | {"jobs.*", "jobs.long.completed", true}, 66 | {"tasks.*", "jobs.completed", false}, 67 | {"*.completed", "jobs.completed", true}, 68 | {"*.completed.thing", "jobs.completed", false}, 69 | } 70 | for _, tc := range testCases1 { 71 | m := Match(tc.p, tc.s) 72 | assert.Equal(t, m, tc.m) 73 | } 74 | } 75 | -------------------------------------------------------------------------------- /internal/worker/api.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "syscall" 7 | 8 | "net/http" 9 | 10 | "github.com/labstack/echo/v4" 11 | "github.com/pkg/errors" 12 | "github.com/rs/zerolog/log" 13 | "github.com/runabol/tork/broker" 14 | "github.com/runabol/tork/health" 15 | "github.com/runabol/tork/internal/httpx" 16 | "github.com/runabol/tork/internal/syncx" 17 | "github.com/runabol/tork/runtime" 18 | ) 19 | 20 | const ( 21 | MIN_PORT = 8001 22 | MAX_PORT = 8100 23 | ) 24 | 25 | type api struct { 26 | server *http.Server 27 | broker broker.Broker 28 | runtime runtime.Runtime 29 | tasks *syncx.Map[string, runningTask] 30 | port int 31 | } 32 | 33 | func newAPI(cfg Config, tasks *syncx.Map[string, runningTask]) *api { 34 | r := echo.New() 35 | s := &api{ 36 | runtime: cfg.Runtime, 37 | broker: cfg.Broker, 38 | tasks: tasks, 39 | 
server: &http.Server{ 40 | Addr: cfg.Address, 41 | Handler: r, 42 | }, 43 | } 44 | r.GET("/health", s.health) 45 | return s 46 | } 47 | 48 | func (s *api) health(c echo.Context) error { 49 | result := health.NewHealthCheck(). 50 | WithIndicator(health.ServiceRuntime, s.runtime.HealthCheck). 51 | WithIndicator(health.ServiceBroker, s.broker.HealthCheck). 52 | Do(c.Request().Context()) 53 | if result.Status == health.StatusDown { 54 | return c.JSON(http.StatusServiceUnavailable, result) 55 | } else { 56 | return c.JSON(http.StatusOK, result) 57 | } 58 | } 59 | 60 | func (s *api) start() error { 61 | if s.server.Addr != "" { 62 | if err := httpx.StartAsync(s.server); err != nil { 63 | return err 64 | } 65 | } else { 66 | // attempting to dynamically assign port 67 | for port := MIN_PORT; port < MAX_PORT; port++ { 68 | s.server.Addr = fmt.Sprintf(":%d", port) 69 | s.port = port 70 | if err := httpx.StartAsync(s.server); err != nil { 71 | if errors.Is(err, syscall.EADDRINUSE) { 72 | continue 73 | } 74 | log.Fatal().Err(err).Msgf("error starting up server") 75 | } 76 | break 77 | } 78 | } 79 | log.Info().Msgf("Worker listening on http://%s", s.server.Addr) 80 | return nil 81 | } 82 | 83 | func (s *api) shutdown(ctx context.Context) error { 84 | return s.server.Shutdown(ctx) 85 | } 86 | -------------------------------------------------------------------------------- /internal/worker/api_test.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/runabol/tork/broker" 10 | "github.com/runabol/tork/internal/syncx" 11 | "github.com/runabol/tork/runtime/docker" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func Test_health(t *testing.T) { 16 | rt, err := docker.NewDockerRuntime() 17 | assert.NoError(t, err) 18 | api := newAPI(Config{ 19 | Broker: broker.NewInMemoryBroker(), 20 | Runtime: rt, 21 | }, &syncx.Map[string, runningTask]{}) 22 | assert.NotNil(t, api) 23 | req, err := http.NewRequest("GET", "/health", nil) 24 | assert.NoError(t, err) 25 | w := httptest.NewRecorder() 26 | api.server.Handler.ServeHTTP(w, req) 27 | body, err := io.ReadAll(w.Body) 28 | 29 | assert.NoError(t, err) 30 | assert.Contains(t, string(body), "\"status\":\"UP\"") 31 | assert.Equal(t, http.StatusOK, w.Code) 32 | } 33 | -------------------------------------------------------------------------------- /job_test.go: -------------------------------------------------------------------------------- 1 | package tork_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestClone(t *testing.T) { 11 | j1 := &tork.Job{ 12 | Context: tork.JobContext{ 13 | Inputs: map[string]string{ 14 | "INPUT1": "VAL1", 15 | }, 16 | Job: map[string]string{ 17 | "id": "some-id", 18 | "name": "my job", 19 | }, 20 | }, 21 | Tasks: []*tork.Task{ 22 | { 23 | Env: map[string]string{ 24 | "VAR1": "VAL1", 25 | }, 26 | }, 27 | }, 28 | Execution: []*tork.Task{ 29 | { 30 | Env: map[string]string{ 31 | "EVAR1": "EVAL1", 32 | }, 33 | }, 34 | }, 35 | } 36 | 37 | j2 := j1.Clone() 38 | 39 | assert.Equal(t, j1.Context.Inputs, j2.Context.Inputs) 40 | assert.Equal(t, j1.Context.Job, j2.Context.Job) 41 | assert.Equal(t, j1.Tasks[0].Env, j2.Tasks[0].Env) 42 | assert.Equal(t, j1.Execution[0].Env, j2.Execution[0].Env) 43 | 44 | j2.Context.Inputs["INPUT2"] = "VAL2" 45 | j2.Tasks[0].Env["VAR2"] = "VAL2" 46 | j2.Execution[0].Env["EVAR2"] = 
"VAL2" 47 | assert.NotEqual(t, j1.Context.Inputs, j2.Context.Inputs) 48 | assert.NotEqual(t, j1.Tasks[0].Env, j2.Tasks[0].Env) 49 | assert.NotEqual(t, j1.Execution[0].Env, j2.Execution[0].Env) 50 | } 51 | -------------------------------------------------------------------------------- /locker/inmemory.go: -------------------------------------------------------------------------------- 1 | package locker 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | type InMemoryLocker struct { 11 | mu sync.Mutex 12 | locks map[string]struct{} 13 | } 14 | 15 | type inmemLock struct { 16 | key string 17 | locker *InMemoryLocker 18 | } 19 | 20 | func (l *inmemLock) ReleaseLock(_ context.Context) error { 21 | return l.locker.releaseLock(l.key) 22 | } 23 | 24 | func NewInMemoryLocker() *InMemoryLocker { 25 | return &InMemoryLocker{ 26 | locks: make(map[string]struct{}), 27 | } 28 | } 29 | 30 | func (m *InMemoryLocker) AcquireLock(ctx context.Context, key string) (Lock, error) { 31 | m.mu.Lock() 32 | defer m.mu.Unlock() 33 | if _, exists := m.locks[key]; exists { 34 | return nil, errors.Errorf("failed to acquire lock for key '%s'", key) 35 | } 36 | m.locks[key] = struct{}{} 37 | return &inmemLock{key: key, locker: m}, nil 38 | } 39 | 40 | func (m *InMemoryLocker) releaseLock(key string) error { 41 | m.mu.Lock() 42 | defer m.mu.Unlock() 43 | if _, exists := m.locks[key]; !exists { 44 | return errors.Errorf("failed to release lock for key '%s'", key) 45 | } 46 | delete(m.locks, key) 47 | return nil 48 | } 49 | -------------------------------------------------------------------------------- /locker/inmemory_test.go: -------------------------------------------------------------------------------- 1 | package locker 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestInmemoryLocker_AcquireLock(t *testing.T) { 11 | locker := NewInMemoryLocker() 12 | 13 | ctx := context.Background() 14 | key := "test_key" 15 | 16 | lock, err := locker.AcquireLock(ctx, key) 17 | assert.NoError(t, err, "lock acquisition should succeed") 18 | assert.NotNil(t, lock) 19 | 20 | lock2, err := locker.AcquireLock(ctx, key) 21 | assert.Error(t, err, "lock acquisition should not succeed") 22 | assert.Nil(t, lock2) 23 | 24 | assert.NoError(t, lock.ReleaseLock(ctx)) 25 | 26 | lock3, err := locker.AcquireLock(ctx, key) 27 | assert.NoError(t, err, "lock acquisition should succeed") 28 | assert.NoError(t, lock3.ReleaseLock(ctx)) 29 | } 30 | -------------------------------------------------------------------------------- /locker/locker.go: -------------------------------------------------------------------------------- 1 | package locker 2 | 3 | import ( 4 | "context" 5 | ) 6 | 7 | const ( 8 | LOCKER_INMEMORY = "inmemory" 9 | LOCKER_POSTGRES = "postgres" 10 | ) 11 | 12 | type Lock interface { 13 | ReleaseLock(ctx context.Context) error 14 | } 15 | 16 | type Locker interface { 17 | AcquireLock(ctx context.Context, key string) (Lock, error) 18 | } 19 | -------------------------------------------------------------------------------- /locker/postgres.go: -------------------------------------------------------------------------------- 1 | package locker 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "database/sql" 7 | "encoding/binary" 8 | 9 | "github.com/jmoiron/sqlx" 10 | _ "github.com/lib/pq" 11 | "github.com/pkg/errors" 12 | ) 13 | 14 | type PostgresLocker struct { 15 | db *sqlx.DB 16 | } 17 | 18 | type postgresLock struct { 19 | tx *sqlx.Tx 
20 | } 21 | 22 | func (l *postgresLock) ReleaseLock(ctx context.Context) error { 23 | return l.tx.Rollback() 24 | } 25 | 26 | func NewPostgresLocker(dsn string) (*PostgresLocker, error) { 27 | db, err := sqlx.Connect("postgres", dsn) 28 | if err != nil { 29 | return nil, errors.Wrapf(err, "unable to connect to postgres") 30 | } 31 | if db == nil { 32 | return nil, errors.New("database connection cannot be nil") 33 | } 34 | if err := db.Ping(); err != nil { 35 | return nil, errors.Wrapf(err, "failed to ping database") 36 | } 37 | return &PostgresLocker{db: db}, nil 38 | } 39 | 40 | func (p *PostgresLocker) AcquireLock(ctx context.Context, key string) (Lock, error) { 41 | keyHash := hashKey(key) 42 | tx, err := p.db.BeginTxx(ctx, &sql.TxOptions{}) 43 | if err != nil { 44 | return nil, err 45 | } 46 | var lockAttempt bool 47 | if err := tx.GetContext(ctx, &lockAttempt, "SELECT pg_try_advisory_xact_lock($1)", keyHash); err != nil { 48 | return nil, errors.Wrapf(err, "failed to acquire lock for key '%s'", key) 49 | } 50 | if !lockAttempt { 51 | return nil, errors.Errorf("failed to acquire lock for key '%s'", key) 52 | } 53 | return &postgresLock{tx: tx}, nil 54 | } 55 | 56 | func hashKey(key string) int64 { 57 | hash := sha256.Sum256([]byte(key)) // Compute SHA-256 hash 58 | unsigned := binary.BigEndian.Uint64(hash[:8]) // Take the first 8 bytes 59 | return int64(unsigned) 60 | } 61 | -------------------------------------------------------------------------------- /locker/postgres_test.go: -------------------------------------------------------------------------------- 1 | package locker 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestNewPostgresLocker(t *testing.T) { 11 | dsn := "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable" 12 | locker, err := NewPostgresLocker(dsn) 13 | assert.NoError(t, err, "locker initialization should succeed") 14 | assert.NotNil(t, locker, "locker should not be nil") 15 | } 16 | 17 | func TestPostgresLocker_AcquireLock(t *testing.T) { 18 | dsn := "host=localhost user=tork password=tork dbname=tork port=5432 sslmode=disable" 19 | locker, err := NewPostgresLocker(dsn) 20 | assert.NoError(t, err, "locker initialization should succeed") 21 | assert.NotNil(t, locker, "locker should not be nil") 22 | 23 | ctx := context.Background() 24 | key := "test_key" 25 | 26 | lock, err := locker.AcquireLock(ctx, key) 27 | assert.NoError(t, err, "lock acquisition should succeed") 28 | assert.NotNil(t, lock) 29 | 30 | lock2, err := locker.AcquireLock(ctx, key) 31 | assert.Error(t, err, "lock acquisition should not succeed") 32 | assert.Nil(t, lock2) 33 | 34 | assert.NoError(t, lock.ReleaseLock(ctx)) 35 | 36 | lock3, err := locker.AcquireLock(ctx, key) 37 | assert.NoError(t, err, "lock acquisition should succeed") 38 | assert.NoError(t, lock3.ReleaseLock(ctx)) 39 | } 40 | 41 | func Test_hashKey(t *testing.T) { 42 | i := hashKey("2c7eb7e1951343468ce360c906003a22") 43 | assert.Equal(t, int64(-414568140838410356), i) 44 | } 45 | -------------------------------------------------------------------------------- /middleware/job/job.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | type EventType string 10 | 11 | const ( 12 | // StateChange occurs when a job's state changes. 
13 | // Handler can inspect the job's State property 14 | // in order to determine what state the job is at. 15 | StateChange = "STATE_CHANGE" 16 | // Progress occurs when a job's progress changes. 17 | Progress = "PROGRESS" 18 | // Read occurs when a Job is read by the client 19 | // through the API. 20 | Read = "READ" 21 | ) 22 | 23 | type HandlerFunc func(ctx context.Context, et EventType, j *tork.Job) error 24 | 25 | func NoOpHandlerFunc(ctx context.Context, et EventType, j *tork.Job) error { return nil } 26 | 27 | type MiddlewareFunc func(next HandlerFunc) HandlerFunc 28 | 29 | func ApplyMiddleware(h HandlerFunc, mws []MiddlewareFunc) HandlerFunc { 30 | return func(ctx context.Context, et EventType, t *tork.Job) error { 31 | nx := next(ctx, 0, mws, h) 32 | return nx(ctx, et, t) 33 | } 34 | } 35 | 36 | func next(ctx context.Context, index int, mws []MiddlewareFunc, h HandlerFunc) HandlerFunc { 37 | if index >= len(mws) { 38 | return h 39 | } 40 | return mws[index](next(ctx, index+1, mws, h)) 41 | } 42 | -------------------------------------------------------------------------------- /middleware/job/job_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestMiddlewareBefore(t *testing.T) { 14 | order := 1 15 | h := func(ctx context.Context, _ EventType, j *tork.Job) error { 16 | assert.Equal(t, 3, order) 17 | return nil 18 | } 19 | mw1 := func(next HandlerFunc) HandlerFunc { 20 | return func(ctx context.Context, et EventType, j *tork.Job) error { 21 | assert.Equal(t, 1, order) 22 | order = order + 1 23 | return next(ctx, et, j) 24 | } 25 | } 26 | mw2 := func(next HandlerFunc) HandlerFunc { 27 | return func(ctx context.Context, et EventType, j *tork.Job) error { 28 | assert.Equal(t, 2, order) 29 | order = order + 1 30 | return next(ctx, et, j) 31 | } 32 | } 33 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 34 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Job{})) 35 | } 36 | 37 | func TestMiddlewareAfter(t *testing.T) { 38 | order := 1 39 | h := func(ctx context.Context, _ EventType, j *tork.Job) error { 40 | assert.Equal(t, 1, order) 41 | order = order + 1 42 | return nil 43 | } 44 | mw1 := func(next HandlerFunc) HandlerFunc { 45 | return func(ctx context.Context, et EventType, j *tork.Job) error { 46 | assert.NoError(t, next(ctx, et, j)) 47 | assert.Equal(t, 3, order) 48 | order = order + 1 49 | return nil 50 | } 51 | } 52 | mw2 := func(next HandlerFunc) HandlerFunc { 53 | return func(ctx context.Context, et EventType, j *tork.Job) error { 54 | assert.NoError(t, next(ctx, et, j)) 55 | assert.Equal(t, 2, order) 56 | order = order + 1 57 | return nil 58 | } 59 | } 60 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 61 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Job{})) 62 | } 63 | 64 | func TestNoMiddleware(t *testing.T) { 65 | order := 1 66 | h := func(ctx context.Context, _ EventType, j *tork.Job) error { 67 | assert.Equal(t, 1, order) 68 | order = order + 1 69 | return nil 70 | } 71 | hm := ApplyMiddleware(h, []MiddlewareFunc{}) 72 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Job{})) 73 | } 74 | 75 | func TestMiddlewareError(t *testing.T) { 76 | Err := errors.New("something bad happened") 77 | h := func(ctx context.Context, _ EventType, j *tork.Job) error { 78 | panic(1) // should not get here 79 | } 80 | 
mw1 := func(next HandlerFunc) HandlerFunc { 81 | return func(ctx context.Context, _ EventType, j *tork.Job) error { 82 | return Err 83 | } 84 | } 85 | mw2 := func(next HandlerFunc) HandlerFunc { 86 | return func(ctx context.Context, _ EventType, j *tork.Job) error { 87 | panic(1) // should not get here 88 | } 89 | } 90 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 91 | assert.ErrorIs(t, hm(context.Background(), StateChange, &tork.Job{}), Err) 92 | } 93 | -------------------------------------------------------------------------------- /middleware/job/redact.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/runabol/tork/internal/redact" 8 | ) 9 | 10 | func Redact(redacter *redact.Redacter) MiddlewareFunc { 11 | return func(next HandlerFunc) HandlerFunc { 12 | return func(ctx context.Context, et EventType, j *tork.Job) error { 13 | if et == Read { 14 | redacter.RedactJob(j) 15 | } 16 | return next(ctx, et, j) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /middleware/job/redact_test.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/runabol/tork" 8 | "github.com/runabol/tork/datastore/postgres" 9 | "github.com/runabol/tork/internal/redact" 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | func TestRedactOnRead(t *testing.T) { 14 | ds, err := postgres.NewTestDatastore() 15 | assert.NoError(t, err) 16 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{Redact(redact.NewRedacter(ds))}) 17 | j := &tork.Job{ 18 | Inputs: map[string]string{ 19 | "secret": "1234", 20 | }, 21 | } 22 | assert.NoError(t, hm(context.Background(), Read, j)) 23 | assert.Equal(t, "[REDACTED]", j.Inputs["secret"]) 24 | assert.NoError(t, ds.Close()) 25 | } 26 | 27 | func TestNoRedact(t *testing.T) { 28 | ds, err := postgres.NewTestDatastore() 29 | assert.NoError(t, err) 30 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{Redact(redact.NewRedacter(ds))}) 31 | j := &tork.Job{ 32 | Inputs: map[string]string{ 33 | "secret": "1234", 34 | }, 35 | } 36 | assert.NoError(t, hm(context.Background(), StateChange, j)) 37 | assert.Equal(t, "1234", j.Inputs["secret"]) 38 | assert.NoError(t, ds.Close()) 39 | } 40 | -------------------------------------------------------------------------------- /middleware/job/webhook.go: -------------------------------------------------------------------------------- 1 | package job 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rs/zerolog/log" 7 | "github.com/runabol/tork" 8 | "github.com/runabol/tork/internal/eval" 9 | "github.com/runabol/tork/internal/webhook" 10 | ) 11 | 12 | func Webhook(next HandlerFunc) HandlerFunc { 13 | return func(ctx context.Context, et EventType, j *tork.Job) error { 14 | if err := next(ctx, et, j); err != nil { 15 | return err 16 | } 17 | if et != StateChange && et != Progress { 18 | return nil 19 | } 20 | if len(j.Webhooks) == 0 { 21 | return nil 22 | } 23 | for _, wh := range j.Webhooks { 24 | if wh.Event != webhook.EventJobStateChange && wh.Event != webhook.EventDefault && wh.Event != webhook.EventJobProgress { 25 | continue 26 | } 27 | if et == StateChange && wh.Event != webhook.EventJobStateChange && wh.Event != webhook.EventDefault { 28 | continue 29 | } 30 | if et == Progress && wh.Event != webhook.EventJobProgress { 31 | continue 
32 | } 33 | if wh.If != "" { 34 | val, err := eval.EvaluateExpr(wh.If, map[string]any{ 35 | "job": tork.NewJobSummary(j), 36 | }) 37 | if err != nil { 38 | log.Error().Err(err).Msgf("[Webhook] error evaluating if expression %s", wh.If) 39 | continue 40 | } 41 | ifResult, ok := val.(bool) 42 | if !ok { 43 | log.Error().Msgf("[Webhook] if expression %s did not evaluate to a boolean", wh.If) 44 | continue 45 | } 46 | if !ifResult { 47 | continue 48 | } 49 | } 50 | go func(w *tork.Webhook) { 51 | callWebhook(w.Clone(), j) 52 | }(wh) 53 | } 54 | return nil 55 | } 56 | } 57 | 58 | func callWebhook(wh *tork.Webhook, job *tork.Job) { 59 | log.Debug().Msgf("[Webhook] Calling %s for job %s %s", wh.URL, job.ID, job.State) 60 | // evaluate headers 61 | for name, v := range wh.Headers { 62 | newv, err := eval.EvaluateTemplate(v, job.Context.AsMap()) 63 | if err != nil { 64 | log.Error().Err(err).Msgf("[Webhook] error evaluating header %s: %s", name, v) 65 | } 66 | wh.Headers[name] = newv 67 | } 68 | summary := tork.NewJobSummary(job) 69 | if err := webhook.Call(wh, summary); err != nil { 70 | log.Info().Err(err).Msgf("[Webhook] error calling job webhook %s", wh.URL) 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /middleware/node/node.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | type HandlerFunc func(context.Context, *tork.Node) error 10 | 11 | type MiddlewareFunc func(next HandlerFunc) HandlerFunc 12 | 13 | func ApplyMiddleware(h HandlerFunc, mws []MiddlewareFunc) HandlerFunc { 14 | return func(ctx context.Context, n *tork.Node) error { 15 | nx := next(ctx, 0, mws, h) 16 | return nx(ctx, n) 17 | } 18 | } 19 | 20 | func next(ctx context.Context, index int, mws []MiddlewareFunc, h HandlerFunc) HandlerFunc { 21 | if index >= len(mws) { 22 | return h 23 | } 24 | return mws[index](next(ctx, index+1, mws, h)) 25 | } 26 | -------------------------------------------------------------------------------- /middleware/node/node_test.go: -------------------------------------------------------------------------------- 1 | package node 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestMiddlewareBefore(t *testing.T) { 13 | order := 1 14 | h := func(ctx context.Context, n *tork.Node) error { 15 | assert.Equal(t, 3, order) 16 | return nil 17 | } 18 | mw1 := func(next HandlerFunc) HandlerFunc { 19 | return func(ctx context.Context, n *tork.Node) error { 20 | assert.Equal(t, 1, order) 21 | order = order + 1 22 | return next(ctx, n) 23 | } 24 | } 25 | mw2 := func(next HandlerFunc) HandlerFunc { 26 | return func(ctx context.Context, n *tork.Node) error { 27 | assert.Equal(t, 2, order) 28 | order = order + 1 29 | return next(ctx, n) 30 | } 31 | } 32 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 33 | assert.NoError(t, hm(context.Background(), &tork.Node{})) 34 | } 35 | 36 | func TestMiddlewareAfter(t *testing.T) { 37 | order := 1 38 | h := func(ctx context.Context, n *tork.Node) error { 39 | assert.Equal(t, 1, order) 40 | order = order + 1 41 | return nil 42 | } 43 | mw1 := func(next HandlerFunc) HandlerFunc { 44 | return func(ctx context.Context, n *tork.Node) error { 45 | assert.NoError(t, next(ctx, n)) 46 | assert.Equal(t, 3, order) 47 | order = order + 1 48 | return nil 49 | } 50 | } 51 | mw2 := func(next HandlerFunc) 
HandlerFunc { 52 | return func(ctx context.Context, n *tork.Node) error { 53 | assert.NoError(t, next(ctx, n)) 54 | assert.Equal(t, 2, order) 55 | order = order + 1 56 | return nil 57 | } 58 | } 59 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 60 | assert.NoError(t, hm(context.Background(), &tork.Node{})) 61 | } 62 | 63 | func TestNoMiddleware(t *testing.T) { 64 | order := 1 65 | h := func(ctx context.Context, n *tork.Node) error { 66 | assert.Equal(t, 1, order) 67 | order = order + 1 68 | return nil 69 | } 70 | hm := ApplyMiddleware(h, []MiddlewareFunc{}) 71 | assert.NoError(t, hm(context.Background(), &tork.Node{})) 72 | } 73 | 74 | func TestMiddlewareError(t *testing.T) { 75 | Err := errors.New("something bad happened") 76 | h := func(ctx context.Context, n *tork.Node) error { 77 | panic(1) // should not get here 78 | } 79 | mw1 := func(next HandlerFunc) HandlerFunc { 80 | return func(ctx context.Context, n *tork.Node) error { 81 | return Err 82 | } 83 | } 84 | mw2 := func(next HandlerFunc) HandlerFunc { 85 | return func(ctx context.Context, n *tork.Node) error { 86 | panic(1) // should not get here 87 | } 88 | } 89 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 90 | assert.ErrorIs(t, hm(context.Background(), &tork.Node{}), Err) 91 | } 92 | -------------------------------------------------------------------------------- /middleware/task/hostenv.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "strings" 7 | 8 | "github.com/pkg/errors" 9 | "github.com/runabol/tork" 10 | ) 11 | 12 | type HostEnv struct { 13 | vars map[string]string 14 | } 15 | 16 | func NewHostEnv(vars ...string) (*HostEnv, error) { 17 | varsMap := make(map[string]string, 0) 18 | for _, varSpec := range vars { 19 | parsed := strings.Split(varSpec, ":") 20 | if len(parsed) == 1 { 21 | varsMap[varSpec] = varSpec 22 | } else if len(parsed) == 2 { 23 | varsMap[parsed[0]] = parsed[1] 24 | } else { 25 | return nil, errors.Errorf("invalid env var spec: %s", varSpec) 26 | } 27 | } 28 | return &HostEnv{vars: varsMap}, nil 29 | } 30 | 31 | func (m *HostEnv) Execute(next HandlerFunc) HandlerFunc { 32 | return func(ctx context.Context, et EventType, t *tork.Task) error { 33 | if et == StateChange && t.State == tork.TaskStateRunning { 34 | m.setHostVars(t) 35 | } 36 | return next(ctx, et, t) 37 | } 38 | } 39 | 40 | func (m *HostEnv) setHostVars(t *tork.Task) { 41 | if t.Env == nil { 42 | t.Env = make(map[string]string) 43 | } 44 | for name, alias := range m.vars { 45 | t.Env[alias] = os.Getenv(name) 46 | } 47 | for _, pre := range t.Pre { 48 | m.setHostVars(pre) 49 | } 50 | for _, post := range t.Post { 51 | m.setHostVars(post) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /middleware/task/hostenv_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestHostEnv1(t *testing.T) { 13 | mw, err := NewHostEnv("TORK_HOST_VAR1") 14 | assert.NoError(t, os.Setenv("TORK_HOST_VAR1", "value1")) 15 | defer func() { 16 | assert.NoError(t, os.Unsetenv("TORK_HOST_VAR1")) 17 | }() 18 | assert.NoError(t, err) 19 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{mw.Execute}) 20 | t1 := &tork.Task{ 21 | State: tork.TaskStateRunning, 22 | } 23 | assert.NoError(t, 
hm(context.Background(), StateChange, t1)) 24 | assert.Equal(t, "value1", t1.Env["TORK_HOST_VAR1"]) 25 | } 26 | 27 | func TestHostEnv2(t *testing.T) { 28 | mw, err := NewHostEnv("TORK_HOST_VAR2") 29 | assert.NoError(t, os.Setenv("TORK_HOST_VAR2", "value2")) 30 | defer func() { 31 | assert.NoError(t, os.Unsetenv("TORK_HOST_VAR2")) 32 | }() 33 | assert.NoError(t, err) 34 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{mw.Execute}) 35 | t1 := &tork.Task{ 36 | State: tork.TaskStateRunning, 37 | Env: map[string]string{ 38 | "OTHER_VAR": "othervalue", 39 | }, 40 | } 41 | assert.NoError(t, hm(context.Background(), StateChange, t1)) 42 | assert.Equal(t, "value2", t1.Env["TORK_HOST_VAR2"]) 43 | assert.Equal(t, "othervalue", t1.Env["OTHER_VAR"]) 44 | } 45 | 46 | func TestHostEnv3(t *testing.T) { 47 | mw, err := NewHostEnv("TORK_HOST_VAR3:VAR3") 48 | assert.NoError(t, os.Setenv("TORK_HOST_VAR3", "value3")) 49 | defer func() { 50 | assert.NoError(t, os.Unsetenv("TORK_HOST_VAR3")) 51 | }() 52 | assert.NoError(t, err) 53 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{mw.Execute}) 54 | t1 := &tork.Task{ 55 | State: tork.TaskStateRunning, 56 | Env: map[string]string{ 57 | "OTHER_VAR": "othervalue", 58 | }, 59 | } 60 | assert.NoError(t, hm(context.Background(), StateChange, t1)) 61 | assert.Equal(t, "value3", t1.Env["VAR3"]) 62 | assert.Equal(t, "othervalue", t1.Env["OTHER_VAR"]) 63 | } 64 | 65 | func TestHostEnv4(t *testing.T) { 66 | _, err := NewHostEnv("TORK_HOST_VAR4:VAR4_:XYZ") 67 | assert.Error(t, err) 68 | } 69 | 70 | func TestHostEnv5(t *testing.T) { 71 | mw, err := NewHostEnv("TORK_HOST_VAR5:VAR5") 72 | assert.NoError(t, os.Setenv("TORK_HOST_VAR5", "value5")) 73 | defer func() { 74 | assert.NoError(t, os.Unsetenv("TORK_HOST_VAR5")) 75 | }() 76 | assert.NoError(t, err) 77 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{mw.Execute}) 78 | t1 := &tork.Task{ 79 | State: tork.TaskStateRunning, 80 | Env: map[string]string{ 81 | "OTHER_VAR": "othervalue", 82 | }, 83 | Pre: []*tork.Task{{ 84 | Name: "some pre task", 85 | }}, 86 | Post: []*tork.Task{{ 87 | Name: "some post task", 88 | }}, 89 | } 90 | assert.NoError(t, hm(context.Background(), StateChange, t1)) 91 | assert.Equal(t, "value5", t1.Env["VAR5"]) 92 | assert.Equal(t, "othervalue", t1.Env["OTHER_VAR"]) 93 | assert.Equal(t, "value5", t1.Pre[0].Env["VAR5"]) 94 | assert.Equal(t, "value5", t1.Post[0].Env["VAR5"]) 95 | } 96 | -------------------------------------------------------------------------------- /middleware/task/redact.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/runabol/tork/internal/redact" 8 | ) 9 | 10 | func Redact(redacter *redact.Redacter) MiddlewareFunc { 11 | return func(next HandlerFunc) HandlerFunc { 12 | return func(ctx context.Context, et EventType, t *tork.Task) error { 13 | if et == Read { 14 | redacter.RedactTask(t) 15 | } 16 | return next(ctx, et, t) 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /middleware/task/redact_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/runabol/tork" 8 | "github.com/runabol/tork/datastore/postgres" 9 | "github.com/runabol/tork/internal/redact" 10 | "github.com/runabol/tork/internal/uuid" 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func 
TestRedactOnRead(t *testing.T) { 15 | ds, err := postgres.NewTestDatastore() 16 | assert.NoError(t, err) 17 | ctx := context.Background() 18 | j1 := tork.Job{ 19 | ID: uuid.NewUUID(), 20 | } 21 | err = ds.CreateJob(ctx, &j1) 22 | assert.NoError(t, err) 23 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{Redact(redact.NewRedacter(ds))}) 24 | t1 := &tork.Task{ 25 | JobID: j1.ID, 26 | Env: map[string]string{ 27 | "secret": "1234", 28 | }, 29 | } 30 | assert.NoError(t, hm(context.Background(), Read, t1)) 31 | assert.Equal(t, "[REDACTED]", t1.Env["secret"]) 32 | assert.NoError(t, ds.Close()) 33 | } 34 | 35 | func TestNoRedact(t *testing.T) { 36 | ds, err := postgres.NewTestDatastore() 37 | assert.NoError(t, err) 38 | ctx := context.Background() 39 | j1 := tork.Job{ 40 | ID: uuid.NewUUID(), 41 | } 42 | err = ds.CreateJob(ctx, &j1) 43 | assert.NoError(t, err) 44 | hm := ApplyMiddleware(NoOpHandlerFunc, []MiddlewareFunc{Redact(redact.NewRedacter(ds))}) 45 | t1 := &tork.Task{ 46 | JobID: j1.ID, 47 | Env: map[string]string{ 48 | "secret": "1234", 49 | }, 50 | } 51 | assert.NoError(t, hm(context.Background(), StateChange, t1)) 52 | assert.Equal(t, "1234", t1.Env["secret"]) 53 | assert.NoError(t, ds.Close()) 54 | } 55 | -------------------------------------------------------------------------------- /middleware/task/task.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | type EventType string 10 | 11 | const ( 12 | // Started event occurs when the Worker notifies 13 | // the Coordinator that the task began processing. 14 | Started = "STARTED" 15 | // StateChange occurs when a task's state changes. 16 | // Handler can inspect the task's State property 17 | // in order to determine what state the task is at. 18 | StateChange = "STATE_CHANGE" 19 | // Progress event occurs when a task's progress changes. 20 | Progress = "PROGRESS" 21 | // Read occurs when a task is read by the client 22 | // through the API. 
23 | Read = "READ" 24 | ) 25 | 26 | type HandlerFunc func(context.Context, EventType, *tork.Task) error 27 | 28 | func NoOpHandlerFunc(context.Context, EventType, *tork.Task) error { return nil } 29 | 30 | type MiddlewareFunc func(next HandlerFunc) HandlerFunc 31 | 32 | func ApplyMiddleware(h HandlerFunc, mws []MiddlewareFunc) HandlerFunc { 33 | return func(ctx context.Context, et EventType, t *tork.Task) error { 34 | nx := next(ctx, 0, mws, h) 35 | return nx(ctx, et, t) 36 | } 37 | } 38 | 39 | func next(ctx context.Context, index int, mws []MiddlewareFunc, h HandlerFunc) HandlerFunc { 40 | if index >= len(mws) { 41 | return h 42 | } 43 | return mws[index](next(ctx, index+1, mws, h)) 44 | } 45 | -------------------------------------------------------------------------------- /middleware/task/task_test.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestMiddlewareBefore(t *testing.T) { 13 | order := 1 14 | h := func(ctx context.Context, et EventType, tk *tork.Task) error { 15 | assert.Equal(t, 3, order) 16 | return nil 17 | } 18 | mw1 := func(next HandlerFunc) HandlerFunc { 19 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 20 | assert.Equal(t, 1, order) 21 | order = order + 1 22 | return next(ctx, et, tk) 23 | } 24 | } 25 | mw2 := func(next HandlerFunc) HandlerFunc { 26 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 27 | assert.Equal(t, 2, order) 28 | order = order + 1 29 | return next(ctx, et, tk) 30 | } 31 | } 32 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 33 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Task{})) 34 | } 35 | 36 | func TestMiddlewareAfter(t *testing.T) { 37 | order := 1 38 | h := func(ctx context.Context, et EventType, tk *tork.Task) error { 39 | assert.Equal(t, 1, order) 40 | order = order + 1 41 | return nil 42 | } 43 | mw1 := func(next HandlerFunc) HandlerFunc { 44 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 45 | assert.NoError(t, next(ctx, et, tk)) 46 | assert.Equal(t, 3, order) 47 | order = order + 1 48 | return nil 49 | } 50 | } 51 | mw2 := func(next HandlerFunc) HandlerFunc { 52 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 53 | assert.NoError(t, next(ctx, et, tk)) 54 | assert.Equal(t, 2, order) 55 | order = order + 1 56 | return nil 57 | } 58 | } 59 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 60 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Task{})) 61 | } 62 | 63 | func TestNoMiddleware(t *testing.T) { 64 | order := 1 65 | h := func(ctx context.Context, et EventType, tk *tork.Task) error { 66 | assert.Equal(t, 1, order) 67 | order = order + 1 68 | return nil 69 | } 70 | hm := ApplyMiddleware(h, []MiddlewareFunc{}) 71 | assert.NoError(t, hm(context.Background(), StateChange, &tork.Task{})) 72 | } 73 | 74 | func TestMiddlewareError(t *testing.T) { 75 | Err := errors.New("something bad happened") 76 | h := func(ctx context.Context, et EventType, tk *tork.Task) error { 77 | panic(1) // should not get here 78 | } 79 | mw1 := func(next HandlerFunc) HandlerFunc { 80 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 81 | return Err 82 | } 83 | } 84 | mw2 := func(next HandlerFunc) HandlerFunc { 85 | return func(ctx context.Context, et EventType, tk *tork.Task) error { 86 | panic(1) // 
should not get here 87 | } 88 | } 89 | hm := ApplyMiddleware(h, []MiddlewareFunc{mw1, mw2}) 90 | assert.ErrorIs(t, hm(context.Background(), StateChange, &tork.Task{}), Err) 91 | } 92 | -------------------------------------------------------------------------------- /middleware/task/webhook.go: -------------------------------------------------------------------------------- 1 | package task 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/rs/zerolog/log" 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/datastore" 10 | "github.com/runabol/tork/internal/cache" 11 | "github.com/runabol/tork/internal/eval" 12 | "github.com/runabol/tork/internal/webhook" 13 | ) 14 | 15 | func Webhook(ds datastore.Datastore) MiddlewareFunc { 16 | cache := cache.New[*tork.Job](time.Hour, time.Minute) 17 | return func(next HandlerFunc) HandlerFunc { 18 | return func(ctx context.Context, et EventType, t *tork.Task) error { 19 | if err := next(ctx, et, t); err != nil { 20 | return err 21 | } 22 | if et != StateChange && et != Progress { 23 | return nil 24 | } 25 | job, err := getJob(ctx, t, ds, cache) 26 | if err != nil { 27 | return err 28 | } 29 | if len(job.Webhooks) == 0 { 30 | return nil 31 | } 32 | summary := tork.NewTaskSummary(t) 33 | for _, wh := range job.Webhooks { 34 | if wh.Event != webhook.EventTaskStateChange && wh.Event != webhook.EventTaskProgress { 35 | continue 36 | } 37 | if (wh.Event == webhook.EventTaskStateChange && et != StateChange) || 38 | (wh.Event == webhook.EventTaskProgress && et != Progress) { 39 | continue 40 | } 41 | if wh.If != "" { 42 | val, err := eval.EvaluateExpr(wh.If, map[string]any{ 43 | "task": tork.NewTaskSummary(t), 44 | "job": tork.NewJobSummary(job), 45 | }) 46 | if err != nil { 47 | log.Error().Err(err).Msgf("[Webhook] error evaluating if expression %s", wh.If) 48 | continue 49 | } 50 | ifResult, ok := val.(bool) 51 | if !ok { 52 | log.Error().Msgf("[Webhook] if expression %s did not evaluate to a boolean", wh.If) 53 | continue 54 | } 55 | if !ifResult { 56 | continue 57 | } 58 | } 59 | go func(w *tork.Webhook) { 60 | callWebhook(w.Clone(), job, summary) 61 | }(wh) 62 | } 63 | return nil 64 | } 65 | } 66 | } 67 | 68 | func getJob(ctx context.Context, t *tork.Task, ds datastore.Datastore, c *cache.Cache[*tork.Job]) (*tork.Job, error) { 69 | job, ok := c.Get(t.JobID) 70 | if ok { 71 | return job, nil 72 | } 73 | job, err := ds.GetJobByID(ctx, t.JobID) 74 | if err != nil { 75 | return nil, err 76 | } 77 | c.Set(job.ID, job) 78 | return job, nil 79 | } 80 | 81 | func callWebhook(wh *tork.Webhook, job *tork.Job, summary *tork.TaskSummary) { 82 | log.Debug().Msgf("[Webhook] Calling %s for task %s %s", wh.URL, summary.ID, summary.State) 83 | // evaluate headers 84 | for name, v := range wh.Headers { 85 | newv, err := eval.EvaluateTemplate(v, job.Context.AsMap()) 86 | if err != nil { 87 | log.Error().Err(err).Msgf("[Webhook] error evaluating header %s: %s", name, v) 88 | } 89 | wh.Headers[name] = newv 90 | } 91 | if err := webhook.Call(wh, summary); err != nil { 92 | log.Info().Err(err).Msgf("[Webhook] error calling task webhook %s", wh.URL) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /middleware/web/web.go: -------------------------------------------------------------------------------- 1 | package web 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | type MiddlewareFunc func(next HandlerFunc) HandlerFunc 8 | 9 | type HandlerFunc func(c Context) error 10 | 11 | type Context interface { 12 | // Request 
returns `*http.Request`. 13 | Request() *http.Request 14 | 15 | // Get retrieves data from the context. 16 | Get(key any) any 17 | 18 | // Set saves data in the context. 19 | Set(key any, val any) 20 | 21 | // Response returns `http.ResponseWriter`. 22 | Response() http.ResponseWriter 23 | 24 | // NoContent sends a response with no body and a status code. 25 | NoContent(code int) error 26 | 27 | // String sends a string response with status code. 28 | String(code int, s string) error 29 | 30 | // JSON sends a JSON response with status code. 31 | JSON(code int, data any) error 32 | 33 | // Bind binds path params, query params and the request body into provided type `i`. The default binder 34 | // binds body based on Content-Type header. 35 | Bind(i any) error 36 | 37 | // Error sends an error back to the client. 38 | Error(code int, err error) 39 | 40 | // Done returns a channel that's closed when work done on behalf of this 41 | // context should be canceled. 42 | Done() <-chan any 43 | } 44 | -------------------------------------------------------------------------------- /mount.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | const ( 4 | MountTypeVolume string = "volume" 5 | MountTypeBind string = "bind" 6 | MountTypeTmpfs string = "tmpfs" 7 | ) 8 | 9 | type Mount struct { 10 | ID string `json:"-"` 11 | Type string `json:"type,omitempty"` 12 | Source string `json:"source,omitempty"` 13 | Target string `json:"target,omitempty"` 14 | } 15 | -------------------------------------------------------------------------------- /node.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | import ( 4 | "time" 5 | ) 6 | 7 | var LAST_HEARTBEAT_TIMEOUT = time.Minute * 5 8 | var HEARTBEAT_RATE = time.Second * 30 9 | 10 | type NodeStatus string 11 | 12 | const ( 13 | NodeStatusUP NodeStatus = "UP" 14 | NodeStatusDown NodeStatus = "DOWN" 15 | NodeStatusOffline NodeStatus = "OFFLINE" 16 | ) 17 | 18 | type Node struct { 19 | ID string `json:"id,omitempty"` 20 | Name string `json:"name,omitempty"` 21 | StartedAt time.Time `json:"startedAt,omitempty"` 22 | CPUPercent float64 `json:"cpuPercent,omitempty"` 23 | LastHeartbeatAt time.Time `json:"lastHeartbeatAt,omitempty"` 24 | Queue string `json:"queue,omitempty"` 25 | Status NodeStatus `json:"status,omitempty"` 26 | Hostname string `json:"hostname,omitempty"` 27 | Port int `json:"port,omitempty"` 28 | TaskCount int `json:"taskCount,omitempty"` 29 | Version string `json:"version"` 30 | } 31 | 32 | func (n *Node) Clone() *Node { 33 | return &Node{ 34 | ID: n.ID, 35 | Name: n.Name, 36 | StartedAt: n.StartedAt, 37 | CPUPercent: n.CPUPercent, 38 | LastHeartbeatAt: n.LastHeartbeatAt, 39 | Queue: n.Queue, 40 | Status: n.Status, 41 | Hostname: n.Hostname, 42 | Port: n.Port, 43 | TaskCount: n.TaskCount, 44 | Version: n.Version, 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /role.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | import "time" 4 | 5 | const ( 6 | ROLE_PUBLIC string = "public" 7 | ) 8 | 9 | type Role struct { 10 | ID string `json:"id,omitempty"` 11 | Slug string `json:"slug,omitempty"` 12 | Name string `json:"name,omitempty"` 13 | CreatedAt *time.Time `json:"createdAt,omitempty"` 14 | } 15 | 16 | func (r *Role) Clone() *Role { 17 | return &Role{ 18 | ID: r.ID, 19 | Slug: r.Slug, 20 | Name: r.Name, 21 | CreatedAt: r.CreatedAt, 22 | 
} 23 | } 24 | 25 | type UserRole struct { 26 | ID string `json:"id,omitempty"` 27 | UserID string `json:"userId,omitempty"` 28 | RoleID string `json:"roleId,omitempty"` 29 | CreatedAt *time.Time `json:"createdAt,omitempty"` 30 | } 31 | -------------------------------------------------------------------------------- /runtime/docker/archive.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "archive/tar" 5 | "bufio" 6 | "os" 7 | 8 | "github.com/pkg/errors" 9 | ) 10 | 11 | type archive struct { 12 | f *os.File 13 | reader *bufio.Reader 14 | writer *tar.Writer 15 | } 16 | 17 | func NewTempArchive() (*archive, error) { 18 | f, err := os.CreateTemp("", "archive-*.tar") 19 | if err != nil { 20 | return nil, errors.Wrapf(err, "error creating temp archive file") 21 | } 22 | a := &archive{ 23 | f: f, 24 | writer: tar.NewWriter(f), 25 | } 26 | return a, nil 27 | } 28 | 29 | func (a *archive) Read(p []byte) (int, error) { 30 | if a.reader == nil { 31 | if err := a.f.Close(); err != nil { 32 | return 0, err 33 | } 34 | f, err := os.Open(a.f.Name()) 35 | if err != nil { 36 | return 0, err 37 | } 38 | a.f = f 39 | a.reader = bufio.NewReader(f) 40 | } 41 | n, err := a.reader.Read(p) 42 | if err != nil { 43 | if err := a.f.Close(); err != nil { 44 | return 0, err 45 | } 46 | } 47 | return n, err 48 | } 49 | 50 | func (a *archive) Name() string { 51 | return a.f.Name() 52 | } 53 | 54 | func (a *archive) Remove() error { 55 | return os.Remove(a.f.Name()) 56 | } 57 | 58 | func (a *archive) WriteFile(name string, mode int64, contents []byte) error { 59 | hdr := &tar.Header{ 60 | Name: name, 61 | Mode: mode, 62 | Size: int64(len(contents)), 63 | } 64 | if err := a.writer.WriteHeader(hdr); err != nil { 65 | return err 66 | } 67 | if _, err := a.writer.Write(contents); err != nil { 68 | return err 69 | } 70 | return nil 71 | } 72 | -------------------------------------------------------------------------------- /runtime/docker/archive_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "archive/tar" 5 | "io" 6 | "testing" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestCreateArchive(t *testing.T) { 12 | ar, err := NewTempArchive() 13 | assert.NoError(t, err) 14 | assert.NotNil(t, ar) 15 | 16 | err = ar.WriteFile("some_file.txt", 0444, []byte("hello world")) 17 | assert.NoError(t, err) 18 | 19 | r := tar.NewReader(ar) 20 | 21 | h, err := r.Next() 22 | 23 | assert.NoError(t, err) 24 | assert.Equal(t, "some_file.txt", h.Name) 25 | 26 | b, err := io.ReadAll(r) 27 | assert.NoError(t, err) 28 | assert.Equal(t, "hello world", string(b)) 29 | 30 | assert.NoError(t, ar.Remove()) 31 | } 32 | -------------------------------------------------------------------------------- /runtime/docker/auth_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "encoding/base64" 5 | "testing" 6 | ) 7 | 8 | func TestDecodeBase64Auth(t *testing.T) { 9 | for _, tc := range base64TestCases() { 10 | t.Run(tc.name, testBase64Case(tc, func() (string, string, error) { 11 | return decodeBase64Auth(tc.config) 12 | })) 13 | } 14 | } 15 | 16 | func TestGetRegistryCredentials(t *testing.T) { 17 | t.Run("from base64 auth", func(t *testing.T) { 18 | for _, tc := range base64TestCases() { 19 | t.Run(tc.name, func(T *testing.T) { 20 | config := config{ 21 | AuthConfigs: map[string]authConfig{ 22 | 
"some.domain": tc.config, 23 | }, 24 | } 25 | testBase64Case(tc, func() (string, string, error) { 26 | return config.getRegistryCredentials("some.domain") 27 | }) 28 | }) 29 | } 30 | }) 31 | } 32 | 33 | type base64TestCase struct { 34 | name string 35 | config authConfig 36 | expUser string 37 | expPass string 38 | expErr bool 39 | } 40 | 41 | func base64TestCases() []base64TestCase { 42 | cases := []base64TestCase{ 43 | {name: "empty"}, 44 | {name: "not base64", expErr: true, config: authConfig{Auth: "not base64"}}, 45 | {name: "invalid format", expErr: true, config: authConfig{ 46 | Auth: base64.StdEncoding.EncodeToString([]byte("invalid format")), 47 | }}, 48 | {name: "happy case", expUser: "user", expPass: "pass", config: authConfig{ 49 | Auth: base64.StdEncoding.EncodeToString([]byte("user:pass")), 50 | }}, 51 | } 52 | 53 | return cases 54 | } 55 | 56 | type testAuthFn func() (string, string, error) 57 | 58 | func testBase64Case(tc base64TestCase, authFn testAuthFn) func(t *testing.T) { 59 | return func(t *testing.T) { 60 | u, p, err := authFn() 61 | if tc.expErr && err == nil { 62 | t.Fatal("expected error") 63 | } 64 | 65 | if u != tc.expUser || p != tc.expPass { 66 | t.Errorf("decoded username and password do not match, expected user: %s, password: %s, got user: %s, password: %s", tc.expUser, tc.expPass, u, p) 67 | } 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /runtime/docker/bind.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | "strings" 8 | "sync" 9 | 10 | "github.com/pkg/errors" 11 | "github.com/rs/zerolog/log" 12 | "github.com/runabol/tork" 13 | ) 14 | 15 | type BindMounter struct { 16 | cfg BindConfig 17 | mounts map[string]string 18 | mu sync.RWMutex 19 | } 20 | 21 | type BindConfig struct { 22 | Allowed bool 23 | Sources []string 24 | } 25 | 26 | func NewBindMounter(cfg BindConfig) *BindMounter { 27 | return &BindMounter{ 28 | cfg: cfg, 29 | mounts: make(map[string]string), 30 | } 31 | } 32 | 33 | func (m *BindMounter) Mount(ctx context.Context, mnt *tork.Mount) error { 34 | if !m.cfg.Allowed { 35 | return errors.New("bind mounts are not allowed") 36 | } 37 | if !m.isSourceAllowed(mnt.Source) { 38 | return errors.New(fmt.Sprintf("src bind mount is not allowed: %s", mnt.Source)) 39 | } 40 | m.mu.RLock() 41 | _, ok := m.mounts[mnt.Source] 42 | m.mu.RUnlock() 43 | if ok { 44 | return nil 45 | } 46 | m.mu.Lock() 47 | defer m.mu.Unlock() 48 | // check if the source dir exists 49 | if _, err := os.Stat(mnt.Source); os.IsNotExist(err) { 50 | if err := os.MkdirAll(mnt.Source, 0707); err != nil { 51 | return errors.Wrapf(err, "error creating mount directory: %s", mnt.Source) 52 | } 53 | log.Info().Msgf("Created bind mount: %s", mnt.Source) 54 | } else if err != nil { 55 | return errors.Wrapf(err, "error stat on directory: %s", mnt.Source) 56 | } 57 | m.mounts[mnt.Source] = mnt.Source 58 | return nil 59 | } 60 | 61 | func (m *BindMounter) isSourceAllowed(src string) bool { 62 | if len(m.cfg.Sources) == 0 { 63 | return true 64 | } 65 | for _, allow := range m.cfg.Sources { 66 | if strings.EqualFold(allow, src) { 67 | return true 68 | } 69 | } 70 | return false 71 | } 72 | 73 | func (m *BindMounter) Unmount(ctx context.Context, mnt *tork.Mount) error { 74 | return nil 75 | } 76 | -------------------------------------------------------------------------------- /runtime/docker/bind_test.go: 
-------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "path" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/runabol/tork" 11 | "github.com/runabol/tork/internal/uuid" 12 | "github.com/stretchr/testify/assert" 13 | ) 14 | 15 | func TestMountBindNotAllowed(t *testing.T) { 16 | m := &BindMounter{cfg: BindConfig{ 17 | Allowed: false, 18 | }} 19 | 20 | err := m.Mount(context.Background(), &tork.Mount{ 21 | Type: tork.MountTypeBind, 22 | Source: "/tmp", 23 | Target: "/somevol", 24 | }) 25 | assert.Error(t, err) 26 | } 27 | 28 | func TestMountCreate(t *testing.T) { 29 | m := NewBindMounter(BindConfig{ 30 | Allowed: true, 31 | }) 32 | dir := path.Join(os.TempDir(), uuid.NewUUID()) 33 | wg := sync.WaitGroup{} 34 | c := 10 35 | wg.Add(c) 36 | for i := 0; i < c; i++ { 37 | go func() { 38 | defer wg.Done() 39 | err := m.Mount(context.Background(), &tork.Mount{ 40 | Type: tork.MountTypeBind, 41 | Source: dir, 42 | Target: "/somevol", 43 | }) 44 | assert.NoError(t, err) 45 | }() 46 | } 47 | wg.Wait() 48 | } 49 | 50 | func TestMountSources(t *testing.T) { 51 | 52 | t.Run("allowed source", func(t *testing.T) { 53 | m := NewBindMounter(BindConfig{ 54 | Allowed: true, 55 | Sources: []string{"/tmp"}, 56 | }) 57 | mnt := tork.Mount{ 58 | Type: tork.MountTypeBind, 59 | Source: "/tmp", 60 | Target: "/somevol", 61 | } 62 | 63 | err := m.Mount(context.Background(), &mnt) 64 | assert.NoError(t, err) 65 | assert.Equal(t, "/somevol", mnt.Target) 66 | assert.Equal(t, "/tmp", mnt.Source) 67 | assert.Equal(t, tork.MountTypeBind, mnt.Type) 68 | }) 69 | 70 | t.Run("non allowed source", func(t *testing.T) { 71 | m := NewBindMounter(BindConfig{ 72 | Allowed: true, 73 | Sources: []string{"/tmp"}, 74 | }) 75 | mnt := tork.Mount{ 76 | Type: tork.MountTypeBind, 77 | Source: "/tmp/sub/path", 78 | Target: "/somevol", 79 | } 80 | 81 | err := m.Mount(context.Background(), &mnt) 82 | assert.Error(t, err) 83 | }) 84 | 85 | t.Run("non allowed source", func(t *testing.T) { 86 | m := NewBindMounter(BindConfig{ 87 | Allowed: true, 88 | Sources: []string{"/tmp"}, 89 | }) 90 | mnt := tork.Mount{ 91 | Type: tork.MountTypeBind, 92 | Source: "/other", 93 | Target: "/somevol", 94 | } 95 | 96 | err := m.Mount(context.Background(), &mnt) 97 | assert.Error(t, err) 98 | }) 99 | 100 | } 101 | -------------------------------------------------------------------------------- /runtime/docker/config.go: -------------------------------------------------------------------------------- 1 | package docker // import "https://github.com/cpuguy83/dockercfg" 2 | 3 | // Config represents the on disk format of the docker CLI's config file. 
4 | type config struct { 5 | AuthConfigs map[string]authConfig `json:"auths"` 6 | HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` 7 | PsFormat string `json:"psFormat,omitempty"` 8 | ImagesFormat string `json:"imagesFormat,omitempty"` 9 | NetworksFormat string `json:"networksFormat,omitempty"` 10 | PluginsFormat string `json:"pluginsFormat,omitempty"` 11 | VolumesFormat string `json:"volumesFormat,omitempty"` 12 | StatsFormat string `json:"statsFormat,omitempty"` 13 | DetachKeys string `json:"detachKeys,omitempty"` 14 | CredentialsStore string `json:"credsStore,omitempty"` 15 | CredentialHelpers map[string]string `json:"credHelpers,omitempty"` 16 | Filename string `json:"-"` // Note: for internal use only 17 | ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` 18 | ServicesFormat string `json:"servicesFormat,omitempty"` 19 | TasksFormat string `json:"tasksFormat,omitempty"` 20 | SecretFormat string `json:"secretFormat,omitempty"` 21 | ConfigFormat string `json:"configFormat,omitempty"` 22 | NodesFormat string `json:"nodesFormat,omitempty"` 23 | PruneFilters []string `json:"pruneFilters,omitempty"` 24 | Proxies map[string]proxyConfig `json:"proxies,omitempty"` 25 | Experimental string `json:"experimental,omitempty"` 26 | StackOrchestrator string `json:"stackOrchestrator,omitempty"` 27 | Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` 28 | CurrentContext string `json:"currentContext,omitempty"` 29 | CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` 30 | Aliases map[string]string `json:"aliases,omitempty"` 31 | } 32 | 33 | // ProxyConfig contains proxy configuration settings 34 | type proxyConfig struct { 35 | HTTPProxy string `json:"httpProxy,omitempty"` 36 | HTTPSProxy string `json:"httpsProxy,omitempty"` 37 | NoProxy string `json:"noProxy,omitempty"` 38 | FTPProxy string `json:"ftpProxy,omitempty"` 39 | } 40 | 41 | // AuthConfig contains authorization information for connecting to a Registry 42 | type authConfig struct { 43 | Username string `json:"username,omitempty"` 44 | Password string `json:"password,omitempty"` 45 | Auth string `json:"auth,omitempty"` 46 | 47 | // Email is an optional value associated with the username. 48 | // This field is deprecated and will be removed in a later 49 | // version of docker. 50 | Email string `json:"email,omitempty"` 51 | 52 | ServerAddress string `json:"serveraddress,omitempty"` 53 | 54 | // IdentityToken is used to authenticate the user and get 55 | // an access token for the registry. 
56 | IdentityToken string `json:"identitytoken,omitempty"` 57 | 58 | // RegistryToken is a bearer token to be sent to a registry 59 | RegistryToken string `json:"registrytoken,omitempty"` 60 | } 61 | 62 | // KubernetesConfig contains Kubernetes orchestrator settings 63 | type KubernetesConfig struct { 64 | AllNamespaces string `json:"allNamespaces,omitempty"` 65 | } 66 | -------------------------------------------------------------------------------- /runtime/docker/reference_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | ) 8 | 9 | func TestParse(t *testing.T) { 10 | ref, err := parseRef("ubuntu:mantic") 11 | assert.NoError(t, err) 12 | assert.Equal(t, "", ref.domain) 13 | assert.Equal(t, "ubuntu", ref.path) 14 | assert.Equal(t, "mantic", ref.tag) 15 | 16 | ref, err = parseRef("localhost:9090/ubuntu:mantic") 17 | assert.NoError(t, err) 18 | assert.Equal(t, "localhost:9090", ref.domain) 19 | assert.Equal(t, "ubuntu", ref.path) 20 | assert.Equal(t, "mantic", ref.tag) 21 | 22 | ref, err = parseRef("localhost:9090/ubuntu:mantic-2.7") 23 | assert.NoError(t, err) 24 | assert.Equal(t, "localhost:9090", ref.domain) 25 | assert.Equal(t, "ubuntu", ref.path) 26 | assert.Equal(t, "mantic-2.7", ref.tag) 27 | 28 | ref, err = parseRef("my-registry/ubuntu:mantic-2.7") 29 | assert.NoError(t, err) 30 | assert.Equal(t, "my-registry", ref.domain) 31 | assert.Equal(t, "ubuntu", ref.path) 32 | assert.Equal(t, "mantic-2.7", ref.tag) 33 | 34 | ref, err = parseRef("my-registry/ubuntu") 35 | assert.NoError(t, err) 36 | assert.Equal(t, "my-registry", ref.domain) 37 | assert.Equal(t, "ubuntu", ref.path) 38 | assert.Equal(t, "", ref.tag) 39 | 40 | ref, err = parseRef("ubuntu") 41 | assert.NoError(t, err) 42 | assert.Equal(t, "", ref.domain) 43 | assert.Equal(t, "ubuntu", ref.path) 44 | assert.Equal(t, "", ref.tag) 45 | 46 | ref, err = parseRef("ubuntu:latest") 47 | assert.NoError(t, err) 48 | assert.Equal(t, "", ref.domain) 49 | assert.Equal(t, "ubuntu", ref.path) 50 | assert.Equal(t, "latest", ref.tag) 51 | } 52 | -------------------------------------------------------------------------------- /runtime/docker/tmpfs.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/pkg/errors" 7 | "github.com/runabol/tork" 8 | ) 9 | 10 | type TmpfsMounter struct { 11 | } 12 | 13 | func NewTmpfsMounter() *TmpfsMounter { 14 | return &TmpfsMounter{} 15 | } 16 | 17 | func (m *TmpfsMounter) Mount(ctx context.Context, mnt *tork.Mount) error { 18 | if mnt.Target == "" { 19 | return errors.Errorf("tmpfs target is required") 20 | } 21 | if mnt.Source != "" { 22 | return errors.Errorf("tmpfs source should be empty") 23 | } 24 | return nil 25 | } 26 | 27 | func (m *TmpfsMounter) Unmount(ctx context.Context, mnt *tork.Mount) error { 28 | return nil 29 | } 30 | -------------------------------------------------------------------------------- /runtime/docker/tmpfs_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/runabol/tork" 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | func TestMountTmpfs(t *testing.T) { 12 | mounter := NewTmpfsMounter() 13 | ctx := context.Background() 14 | mnt := &tork.Mount{ 15 | Type: tork.MountTypeTmpfs, 16 | Target: "/target", 17 | } 18 | err := 
mounter.Mount(ctx, mnt) 19 | assert.NoError(t, err) 20 | } 21 | 22 | func TestMountTmpfsWithSource(t *testing.T) { 23 | mounter := NewTmpfsMounter() 24 | ctx := context.Background() 25 | mnt := &tork.Mount{ 26 | Type: tork.MountTypeTmpfs, 27 | Target: "/target", 28 | Source: "/source", 29 | } 30 | err := mounter.Mount(ctx, mnt) 31 | assert.Error(t, err) 32 | } 33 | 34 | func TestUnmountTmpfs(t *testing.T) { 35 | mounter := NewTmpfsMounter() 36 | ctx := context.Background() 37 | mnt := &tork.Mount{ 38 | Type: tork.MountTypeTmpfs, 39 | Target: "/target", 40 | } 41 | err := mounter.Mount(ctx, mnt) 42 | assert.NoError(t, err) 43 | err = mounter.Unmount(ctx, mnt) 44 | assert.NoError(t, err) 45 | } 46 | -------------------------------------------------------------------------------- /runtime/docker/volume.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/docker/docker/api/types/filters" 7 | "github.com/docker/docker/api/types/volume" 8 | "github.com/docker/docker/client" 9 | "github.com/pkg/errors" 10 | "github.com/rs/zerolog/log" 11 | "github.com/runabol/tork" 12 | "github.com/runabol/tork/internal/uuid" 13 | ) 14 | 15 | type VolumeMounter struct { 16 | client *client.Client 17 | } 18 | 19 | func NewVolumeMounter() (*VolumeMounter, error) { 20 | dc, err := client.NewClientWithOpts(client.FromEnv) 21 | if err != nil { 22 | return nil, err 23 | } 24 | return &VolumeMounter{client: dc}, nil 25 | } 26 | 27 | func (m *VolumeMounter) Mount(ctx context.Context, mn *tork.Mount) error { 28 | name := uuid.NewUUID() 29 | mn.Source = name 30 | v, err := m.client.VolumeCreate(ctx, volume.CreateOptions{Name: name}) 31 | if err != nil { 32 | return err 33 | } 34 | log.Debug(). 
35 | Str("mount-point", v.Mountpoint).Msgf("created volume %s", v.Name) 36 | return nil 37 | } 38 | 39 | func (m *VolumeMounter) Unmount(ctx context.Context, mn *tork.Mount) error { 40 | ls, err := m.client.VolumeList(ctx, volume.ListOptions{Filters: filters.NewArgs(filters.Arg("name", mn.Source))}) 41 | if err != nil { 42 | return err 43 | } 44 | if len(ls.Volumes) == 0 { 45 | return errors.Errorf("unknown volume: %s", mn.Source) 46 | } 47 | if err := m.client.VolumeRemove(ctx, mn.Source, true); err != nil { 48 | return err 49 | } 50 | log.Debug().Msgf("removed volume %s", mn.Source) 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /runtime/docker/volume_test.go: -------------------------------------------------------------------------------- 1 | package docker 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/docker/docker/api/types/volume" 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCreateVolume(t *testing.T) { 13 | vm, err := NewVolumeMounter() 14 | assert.NoError(t, err) 15 | 16 | ctx := context.Background() 17 | mnt := &tork.Mount{} 18 | err = vm.Mount(ctx, mnt) 19 | assert.NoError(t, err) 20 | 21 | ls, err := vm.client.VolumeList(ctx, volume.ListOptions{}) 22 | assert.NoError(t, err) 23 | found := false 24 | for _, v := range ls.Volumes { 25 | if v.Name == mnt.Source { 26 | found = true 27 | break 28 | } 29 | } 30 | assert.True(t, found) 31 | 32 | err = vm.Unmount(ctx, mnt) 33 | assert.NoError(t, err) 34 | 35 | ls, err = vm.client.VolumeList(ctx, volume.ListOptions{}) 36 | assert.NoError(t, err) 37 | 38 | for _, v := range ls.Volumes { 39 | assert.NotEqual(t, "testvol", v.Name) 40 | } 41 | } 42 | 43 | func Test_createMountVolume(t *testing.T) { 44 | m, err := NewVolumeMounter() 45 | assert.NoError(t, err) 46 | 47 | mnt := &tork.Mount{ 48 | Type: tork.MountTypeVolume, 49 | Target: "/somevol", 50 | } 51 | 52 | err = m.Mount(context.Background(), mnt) 53 | assert.NoError(t, err) 54 | defer func() { 55 | assert.NoError(t, m.Unmount(context.Background(), mnt)) 56 | }() 57 | assert.Equal(t, "/somevol", mnt.Target) 58 | assert.NotEmpty(t, mnt.Source) 59 | } 60 | -------------------------------------------------------------------------------- /runtime/mount.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | type Mounter interface { 10 | Mount(ctx context.Context, mnt *tork.Mount) error 11 | Unmount(ctx context.Context, mnt *tork.Mount) error 12 | } 13 | -------------------------------------------------------------------------------- /runtime/multi.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/runabol/tork" 9 | ) 10 | 11 | type MultiMounter struct { 12 | mounters map[string]Mounter 13 | mapping map[string]Mounter 14 | mu sync.RWMutex 15 | } 16 | 17 | func NewMultiMounter() *MultiMounter { 18 | return &MultiMounter{ 19 | mounters: map[string]Mounter{}, 20 | mapping: make(map[string]Mounter), 21 | } 22 | } 23 | 24 | func (m *MultiMounter) Mount(ctx context.Context, mnt *tork.Mount) error { 25 | if mnt.ID == "" { 26 | return errors.New("must provide a mount ID") 27 | } 28 | m.mu.RLock() 29 | mounter, ok := m.mounters[mnt.Type] 30 | m.mu.RUnlock() 31 | if !ok { 32 | return 
errors.Errorf("unknown mount type: %s", mnt.Type) 33 | } 34 | m.mu.Lock() 35 | m.mapping[mnt.ID] = mounter 36 | m.mu.Unlock() 37 | return mounter.Mount(ctx, mnt) 38 | } 39 | 40 | func (m *MultiMounter) Unmount(ctx context.Context, mnt *tork.Mount) error { 41 | if mnt.ID == "" { 42 | return errors.New("must provide a mount ID") 43 | } 44 | m.mu.RLock() 45 | mounter, ok := m.mapping[mnt.ID] 46 | m.mu.RUnlock() 47 | if !ok { 48 | return errors.Errorf("unmounter not found for: %+v", mnt) 49 | } 50 | m.mu.Lock() 51 | delete(m.mapping, mnt.ID) 52 | m.mu.Unlock() 53 | return mounter.Unmount(ctx, mnt) 54 | } 55 | 56 | func (m *MultiMounter) RegisterMounter(mtype string, mr Mounter) { 57 | m.mu.Lock() 58 | defer m.mu.Unlock() 59 | if _, ok := m.mounters[mtype]; ok { 60 | panic("mount: Register called twice for mounter") 61 | } 62 | m.mounters[mtype] = mr 63 | } 64 | -------------------------------------------------------------------------------- /runtime/multi_test.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/runabol/tork/internal/uuid" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | type fakeMounter struct { 15 | newType string 16 | } 17 | 18 | func (m *fakeMounter) Mount(ctx context.Context, mnt *tork.Mount) error { 19 | mnt.Type = m.newType 20 | return nil 21 | } 22 | 23 | func (m *fakeMounter) Unmount(ctx context.Context, mnt *tork.Mount) error { 24 | return nil 25 | } 26 | 27 | func TestMultiVolumeMount(t *testing.T) { 28 | m := NewMultiMounter() 29 | m.RegisterMounter(tork.MountTypeVolume, &fakeMounter{newType: tork.MountTypeVolume}) 30 | ctx := context.Background() 31 | mnt := &tork.Mount{ 32 | ID: uuid.NewUUID(), 33 | Type: tork.MountTypeVolume, 34 | Target: "/mnt", 35 | } 36 | err := m.Mount(ctx, mnt) 37 | defer func() { 38 | err := m.Unmount(ctx, mnt) 39 | assert.NoError(t, err) 40 | }() 41 | assert.NoError(t, err) 42 | } 43 | 44 | func TestMultiBadTypeMount(t *testing.T) { 45 | m := NewMultiMounter() 46 | ctx := context.Background() 47 | mnt := &tork.Mount{ 48 | ID: uuid.NewUUID(), 49 | Type: "badone", 50 | Target: "/mnt", 51 | } 52 | err := m.Mount(ctx, mnt) 53 | assert.Error(t, err) 54 | } 55 | 56 | func TestMultiMountUnmount(t *testing.T) { 57 | m := NewMultiMounter() 58 | m.RegisterMounter(tork.MountTypeVolume, &fakeMounter{newType: "other-type"}) 59 | ctx := context.Background() 60 | mnt := &tork.Mount{ 61 | ID: uuid.NewUUID(), 62 | Type: tork.MountTypeVolume, 63 | Target: "/mnt", 64 | } 65 | err := m.Mount(ctx, mnt) 66 | defer func() { 67 | err := m.Unmount(ctx, mnt) 68 | assert.NoError(t, err) 69 | }() 70 | assert.NoError(t, err) 71 | } 72 | 73 | func TestMountConcurrency(t *testing.T) { 74 | ctx := context.Background() 75 | m := NewMultiMounter() 76 | m.RegisterMounter(tork.MountTypeVolume, &fakeMounter{newType: tork.MountTypeVolume}) 77 | w := sync.WaitGroup{} 78 | w.Add(1_000) 79 | for i := 0; i < 1_000; i++ { 80 | go func() { 81 | defer w.Done() 82 | mnt := &tork.Mount{ 83 | ID: uuid.NewUUID(), 84 | Type: tork.MountTypeVolume, 85 | Target: "/mnt", 86 | } 87 | err := m.Mount(ctx, mnt) 88 | assert.NoError(t, err) 89 | err = m.Unmount(ctx, mnt) 90 | assert.NoError(t, err) 91 | }() 92 | } 93 | w.Wait() 94 | assert.Len(t, m.mapping, 0) 95 | } 96 | -------------------------------------------------------------------------------- /runtime/podman/volume.go: 
-------------------------------------------------------------------------------- 1 | package podman 2 | 3 | import ( 4 | "context" 5 | "os" 6 | 7 | "github.com/pkg/errors" 8 | "github.com/runabol/tork" 9 | ) 10 | 11 | type VolumeMounter struct { 12 | } 13 | 14 | func NewVolumeMounter() *VolumeMounter { 15 | return &VolumeMounter{} 16 | } 17 | 18 | func (m *VolumeMounter) Mount(ctx context.Context, mn *tork.Mount) error { 19 | vol, err := os.MkdirTemp("", "tork-volume-*") 20 | if err != nil { 21 | return errors.Wrap(err, "failed to create temporary directory") 22 | } 23 | if err := os.Chmod(vol, 0777); err != nil { 24 | return errors.Wrap(err, "failed to chmod temporary directory") 25 | } 26 | mn.Source = vol 27 | return nil 28 | } 29 | 30 | func (m *VolumeMounter) Unmount(ctx context.Context, mn *tork.Mount) error { 31 | return os.RemoveAll(mn.Source) 32 | } 33 | -------------------------------------------------------------------------------- /runtime/podman/volume_test.go: -------------------------------------------------------------------------------- 1 | package podman 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | "github.com/runabol/tork" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | func TestCreateVolume(t *testing.T) { 13 | vm := NewVolumeMounter() 14 | 15 | ctx := context.Background() 16 | mnt := &tork.Mount{} 17 | err := vm.Mount(ctx, mnt) 18 | assert.NoError(t, err) 19 | 20 | _, err = os.Stat(mnt.Source) 21 | assert.NoError(t, err) 22 | 23 | err = vm.Unmount(ctx, mnt) 24 | assert.NoError(t, err) 25 | 26 | _, err = os.Stat(mnt.Source) 27 | assert.Error(t, err) 28 | } 29 | 30 | func Test_createMountVolume(t *testing.T) { 31 | vm := NewVolumeMounter() 32 | 33 | mnt := &tork.Mount{ 34 | Type: tork.MountTypeVolume, 35 | Target: "/somevol", 36 | } 37 | 38 | err := vm.Mount(context.Background(), mnt) 39 | assert.NoError(t, err) 40 | defer func() { 41 | assert.NoError(t, vm.Unmount(context.Background(), mnt)) 42 | }() 43 | assert.Equal(t, "/somevol", mnt.Target) 44 | assert.NotEmpty(t, mnt.Source) 45 | } 46 | -------------------------------------------------------------------------------- /runtime/runtime.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/runabol/tork" 7 | ) 8 | 9 | const ( 10 | Docker = "docker" 11 | Podman = "podman" 12 | Shell = "shell" 13 | ) 14 | 15 | // Runtime is the actual runtime environment that executes a task. 
16 | type Runtime interface { 17 | Run(ctx context.Context, t *tork.Task) error 18 | HealthCheck(ctx context.Context) error 19 | } 20 | -------------------------------------------------------------------------------- /runtime/shell/setid_unix.go: -------------------------------------------------------------------------------- 1 | //go:build freebsd || darwin || linux 2 | 3 | package shell 4 | 5 | import ( 6 | "strconv" 7 | "syscall" 8 | 9 | "github.com/rs/zerolog/log" 10 | ) 11 | 12 | func SetUID(uid string) { 13 | if uid != DEFAULT_UID { 14 | uidi, err := strconv.Atoi(uid) 15 | if err != nil { 16 | log.Fatal().Err(err).Msgf("invalid uid: %s", uid) 17 | } 18 | if err := syscall.Setuid(uidi); err != nil { 19 | log.Fatal().Err(err).Msgf("error setting uid: %s", uid) 20 | } 21 | } 22 | } 23 | 24 | func SetGID(gid string) { 25 | if gid != DEFAULT_GID { 26 | gidi, err := strconv.Atoi(gid) 27 | if err != nil { 28 | log.Fatal().Err(err).Msgf("invalid gid: %s", gid) 29 | } 30 | if err := syscall.Setgid(gidi); err != nil { 31 | log.Fatal().Err(err).Msgf("error setting gid: %s", gid) 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /runtime/shell/setid_unsupported.go: -------------------------------------------------------------------------------- 1 | //go:build !freebsd && !darwin && !linux 2 | 3 | package shell 4 | 5 | import ( 6 | "github.com/rs/zerolog/log" 7 | ) 8 | 9 | func SetUID(uid string) { 10 | if uid != DEFAULT_UID { 11 | log.Fatal().Msgf("setting uid is only supported on unix/linux systems") 12 | } 13 | } 14 | 15 | func SetGID(gid string) { 16 | if gid != DEFAULT_GID { 17 | log.Fatal().Msgf("setting gid is only supported on unix/linux systems") 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /stats.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | type Metrics struct { 4 | Jobs JobMetrics `json:"jobs"` 5 | Tasks TaskMetrics `json:"tasks"` 6 | Nodes NodeMetrics `json:"nodes"` 7 | } 8 | 9 | type JobMetrics struct { 10 | Running int `json:"running"` 11 | } 12 | 13 | type TaskMetrics struct { 14 | Running int `json:"running"` 15 | } 16 | 17 | type NodeMetrics struct { 18 | Running int `json:"online"` 19 | CPUPercent float64 `json:"cpuPercent"` 20 | } 21 | -------------------------------------------------------------------------------- /task_test.go: -------------------------------------------------------------------------------- 1 | package tork_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/runabol/tork" 7 | "github.com/stretchr/testify/assert" 8 | ) 9 | 10 | func TestCloneTask(t *testing.T) { 11 | t1 := &tork.Task{ 12 | Env: map[string]string{ 13 | "VAR1": "VAL1", 14 | }, 15 | Limits: &tork.TaskLimits{ 16 | CPUs: "1", 17 | }, 18 | Parallel: &tork.ParallelTask{ 19 | Tasks: []*tork.Task{ 20 | { 21 | Env: map[string]string{ 22 | "PVAR1": "PVAL1", 23 | }, 24 | }, 25 | }, 26 | }, 27 | } 28 | t2 := t1.Clone() 29 | assert.Equal(t, t1.Env, t2.Env) 30 | assert.Equal(t, t1.Limits.CPUs, t2.Limits.CPUs) 31 | assert.Equal(t, t1.Parallel.Tasks[0].Env, t2.Parallel.Tasks[0].Env) 32 | 33 | t2.Env["VAR2"] = "VAL2" 34 | t2.Limits.CPUs = "2" 35 | t2.Parallel.Tasks[0].Env["PVAR2"] = "PVAL2" 36 | assert.NotEqual(t, t1.Env, t2.Env) 37 | assert.NotEqual(t, t1.Limits.CPUs, t2.Limits.CPUs) 38 | assert.NotEqual(t, t1.Parallel.Tasks[0].Env, t2.Parallel.Tasks[0].Env) 39 | } 40 | 41 | func TestIsActive(t *testing.T) { 42 | t1 := &tork.Task{ 
43 | State: tork.TaskStateCancelled, 44 | } 45 | assert.False(t, t1.IsActive()) 46 | t2 := &tork.Task{ 47 | State: tork.TaskStateCreated, 48 | } 49 | assert.True(t, t2.IsActive()) 50 | t3 := &tork.Task{ 51 | State: tork.TaskStatePending, 52 | } 53 | assert.True(t, t3.IsActive()) 54 | t4 := &tork.Task{ 55 | State: tork.TaskStateRunning, 56 | } 57 | assert.True(t, t4.IsActive()) 58 | t5 := &tork.Task{ 59 | State: tork.TaskStateCompleted, 60 | } 61 | assert.False(t, t5.IsActive()) 62 | } 63 | -------------------------------------------------------------------------------- /user.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | import "time" 4 | 5 | type UsernameKey string 6 | 7 | const ( 8 | USER_GUEST string = "guest" 9 | USERNAME UsernameKey = "username" 10 | ) 11 | 12 | type User struct { 13 | ID string `json:"id,omitempty"` 14 | Name string `json:"name,omitempty"` 15 | Username string `json:"username,omitempty"` 16 | PasswordHash string `json:"-"` 17 | Password string `json:"password,omitempty"` 18 | CreatedAt *time.Time `json:"createdAt,omitempty"` 19 | Disabled bool `json:"disabled,omitempty"` 20 | } 21 | 22 | func (u *User) Clone() *User { 23 | return &User{ 24 | ID: u.ID, 25 | Name: u.Name, 26 | Username: u.Username, 27 | PasswordHash: u.PasswordHash, 28 | CreatedAt: u.CreatedAt, 29 | Disabled: u.Disabled, 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package tork 2 | 3 | const ( 4 | Version = "0.1.128" 5 | ) 6 | 7 | var ( 8 | GitCommit string = "develop" 9 | ) 10 | --------------------------------------------------------------------------------
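A minimal usage sketch (not a file from the repository) showing how the task middleware chain in middleware/task composes: ApplyMiddleware wraps a final HandlerFunc with the HostEnv middleware so that host environment variables are injected when a RUNNING task's StateChange event fires. The main package, the handler body, and the "HOME:TORK_HOME" mapping are illustrative assumptions; the types and functions used are the ones shown above.

package main

import (
	"context"
	"fmt"

	"github.com/runabol/tork"
	"github.com/runabol/tork/middleware/task"
)

func main() {
	// Expose the host's HOME variable to tasks under the alias TORK_HOME
	// (the "NAME:ALIAS" spec accepted by NewHostEnv). The mapping is an
	// illustrative assumption.
	hostEnv, err := task.NewHostEnv("HOME:TORK_HOME")
	if err != nil {
		panic(err)
	}

	// Final handler, invoked after all middleware have run.
	handler := func(ctx context.Context, et task.EventType, t *tork.Task) error {
		fmt.Println(t.Env["TORK_HOME"])
		return nil
	}

	chain := task.ApplyMiddleware(handler, []task.MiddlewareFunc{hostEnv.Execute})

	// HostEnv only injects variables on a StateChange event for a RUNNING task.
	t := &tork.Task{State: tork.TaskStateRunning}
	if err := chain(context.Background(), task.StateChange, t); err != nil {
		panic(err)
	}
}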