├── .commit_template.txt ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── ci-build.yml │ └── release.yaml ├── .gitignore ├── Changelog.md ├── Contributing.md ├── Dockerfile ├── LICENSE.txt ├── Makefile ├── README.md ├── api └── v1 │ ├── client.pb.go │ ├── client.proto │ ├── cluster.pb.go │ ├── cluster.proto │ ├── empty.pb.go │ ├── empty.proto │ ├── job.pb.go │ ├── job.proto │ ├── jsm.pb.go │ └── jsm.proto ├── beanstalkd ├── cmd │ ├── beanstalkd.go │ └── cmd_beanstalkd.go ├── core │ ├── client.go │ ├── client_test.go │ ├── cmd_data.go │ ├── cmd_data_test.go │ ├── cmd_proc.go │ ├── cmd_type.go │ ├── cmd_type_string.go │ ├── core.go │ ├── parse.go │ └── parse_test.go ├── proto │ ├── conn.go │ ├── conn_state.go │ ├── conn_state_string.go │ ├── conn_test.go │ └── tcp_server.go └── proxy │ ├── bool.go │ ├── bool_test.go │ └── client.go ├── cluster ├── client │ └── cluster_client.go ├── cmd │ ├── cmd_client.go │ └── cmd_cluster.go └── server │ ├── cluster_server.go │ ├── health_server.go │ ├── jsm_server.go │ ├── reservations_controller.go │ ├── reservations_controller_test.go │ └── serverfakes │ └── fake_jsm_tick.go ├── coverage_test.sh ├── deploy ├── beanstalkd.procfile ├── dev-cluster.procfile ├── dev.procfile ├── docker-compose.yml └── prometheus.yml ├── doc ├── Beanstalkd-Proxy.md ├── Design.md ├── GettingStarted.md ├── GettingStarted3.md ├── arch.dot ├── arch.png ├── bean_3185124.png ├── bean_3185124.svg ├── beanstalkd_lifecycle.dot └── beanstalkd_lifecycle.png ├── generate_changelog.sh ├── go.mod ├── go.sum ├── main.go ├── state ├── client_resv.go ├── client_resv_test.go ├── errors.go ├── index.go ├── job_heap.go ├── job_heap_test.go ├── jsm.go ├── jsm_test.go ├── state.go └── state_string.go ├── store ├── client_uri.go ├── client_uri_test.go ├── snapshot.go ├── snapshot_test.go └── store.go ├── tests └── e2e │ └── protocol_test.go └── tools ├── opts.go └── tools.go /.commit_template.txt: 
-------------------------------------------------------------------------------- 1 | 2 | # A short line summary 3 | # 4 | # Description elaborates and should provide context for the change and explain what it does. 5 | # Plain text Only 6 | # 7 | # Fixes # 8 | # 9 | # Example of a good commit message: 10 | # Refer https://golang.org/doc/contribute.html#commit_messages 11 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | Describe the bug: 11 | A clear and concise description of what the bug is. 12 | 13 | Provide the steps to reproduce the behavior: 14 | - Step 1 15 | - Step 2 16 | 17 | What is the expected behavior? 18 | A clear and concise description of what you expected to happen. 19 | 20 | Other: 21 | - OS: [e.g. Linux, Docker, OSX] 22 | - Version or branch-commit-hash [e.g. 1.0, master-abcde] 23 | - Add any other context about the problem here. 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 
18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/workflows/ci-build.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: 4 | pull_request: 5 | branches: [ master ] 6 | push: 7 | branches: [ master ] 8 | 9 | jobs: 10 | build: 11 | name: Build 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Install Protoc 15 | uses: arduino/setup-protoc@master 16 | with: 17 | version: '3.11.4' 18 | 19 | - name: Set up Go 1.16.3 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: 1.16.3 23 | id: go 24 | 25 | - name: Check out code into the Go module directory 26 | uses: actions/checkout@v2 27 | 28 | - name: Build 29 | run: make build 30 | 31 | - name: UnitTests 32 | run: make test 33 | 34 | - name: Docker login 35 | run: echo '${{ secrets.DOCKERHUB_PASS }}' | docker login --username 1xyz --password-stdin 36 | 37 | - name: Docker build and push 38 | run: make docker-push -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | # Sequence of patterns matched against refs/tags 4 | tags: 5 | - 'v*' # Push events to matching v*, i.e. 
v1.0, v20.15.10 6 | 7 | name: Release 8 | 9 | jobs: 10 | build: 11 | name: Release 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Install Protoc 15 | uses: arduino/setup-protoc@master 16 | with: 17 | version: '3.11.4' 18 | 19 | - name: Set up Go 1.13+ 20 | uses: actions/setup-go@v2 21 | with: 22 | go-version: ^1.13 23 | id: go 24 | 25 | - name: Check out code into the Go module directory 26 | uses: actions/checkout@v2 27 | 28 | - name: Build x64 linux release 29 | run: make release/linux 30 | 31 | - name: Zip linux build 32 | run: zip -j coolbeans-linux-amd64.zip README.md LICENSE.txt bin/linux/coolbeans 33 | 34 | - name: Build x64 darwin release 35 | run: make release/darwin 36 | 37 | - name: Zip darwin build 38 | run: zip -j coolbeans-darwin-amd64.zip README.md LICENSE.txt bin/darwin/coolbeans 39 | 40 | - name: Get tag name 41 | id: get_tag_name 42 | run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//} 43 | 44 | - name: Create Release 45 | id: create_release 46 | uses: actions/create-release@v1 47 | env: 48 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 49 | with: 50 | tag_name: ${{ github.ref }} 51 | release_name: Release ${{ github.ref }} 52 | draft: false 53 | prerelease: false 54 | 55 | - name: Upload Linux Release Asset 56 | id: upload-release-asset-linux 57 | uses: actions/upload-release-asset@v1 58 | env: 59 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 60 | with: 61 | upload_url: ${{ steps.create_release.outputs.upload_url }} 62 | asset_path: ./coolbeans-linux-amd64.zip 63 | asset_name: coolbeans-linux-amd64-${{ steps.get_tag_name.outputs.VERSION }}.zip 64 | asset_content_type: application/zip 65 | 66 | - name: Upload darwin Release Asset 67 | id: upload-release-asset-darwin 68 | uses: actions/upload-release-asset@v1 69 | env: 70 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 71 | with: 72 | upload_url: ${{ steps.create_release.outputs.upload_url }} 73 | asset_path: ./coolbeans-darwin-amd64.zip 74 | asset_name: coolbeans-darwin-amd64-${{ 
steps.get_tag_name.outputs.VERSION }}.zip 75 | asset_content_type: application/zip 76 | 77 | - name: Docker login 78 | run: echo '${{ secrets.DOCKERHUB_PASS }}' | docker login --username 1xyz --password-stdin 79 | 80 | - name: Docker build and push release 81 | run: make docker-release -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | 10 | # Test binary, built with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | coverage.txt 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | vendor/ 19 | bin/ 20 | -------------------------------------------------------------------------------- /Changelog.md: -------------------------------------------------------------------------------- 1 | ## v0.1.10 (2020-05-25) 2 | 3 | * Update release documentation (#35) [View](https://github.com/1xyz/coolbeans/commit/8c2db92894486349928d296164518fe0f35efc56) 4 | * Renamed jellybeans metrics to coolbeans (#36) [View](https://github.com/1xyz/coolbeans/commit/e065409e77358e6d8a712bb37d5b466c8b29f689) 5 | ## v0.1.11 (2020-05-27) 6 | 7 | * Cleaned changelog & push repo (#37) [View](https://github.com/1xyz/coolbeans/commit/94e3aaae88c58fb50e35782a8a66fb5a6ae23abb) 8 | * Add changelog [View](https://github.com/1xyz/coolbeans/commit/0ca9dd91fe57edd433d7a8893599759a92623b75) 9 | * Update README.md [View](https://github.com/1xyz/coolbeans/commit/d305b5890505ad9f851478f965fa5e932d6e73e5) 10 | * Update README.md [View](https://github.com/1xyz/coolbeans/commit/983853568e2f0ce7fd0bf7c11e074065311399f3) 11 | * Added getting started guide (#38) [View](https://github.com/1xyz/coolbeans/commit/991454f47f1de254b1826f8db4b90188463b1da3) 12 | * Appropriate sigterm handling for 
beanstalkd process (#40) [View](https://github.com/1xyz/coolbeans/commit/edef35b61773e8436dde2c1d005d625489209b0d) 13 | -------------------------------------------------------------------------------- /Contributing.md: -------------------------------------------------------------------------------- 1 | Contributing 2 | ============ 3 | 4 | Coolbeans is currently at `alpha` release quality. It is all about improving the quality by adoption and testing. 5 | 6 | By participating in this project you agree to abide by the [code of conduct](https://www.contributor-covenant.org/version/2/0/code_of_conduct/). 7 | 8 | - [Building coolbeans](#building-coolbeans) 9 | - [Dependencies](#dependencies) 10 | - [Build the binary](#build-the-binary) 11 | - [Run the service](#run-the-service) 12 | - [Other run options](#other-run-options) 13 | - [Testing](#testing) 14 | - [Setup a beanstalkd client to test manually](#manual-test) 15 | - [Unit Tests](#unit-tests) 16 | - [End to end Tests](#run-end-to-end-tests) 17 | - [Other](#other) 18 | 19 | 20 | Building coolbeans 21 | ------------------ 22 | 23 | This section walks through the process of building the source and running coolbeans. 24 | 25 | ### Dependencies 26 | 27 | * [Install golang v1.13+](https://golang.org/dl/) 28 | - Coolbeans is written in golang, it requires go version 1.13 or newer. I prefer to use [go version manager](https://github.com/moovweb/gvm) to manage multiple go versions. 29 | - Ensure `$GOPATH/bin` is added to your path. 30 | 31 | * [Install Docker](https://docs.docker.com/get-docker/) 32 | - A [Dockerfile](../Dockerfile) is provided. 33 | 34 | * [Install Protocol Buffer Compiler (protoc) & the Go plugin (protoc-gen-go)](https://grpc.io/docs/quickstart/go/#protocol-buffers) 35 | - The project depends on protocol buffers and uses the Grpc library. 36 | - Ensure you have `protoc` & `protoc-gen-go` installed and accessible in your paths. 37 | 38 | ### Build the binary. 
39 | 40 | The [Makefile](./Makefile) provides different target options to build and run from source. 41 | 42 | To explore these options, run `make` which shows all possible targets: 43 | 44 | make 45 | 46 | For example: To generate a statically linked binary for the local operating-system. 47 | 48 | make build 49 | 50 | 51 | ### Run the service 52 | 53 | Coolbeans typically runs as two processes; refer to the [design](doc/Design.md) for more detail. 54 | 55 | Run a single node cluster. Note this creates two processes, a cluster-node process and beanstalkd proxy: 56 | 57 | make run-single 58 | 59 | 60 | Run a three node cluster. Note this spawns four processes, three cluster-node processes and beanstalkd proxy: 61 | 62 | make run-cluster 63 | 64 | 65 | ### Other Run options 66 | 67 | Run a single process beanstalkd (no replication via Raft, the entire queue is in memory): 68 | 69 | make run-beanstalkd 70 | 71 | Run a three node cluster via docker-compose. Run this prior to running docker-compose-up 72 | 73 | make docker-compose-build 74 | 75 | make docker-compose-up 76 | 77 | Once done: 78 | 79 | make docker-compose-down 80 | 81 | 82 | Testing 83 | ------- 84 | 85 | ### Manual test 86 | 87 | Download and run a beanstalk client from [here](https://github.com/beanstalkd/beanstalkd/wiki/Tools). 88 | 89 | Some clients I tested with: 90 | - [Aurora](https://github.com/xuri/aurora/releases/tag/2.2) 91 | - [yabean](https://github.com/1xyz/yabean) 92 | 93 | 94 | ### Unit Tests 95 | 96 | Run unit-tests 97 | 98 | make test 99 | 100 | Explore other test options by running `make` 101 | 102 | 103 | ### Run end to end tests 104 | 105 | Run end to end test scenarios against a running cluster. 106 | 107 | make test-e2e 108 | 109 | 110 | Other 111 | ----- 112 | 113 | - Reporting an issue, please refer [here](https://github.com/1xyz/coolbeans/issues/new/choose) 114 | 115 | - Guidelines for a good commit message.
please refer [here](https://golang.org/doc/contribute.html#commit_messages) 116 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.13.8-alpine3.11 AS builder 2 | 3 | RUN apk update && apk add make git build-base curl protobuf && \ 4 | rm -rf /var/cache/apk/* 5 | 6 | RUN go get golang.org/x/tools/cmd/stringer 7 | 8 | ADD . /go/src/github.com/1xyz/coolbeans 9 | WORKDIR /go/src/github.com/1xyz/coolbeans 10 | RUN make release/linux 11 | 12 | ### 13 | 14 | FROM alpine:latest AS coolbeans 15 | 16 | RUN apk update && apk add ca-certificates bash 17 | WORKDIR /root/ 18 | COPY --from=builder /go/src/github.com/1xyz/coolbeans/bin/linux/coolbeans . -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | GO=go 2 | GOFMT=gofmt 3 | GOREMAN=goreman 4 | PROTOC=protoc 5 | DELETE=rm 6 | DOCKER=docker 7 | DOCKER_COMPOSE=docker-compose 8 | DOCKER_REPO=1xyz/coolbeans 9 | DOCKER_DEV_REPO=1xyz/coolbeans-developer 10 | BINARY=coolbeans 11 | BUILD_BINARY=bin/$(BINARY) 12 | # go source files, ignore vendor directory 13 | SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") 14 | BRANCH = $(shell git rev-parse --abbrev-ref HEAD) 15 | SAFE_BRANCH = $(subst /,-,$(BRANCH)) 16 | # current git version short-hash 17 | VER = $(shell git rev-parse --short HEAD) 18 | GIT_RELEASE_TAG=$(shell git describe --tags) 19 | DOCKER_TAG = "$(SAFE_BRANCH)-$(VER)" 20 | 21 | info: 22 | @echo " target ⾖ Description. 
" 23 | @echo " ----------------------------------------------------------------- " 24 | @echo 25 | @echo " build generate a local build ⇨ $(BUILD_BINARY) " 26 | @echo " clean clean up bin/ & go test cache " 27 | @echo " fmt format go code files using go fmt " 28 | @echo " generate generate enum-strings & test-fakes go files " 29 | @echo " protoc compile proto files to generate go files " 30 | @echo " release/darwin generate a darwin target build " 31 | @echo " release/linux generate a linux target build " 32 | @echo " tidy clean up go module file " 33 | @echo 34 | @echo " Run targets " 35 | @echo " -----------" 36 | @echo " run-single run a single node cluster w/ beanstalkd proxy " 37 | @echo " run-cluster run a three node cluster w/ beanstalkd proxy " 38 | @echo " run-beanstalkd run a single process beanstalkd " 39 | @echo 40 | @echo " Test targets " 41 | @echo " -----------" 42 | @echo " test run unit-tests " 43 | @echo " testc run unit-tests w/ coverage " 44 | @echo " testv run unit-tests verbose " 45 | @echo " test-e2e run E2E tests requires a running beanstalkd " 46 | @echo 47 | @echo " Docker targets" 48 | @echo " --------------" 49 | @echo " docker-build build image $(DOCKER_DEV_REPO):$(DOCKER_TAG) " 50 | @echo " docker-push push image $(DOCKER_DEV_REPO):$(DOCKER_TAG) " 51 | @echo " docker-release push image $(DOCKER_REPO):$(GIT_RELEASE_TAG) " 52 | @echo " docker-compose-up run docker-compose-up " 53 | @echo " docker-compose-down run docker-compose-down " 54 | @echo " ------------------------------------------------------------------" 55 | 56 | build: clean fmt protoc 57 | $(GO) build -o $(BUILD_BINARY) -v main.go 58 | 59 | 60 | .PHONY: clean 61 | clean: 62 | $(DELETE) -rf bin/ 63 | $(GO) clean -cache 64 | 65 | 66 | .PHONY: fmt 67 | fmt: 68 | $(GOFMT) -s -l -w $(SRC) 69 | 70 | 71 | # tools deps to generate code (stringer...) 
72 | # https://github.com/golang/go/wiki/Modules#how-can-i-track-tool-dependencies-for-a-module 73 | .PHONY: generate 74 | generate: 75 | $(GO) generate ./... 76 | 77 | 78 | .PHONY: protoc 79 | protoc: 80 | $(GO) get -u github.com/golang/protobuf/protoc-gen-go 81 | $(PROTOC) -I api/v1 api/v1/*.proto --go_out=plugins=grpc:api/v1 --go_opt=paths=source_relative 82 | 83 | 84 | release/%: clean fmt protoc 85 | @echo "build no race on alpine. https://github.com/golang/go/issues/14481" 86 | $(GO) test ./... 87 | @echo "build GOOS: $(subst release/,,$@) & GOARCH: amd64" 88 | GOOS=$(subst release/,,$@) GOARCH=amd64 $(GO) build -o bin/$(subst release/,,$@)/$(BINARY) -v main.go 89 | 90 | .PHONY: run-single 91 | run-single: build 92 | $(GO) get github.com/mattn/goreman 93 | $(GOREMAN) -f deploy/dev.procfile start 94 | 95 | .PHONY: run-cluster 96 | run-cluster: build 97 | $(GO) get github.com/mattn/goreman 98 | $(GOREMAN) -f deploy/dev-cluster.procfile start 99 | 100 | .PHONY: run-beanstalkd 101 | run-beanstalkd: build 102 | $(GO) get github.com/mattn/goreman 103 | $(GOREMAN) -f deploy/beanstalkd.procfile start 104 | 105 | 106 | # test w/ race detector on always 107 | # https://golang.org/doc/articles/race_detector.html#Typical_Data_Races 108 | .PHONY: test 109 | test: build 110 | $(GO) test -race ./... 111 | 112 | 113 | .PHONY: testv 114 | testv: build 115 | $(GO) test -v -race ./... 116 | 117 | 118 | .PHONY: testc 119 | testc: build 120 | $(GO) get github.com/ory/go-acc 121 | ./coverage_test.sh 122 | $(GO) tool cover -html=coverage.txt 123 | 124 | .PHONY: test-e2e 125 | test-e2e: 126 | $(GO) test -v -tags=integration ./tests/e2e 127 | 128 | .PHONY: tidy 129 | tidy: 130 | $(GO) mod tidy 131 | 132 | docker-build: 133 | $(DOCKER) build -t $(DOCKER_DEV_REPO):$(DOCKER_TAG) -f Dockerfile . 
134 | 135 | docker-push: docker-build 136 | $(DOCKER) push $(DOCKER_DEV_REPO):$(DOCKER_TAG) 137 | 138 | # docker-release builds and pushes the release-tagged image itself;
# it must not list a prerequisite. The original `docker-release: docker-release`
# was a circular self-dependency (GNU make drops it with a warning), so the
# bogus prerequisite is removed.
docker-release: 139 | $(DOCKER) build -t $(DOCKER_REPO):$(GIT_RELEASE_TAG) -f Dockerfile . 140 | $(DOCKER) push $(DOCKER_REPO):$(GIT_RELEASE_TAG) 141 | 142 | docker-compose-build: 143 | $(DOCKER_COMPOSE) --file deploy/docker-compose.yml --project-directory . build --no-cache 144 | 145 | docker-compose-up: 146 | $(DOCKER_COMPOSE) --file deploy/docker-compose.yml --project-directory . up --remove-orphans 147 | 148 | docker-compose-down: 149 | $(DOCKER_COMPOSE) --file deploy/docker-compose.yml --project-directory . down 150 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build](https://github.com/1xyz/coolbeans/workflows/Build/badge.svg)](https://github.com/1xyz/coolbeans/actions?query=workflow%3ABuild) 2 | [![Release](https://github.com/1xyz/coolbeans/workflows/Release/badge.svg)](https://github.com/1xyz/coolbeans/actions?query=workflow%3ARelease) 3 | [![Docker](https://img.shields.io/docker/pulls/1xyz/coolbeans)](https://hub.docker.com/r/1xyz/coolbeans/tags) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/1xyz/coolbeans)](https://goreportcard.com/report/github.com/1xyz/coolbeans) 5 | 6 | 7 | 8 | - [Coolbeans](#coolbeans) 9 | - [Motivation](#motivation) 10 | - [Key features](#key-features) 11 | - [Releases](#releases) 12 | - [Getting started](#getting-started) 13 | - [How to contribute](#how-to-contribute) 14 | - [Local Development](Contributing.md) 15 | 16 | 17 | 18 | 19 | Coolbeans 20 | ========= 21 | 22 | Coolbeans is a distributed replicated work queue service that implements the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt).
23 | 24 | Unlike a message queue, [beanstalkd](https://github.com/beanstalkd/beanstalkd) is a work queue that provides primitive operations to work with jobs. 25 | 26 | Coolbeans primarily differs from beanstalkd in that it allows the work queue to be replicated across multiple machines. It uses the [RAFT consensus algorithm](https://raft.github.io/) to replicate the job state consistently across machines. 27 | 28 | Motivation 29 | ---------- 30 | 31 | Beanstalkd is a [feature-rich](https://www.igvita.com/2010/05/20/scalable-work-queues-with-beanstalk/) and easy to use queue. Beanstalkd, however has a few drawbacks that include: (i) A lack of replication or high availability in terms of machine failures. (ii) There is no native sharding, (iii) No native support for encryption & authentication between the service & the client. 32 | 33 | Given the initial setup of beanstalkd is simple, having a HA or sharded production setup is non-trivial. Our premise with Coolbeans is to provide a replicated beanstalkd queue followed by addressing the other issues incrementally. Read about our design approach [here](doc/Design.md). 34 | 35 | Key features 36 | ------------ 37 | 38 | - A fully replicated work queue built using [Hashicorp's Raft library](https://github.com/hashicorp/raft). 39 | - Strong consistency of all queue operations. 40 | - Compatible with [existing beanstalkd clients](https://github.com/beanstalkd/beanstalkd/wiki/Client-Libraries). 41 | - Easy installation, available as a static binary or as a Linux docker image. 42 | - [Monitor metrics using Prometheus and visualize them via Grafana](https://github.com/1xyz/coolbeans-k8s/blob/master/doc/Metrics.md#setup-grafana). 43 | 44 | 45 | Releases 46 | -------- 47 | 48 | - Static binary can be downloaded from the [release pages](https://github.com/1xyz/coolbeans/releases). 49 | - Docker release images can be pulled from [here](https://hub.docker.com/r/1xyz/coolbeans). 
50 | - Docker development images can be pulled from [here](https://hub.docker.com/r/1xyz/coolbeans-developer/tags). 51 | 52 | 53 | Getting Started 54 | --------------- 55 | 56 | - Refer the [getting started guide](doc/GettingStarted.md). 57 | 58 | - To setup a three node cluster refer [here](doc/GettingStarted3.md). 59 | 60 | - Getting started guide to run coolbeans on Kubernetes, refer [here](https://github.com/1xyz/coolbeans-k8s). 61 | 62 | 63 | How to contribute 64 | ----------------- 65 | 66 | Coolbeans is currently at `alpha` release quality. It is all about improving the quality of this by testing, testing & more testing. 67 | 68 | Here are a few ways you can contribute: 69 | 70 | - Be an early adopter, Try it out on your machine, testbed or pre-production stack and give us [feedback or report issues](https://github.com/1xyz/coolbeans/issues/new/choose). 71 | 72 | - Have a feature in mind. Tell us more about by [filing an issue](https://github.com/1xyz/coolbeans/issues/new/choose). 73 | 74 | - Want to contribute to code, documentation. Checkout the [contribution guide](./Contributing.md). 75 | 76 | --- 77 | 78 | [icon](https://thenounproject.com/term/like/3185124/) by [Llisole](https://thenounproject.com/llisole/) from [the Noun Project](https://thenounproject.com) 79 | 80 | -------------------------------------------------------------------------------- /api/v1/client.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.28.0 4 | // protoc v3.19.3 5 | // source: client.proto 6 | 7 | package coolbeans_api_v1 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 
18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type ClientResvEntryProto struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | 28 | // identifier of the client that requested a reservation 29 | ClientId string `protobuf:"bytes,1,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` 30 | // reservation deadline at (clock time) 31 | ResvDeadlineAt int64 `protobuf:"varint,2,opt,name=resv_deadline_at,json=resvDeadlineAt,proto3" json:"resv_deadline_at,omitempty"` 32 | // boolean indicating if the client is waiting for a reservation 33 | IsWaitingForResv bool `protobuf:"varint,3,opt,name=is_waiting_for_resv,json=isWaitingForResv,proto3" json:"is_waiting_for_resv,omitempty"` 34 | // clock time at which the client needs some processing 35 | TickAt int64 `protobuf:"varint,4,opt,name=tick_at,json=tickAt,proto3" json:"tick_at,omitempty"` 36 | // request id of the current reservation request 37 | ReqId string `protobuf:"bytes,5,opt,name=req_id,json=reqId,proto3" json:"req_id,omitempty"` 38 | // heap index value for this client reservation 39 | HeapIndex int32 `protobuf:"varint,6,opt,name=heap_index,json=heapIndex,proto3" json:"heap_index,omitempty"` 40 | // names of the tubes current watched for resrervations 41 | WatchedTube []string `protobuf:"bytes,7,rep,name=watched_tube,json=watchedTube,proto3" json:"watched_tube,omitempty"` 42 | } 43 | 44 | func (x *ClientResvEntryProto) Reset() { 45 | *x = ClientResvEntryProto{} 46 | if protoimpl.UnsafeEnabled { 47 | mi := &file_client_proto_msgTypes[0] 48 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 49 | ms.StoreMessageInfo(mi) 50 | } 51 | } 52 | 53 | func (x *ClientResvEntryProto) String() string { 54 | return protoimpl.X.MessageStringOf(x) 55 | } 56 | 57 | func 
(*ClientResvEntryProto) ProtoMessage() {} 58 | 59 | func (x *ClientResvEntryProto) ProtoReflect() protoreflect.Message { 60 | mi := &file_client_proto_msgTypes[0] 61 | if protoimpl.UnsafeEnabled && x != nil { 62 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 63 | if ms.LoadMessageInfo() == nil { 64 | ms.StoreMessageInfo(mi) 65 | } 66 | return ms 67 | } 68 | return mi.MessageOf(x) 69 | } 70 | 71 | // Deprecated: Use ClientResvEntryProto.ProtoReflect.Descriptor instead. 72 | func (*ClientResvEntryProto) Descriptor() ([]byte, []int) { 73 | return file_client_proto_rawDescGZIP(), []int{0} 74 | } 75 | 76 | func (x *ClientResvEntryProto) GetClientId() string { 77 | if x != nil { 78 | return x.ClientId 79 | } 80 | return "" 81 | } 82 | 83 | func (x *ClientResvEntryProto) GetResvDeadlineAt() int64 { 84 | if x != nil { 85 | return x.ResvDeadlineAt 86 | } 87 | return 0 88 | } 89 | 90 | func (x *ClientResvEntryProto) GetIsWaitingForResv() bool { 91 | if x != nil { 92 | return x.IsWaitingForResv 93 | } 94 | return false 95 | } 96 | 97 | func (x *ClientResvEntryProto) GetTickAt() int64 { 98 | if x != nil { 99 | return x.TickAt 100 | } 101 | return 0 102 | } 103 | 104 | func (x *ClientResvEntryProto) GetReqId() string { 105 | if x != nil { 106 | return x.ReqId 107 | } 108 | return "" 109 | } 110 | 111 | func (x *ClientResvEntryProto) GetHeapIndex() int32 { 112 | if x != nil { 113 | return x.HeapIndex 114 | } 115 | return 0 116 | } 117 | 118 | func (x *ClientResvEntryProto) GetWatchedTube() []string { 119 | if x != nil { 120 | return x.WatchedTube 121 | } 122 | return nil 123 | } 124 | 125 | var File_client_proto protoreflect.FileDescriptor 126 | 127 | var file_client_proto_rawDesc = []byte{ 128 | 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 129 | 0x63, 0x6f, 0x6f, 0x6c, 0x62, 0x65, 0x61, 0x6e, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 130 | 0x22, 0xfe, 0x01, 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 
0x65, 0x73, 0x76, 0x45, 131 | 0x6e, 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 132 | 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 133 | 0x69, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x76, 0x5f, 0x64, 134 | 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 135 | 0x52, 0x0e, 0x72, 0x65, 0x73, 0x76, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x74, 136 | 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x66, 137 | 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x73, 0x76, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, 138 | 0x73, 0x57, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x46, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x76, 0x12, 139 | 0x17, 0x0a, 0x07, 0x74, 0x69, 0x63, 0x6b, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 140 | 0x52, 0x06, 0x74, 0x69, 0x63, 0x6b, 0x41, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x72, 0x65, 0x71, 0x5f, 141 | 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x65, 0x71, 0x49, 0x64, 0x12, 142 | 0x1d, 0x0a, 0x0a, 0x68, 0x65, 0x61, 0x70, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x06, 0x20, 143 | 0x01, 0x28, 0x05, 0x52, 0x09, 0x68, 0x65, 0x61, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, 144 | 0x0a, 0x0c, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x74, 0x75, 0x62, 0x65, 0x18, 0x07, 145 | 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x54, 0x75, 0x62, 146 | 0x65, 0x42, 0x14, 0x5a, 0x12, 0x2e, 0x3b, 0x63, 0x6f, 0x6f, 0x6c, 0x62, 0x65, 0x61, 0x6e, 0x73, 147 | 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 148 | } 149 | 150 | var ( 151 | file_client_proto_rawDescOnce sync.Once 152 | file_client_proto_rawDescData = file_client_proto_rawDesc 153 | ) 154 | 155 | func file_client_proto_rawDescGZIP() []byte { 156 | file_client_proto_rawDescOnce.Do(func() 
{ 157 | file_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_client_proto_rawDescData) 158 | }) 159 | return file_client_proto_rawDescData 160 | } 161 | 162 | var file_client_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 163 | var file_client_proto_goTypes = []interface{}{ 164 | (*ClientResvEntryProto)(nil), // 0: coolbeans.api.v1.ClientResvEntryProto 165 | } 166 | var file_client_proto_depIdxs = []int32{ 167 | 0, // [0:0] is the sub-list for method output_type 168 | 0, // [0:0] is the sub-list for method input_type 169 | 0, // [0:0] is the sub-list for extension type_name 170 | 0, // [0:0] is the sub-list for extension extendee 171 | 0, // [0:0] is the sub-list for field type_name 172 | } 173 | 174 | func init() { file_client_proto_init() } 175 | func file_client_proto_init() { 176 | if File_client_proto != nil { 177 | return 178 | } 179 | if !protoimpl.UnsafeEnabled { 180 | file_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 181 | switch v := v.(*ClientResvEntryProto); i { 182 | case 0: 183 | return &v.state 184 | case 1: 185 | return &v.sizeCache 186 | case 2: 187 | return &v.unknownFields 188 | default: 189 | return nil 190 | } 191 | } 192 | } 193 | type x struct{} 194 | out := protoimpl.TypeBuilder{ 195 | File: protoimpl.DescBuilder{ 196 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 197 | RawDescriptor: file_client_proto_rawDesc, 198 | NumEnums: 0, 199 | NumMessages: 1, 200 | NumExtensions: 0, 201 | NumServices: 0, 202 | }, 203 | GoTypes: file_client_proto_goTypes, 204 | DependencyIndexes: file_client_proto_depIdxs, 205 | MessageInfos: file_client_proto_msgTypes, 206 | }.Build() 207 | File_client_proto = out.File 208 | file_client_proto_rawDesc = nil 209 | file_client_proto_goTypes = nil 210 | file_client_proto_depIdxs = nil 211 | } 212 | -------------------------------------------------------------------------------- /api/v1/client.proto: 
-------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coolbeans.api.v1; 4 | 5 | option go_package = ".;coolbeans_api_v1"; 6 | 7 | message ClientResvEntryProto { 8 | // identifier of the client that requested a reservation 9 | string client_id = 1; 10 | 11 | // reservation deadline at (clock time) 12 | int64 resv_deadline_at = 2; 13 | 14 | // boolean indicating if the client is waiting for a reservation 15 | bool is_waiting_for_resv = 3; 16 | 17 | // clock time at which the client needs some processing 18 | int64 tick_at = 4; 19 | 20 | // request id of the current reservation request 21 | string req_id = 5; 22 | 23 | // heap index value for this client reservation 24 | int32 heap_index = 6; 25 | 26 | // names of the tubes current watched for resrervations 27 | repeated string watched_tube = 7; 28 | } -------------------------------------------------------------------------------- /api/v1/cluster.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coolbeans.api.v1; 4 | 5 | option go_package = ".;coolbeans_api_v1"; 6 | 7 | import "empty.proto"; 8 | 9 | // Service provides functionality to work with the cluster 10 | service Cluster { 11 | // Join, allows a calling node to make a request to join 12 | // this cluster. 13 | // 14 | // It is required that the node that this is called into is a leader node. 15 | // Refer: the specific implementation for error codes 16 | rpc Join (JoinRequest) returns (Empty) {} 17 | 18 | // Leave, allows a calling node to leave the cluster. 19 | // 20 | // It is required that the node that this is called into is a leader node. 
21 | // Refer: the specific implementation for error codes 22 | rpc Leave (LeaveRequest) returns (Empty) {} 23 | 24 | // IsNodeLeader, Check if this current node is a leader 25 | rpc IsNodeLeader (Empty) returns (IsNodeLeaderResponse) {} 26 | 27 | // Snaoshot, requests the server to return a user defined snapshot of 28 | // the specific node. 29 | rpc Snapshot (Empty) returns (Empty) {} 30 | } 31 | 32 | message JoinRequest { 33 | // Node ID of the node interested in joining 34 | string node_id = 1; 35 | 36 | // Address of the replica interested in joining 37 | string addr = 2; 38 | } 39 | 40 | message LeaveRequest { 41 | // Node ID of the node requested to leave 42 | string node_id = 1; 43 | } 44 | 45 | message IsNodeLeaderResponse { 46 | // is_leader is a boolean value indicating if this current node is a leader or not 47 | bool is_leader = 1; 48 | } 49 | 50 | -------------------------------------------------------------------------------- /api/v1/empty.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.28.0 4 | // protoc v3.19.3 5 | // source: empty.proto 6 | 7 | package coolbeans_api_v1 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | ) 15 | 16 | const ( 17 | // Verify that this generated code is sufficiently up-to-date. 18 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 19 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
20 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 21 | ) 22 | 23 | type Empty struct { 24 | state protoimpl.MessageState 25 | sizeCache protoimpl.SizeCache 26 | unknownFields protoimpl.UnknownFields 27 | } 28 | 29 | func (x *Empty) Reset() { 30 | *x = Empty{} 31 | if protoimpl.UnsafeEnabled { 32 | mi := &file_empty_proto_msgTypes[0] 33 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 34 | ms.StoreMessageInfo(mi) 35 | } 36 | } 37 | 38 | func (x *Empty) String() string { 39 | return protoimpl.X.MessageStringOf(x) 40 | } 41 | 42 | func (*Empty) ProtoMessage() {} 43 | 44 | func (x *Empty) ProtoReflect() protoreflect.Message { 45 | mi := &file_empty_proto_msgTypes[0] 46 | if protoimpl.UnsafeEnabled && x != nil { 47 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 48 | if ms.LoadMessageInfo() == nil { 49 | ms.StoreMessageInfo(mi) 50 | } 51 | return ms 52 | } 53 | return mi.MessageOf(x) 54 | } 55 | 56 | // Deprecated: Use Empty.ProtoReflect.Descriptor instead. 57 | func (*Empty) Descriptor() ([]byte, []int) { 58 | return file_empty_proto_rawDescGZIP(), []int{0} 59 | } 60 | 61 | var File_empty_proto protoreflect.FileDescriptor 62 | 63 | var file_empty_proto_rawDesc = []byte{ 64 | 0x0a, 0x0b, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 65 | 0x6f, 0x6f, 0x6c, 0x62, 0x65, 0x61, 0x6e, 0x73, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x22, 66 | 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x14, 0x5a, 0x12, 0x2e, 0x3b, 0x63, 0x6f, 67 | 0x6f, 0x6c, 0x62, 0x65, 0x61, 0x6e, 0x73, 0x5f, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x31, 0x62, 0x06, 68 | 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 69 | } 70 | 71 | var ( 72 | file_empty_proto_rawDescOnce sync.Once 73 | file_empty_proto_rawDescData = file_empty_proto_rawDesc 74 | ) 75 | 76 | func file_empty_proto_rawDescGZIP() []byte { 77 | file_empty_proto_rawDescOnce.Do(func() { 78 | file_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_empty_proto_rawDescData) 79 | }) 80 | 
return file_empty_proto_rawDescData 81 | } 82 | 83 | var file_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1) 84 | var file_empty_proto_goTypes = []interface{}{ 85 | (*Empty)(nil), // 0: coolbeans.api.v1.Empty 86 | } 87 | var file_empty_proto_depIdxs = []int32{ 88 | 0, // [0:0] is the sub-list for method output_type 89 | 0, // [0:0] is the sub-list for method input_type 90 | 0, // [0:0] is the sub-list for extension type_name 91 | 0, // [0:0] is the sub-list for extension extendee 92 | 0, // [0:0] is the sub-list for field type_name 93 | } 94 | 95 | func init() { file_empty_proto_init() } 96 | func file_empty_proto_init() { 97 | if File_empty_proto != nil { 98 | return 99 | } 100 | if !protoimpl.UnsafeEnabled { 101 | file_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { 102 | switch v := v.(*Empty); i { 103 | case 0: 104 | return &v.state 105 | case 1: 106 | return &v.sizeCache 107 | case 2: 108 | return &v.unknownFields 109 | default: 110 | return nil 111 | } 112 | } 113 | } 114 | type x struct{} 115 | out := protoimpl.TypeBuilder{ 116 | File: protoimpl.DescBuilder{ 117 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 118 | RawDescriptor: file_empty_proto_rawDesc, 119 | NumEnums: 0, 120 | NumMessages: 1, 121 | NumExtensions: 0, 122 | NumServices: 0, 123 | }, 124 | GoTypes: file_empty_proto_goTypes, 125 | DependencyIndexes: file_empty_proto_depIdxs, 126 | MessageInfos: file_empty_proto_msgTypes, 127 | }.Build() 128 | File_empty_proto = out.File 129 | file_empty_proto_rawDesc = nil 130 | file_empty_proto_goTypes = nil 131 | file_empty_proto_depIdxs = nil 132 | } 133 | -------------------------------------------------------------------------------- /api/v1/empty.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coolbeans.api.v1; 4 | 5 | option go_package = ".;coolbeans_api_v1"; 6 | 7 | message Empty{} 
-------------------------------------------------------------------------------- /api/v1/job.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package coolbeans.api.v1; 4 | 5 | option go_package = ".;coolbeans_api_v1"; 6 | 7 | enum JobStateProto { 8 | INITIAL = 0; 9 | READY = 1; 10 | RESERVED = 2; 11 | BURIED = 3; 12 | DELAYED = 4; 13 | DELETED = 5; 14 | } 15 | 16 | message JobProto { 17 | // Unique identifier for job 18 | int64 id = 1; 19 | 20 | // Priority is an integer < 2**32. Jobs with smaller priority values will be 21 | // scheduled before jobs with larger priorities. The most urgent priority is 0; 22 | // the least urgent priority is 4,294,967,295. 23 | uint32 priority = 2; 24 | 25 | // delay is an integer number of seconds to wait before putting the job in 26 | // the ready queue. The job will be in the "delayed" state during this time. 27 | // Maximum delay is 2**32-1. 28 | int64 delay = 3; 29 | 30 | // TTR/time to run -- is an integer number of seconds to allow a worker 31 | // to run this job. This time is counted from the moment a worker reserves 32 | // this job. If the worker does not delete, release, or bury the job within 33 | // seconds, the job will time out and the server will release the job. 34 | // The minimum ttr is 1. If the client sends 0, the server will silently 35 | // increase the ttr to 1. Maximum ttr is 2**32-1. 
36 | int32 ttr = 4; 37 | 38 | // tube_name is the name of the tube associated with this job 39 | string tube_name = 5; 40 | 41 | // created is the time in UTC the job is created 42 | int64 created_at = 6; 43 | 44 | // ready_at is the time in UTC the job is ready 45 | int64 ready_at = 7; 46 | 47 | // expires_at is the time in UTC, when current reservation expires 48 | int64 expires_at = 8; 49 | 50 | // state is the current state of this job 51 | JobStateProto state = 9; 52 | 53 | // reserved_by is the identifier of the client which has 54 | // reserved this job, the value is empty if un-reserved 55 | string reserved_by = 10; 56 | 57 | // body_size is an integer indicating the size of the job body, not including the 58 | // trailing "\r\n". This value must be less than max-job-size (default: 2**16) 59 | int32 body_size = 11; 60 | 61 | // body is the job body -- a sequence of bytes of length BodySize 62 | bytes body = 12; 63 | 64 | // buried_at the clock time when the job is buried 65 | int64 buried_at = 13; 66 | 67 | // reserve_count is the number of times this job has been reserved. 68 | uint32 reserve_count = 14; 69 | 70 | // timeoutCount is the number of times this job has timed out during a reservation. 71 | uint32 timeout_count = 15; 72 | 73 | // release_count is the number of times a client has released this job from a reservation. 74 | uint32 release_count = 16; 75 | 76 | // bury_count is the number of times this job has been buried. 
77 | uint32 bury_count = 17; 78 | 79 | // kick_count is the number of times this job has been kicked 80 | uint32 kick_count = 18; 81 | } 82 | -------------------------------------------------------------------------------- /beanstalkd/cmd/beanstalkd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/1xyz/coolbeans/beanstalkd/core" 5 | "github.com/1xyz/coolbeans/beanstalkd/proto" 6 | "github.com/davecgh/go-spew/spew" 7 | log "github.com/sirupsen/logrus" 8 | "os" 9 | "os/signal" 10 | "syscall" 11 | ) 12 | 13 | // runTCPServer - creates and run a beanstalkd TCP server to 14 | // listen on the configured addr/port. The server listens on 15 | // a separate go-routine and return back to caller. 16 | // Refer method: waitForShutdown, 17 | func runTCPServer(c *core.Config) *proto.TcpServer { 18 | tcpServer := proto.NewTcpServer(c) 19 | go func(tcpSrv *proto.TcpServer) { 20 | if err := tcpSrv.Listen(); err != nil { 21 | log.Errorf("runTCPServer: tcpServer.listen err=%v", err) 22 | } 23 | }(tcpServer) 24 | return tcpServer 25 | } 26 | 27 | // waitForShutdown waits for a terminate or interrupt signal 28 | // terminates the server once a signal is received. 
29 | func waitForShutdown(tcpSrv *proto.TcpServer) { 30 | done := make(chan os.Signal, 1) 31 | signal.Notify(done, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) 32 | 33 | <-done 34 | log.Infof("waitForShutdown: Shutdown signal received") 35 | tcpSrv.Shutdown() 36 | } 37 | 38 | // RunBeanstalkd runs the beanstalkd TCP proxy server with provided config 39 | func RunBeanstalkd(c *core.Config) error { 40 | spew.Dump(c) 41 | tcpSrv := runTCPServer(c) 42 | waitForShutdown(tcpSrv) 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /beanstalkd/cmd/cmd_beanstalkd.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/1xyz/coolbeans/beanstalkd/core" 5 | "github.com/docopt/docopt-go" 6 | log "github.com/sirupsen/logrus" 7 | ) 8 | 9 | func CmdBeanstalkd(argv []string, version string) { 10 | usage := `usage: beanstalkd [options] 11 | Options: 12 | -h --help Show this screen. 13 | --listen-addr= Listen on address [default: 0.0.0.0]. 14 | --listen-port= Listen on port [default:11300]. 15 | --upstream-addrs= A CSV of upstream cluster-node servers. Defaults to any empty value where 16 | a complete cluster node & beanstalkd server runs in a single process [default: ]. 17 | --connect-timeout= Upstream connection timeout in seconds [default: 10]. 18 | --max-job-size= Maximum job size in bytes [default: 16384]. 19 | --max-reservation-timeout= Maximum reservation timeout in seconds [default: 3600]. 20 | ` 21 | opts, err := docopt.ParseArgs(usage, argv[1:], version) 22 | if err != nil { 23 | log.Fatalf("error parsing arguments. err=%v", err) 24 | } 25 | 26 | var bsConfig core.Config 27 | if err := opts.Bind(&bsConfig); err != nil { 28 | log.Fatalf("error in opts.bind. err=%v", err) 29 | } 30 | 31 | if err := RunBeanstalkd(&bsConfig); err != nil { 32 | log.Fatalf("error RunBeanstalkd. 
err=%v", err) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /beanstalkd/core/client.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "github.com/1xyz/coolbeans/state" 6 | "github.com/google/uuid" 7 | log "github.com/sirupsen/logrus" 8 | ) 9 | 10 | type client struct { 11 | // unique UUID for this client 12 | id state.ClientID 13 | 14 | // currently the tube used by this client 15 | useTube state.TubeName 16 | 17 | // Command Response channel 18 | responseCh chan CmdResponse 19 | 20 | // Tubes currently watched 21 | watchingTubes tubeSet 22 | } 23 | 24 | func (c client) String() string { 25 | return fmt.Sprintf("client: id = %v usetube = %v watching = %v", 26 | c.id, c.useTube, c.watchingTubes) 27 | } 28 | 29 | func NewClient(useTube state.TubeName) *client { 30 | id := uuid.New().URN() 31 | tubes := newTubeSet() 32 | err := tubes.Set(defaultTubeName) 33 | if err != nil { 34 | log.WithField("method", "useClient").Panicf("tubes.set %v", err) 35 | } 36 | 37 | return &client{ 38 | id: state.ClientID(id), 39 | useTube: useTube, 40 | responseCh: make(chan CmdResponse), 41 | watchingTubes: tubes, 42 | } 43 | } 44 | 45 | type ClientSet map[state.ClientID]*client 46 | 47 | func (cs ClientSet) Set(c *client) error { 48 | if cs.Contains(c) { 49 | return state.ErrEntryExists 50 | } 51 | 52 | cs[c.id] = c 53 | return nil 54 | } 55 | 56 | func (cs ClientSet) Contains(c *client) bool { 57 | _, ok := cs[c.id] 58 | return ok 59 | } 60 | 61 | func (cs ClientSet) Remove(c *client) error { 62 | if !cs.Contains(c) { 63 | return state.ErrEntryMissing 64 | } 65 | 66 | delete(cs, c.id) 67 | return nil 68 | } 69 | 70 | func (cs ClientSet) Find(clientID state.ClientID) (*client, error) { 71 | cli, ok := cs[clientID] 72 | if !ok { 73 | return nil, state.ErrEntryMissing 74 | } 75 | 76 | return cli, nil 77 | } 78 | 79 | func (cs ClientSet) Len() int { 80 | 
return len(cs) 81 | } 82 | 83 | func (cs ClientSet) Random() (*client, error) { 84 | for _, v := range cs { 85 | return v, nil 86 | } 87 | 88 | return nil, state.ErrContainerEmpty 89 | } 90 | 91 | type tubeSet map[state.TubeName]bool 92 | 93 | func newTubeSet() tubeSet { 94 | return make(tubeSet) 95 | } 96 | 97 | func (t tubeSet) Set(name state.TubeName) error { 98 | _, ok := t[name] 99 | if ok { 100 | return state.ErrEntryExists 101 | } 102 | t[name] = true 103 | return nil 104 | } 105 | 106 | func (t tubeSet) Remove(name state.TubeName) error { 107 | if _, ok := t[name]; !ok { 108 | return state.ErrEntryMissing 109 | } 110 | 111 | delete(t, name) 112 | return nil 113 | } 114 | 115 | func (t tubeSet) Len() int { 116 | return len(t) 117 | } 118 | 119 | func (t tubeSet) String() string { 120 | return fmt.Sprintf("tubeSet len = %v", t.Len()) 121 | } 122 | -------------------------------------------------------------------------------- /beanstalkd/core/client_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "github.com/1xyz/coolbeans/state" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestClientSet(t *testing.T) { 10 | 11 | } 12 | 13 | func TestClientSet_Set(t *testing.T) { 14 | cs := make(ClientSet) 15 | 16 | cli := newTestClient() 17 | err := cs.Set(cli) 18 | assert.Nilf(t, err, "expect err to be nil") 19 | 20 | err = cs.Set(cli) 21 | assert.Equalf(t, state.ErrEntryExists, err, "expect err to be ErrEntryExists") 22 | 23 | err = cs.Set(newTestClient()) 24 | assert.Nilf(t, err, "expect err to be nil") 25 | } 26 | 27 | func TestClientSet_Remove(t *testing.T) { 28 | cs := make(ClientSet) 29 | 30 | cli := newTestClient() 31 | if err := cs.Set(cli); err != nil { 32 | t.Fatalf("test error %v", err) 33 | } 34 | 35 | err := cs.Remove(cli) 36 | assert.Nilf(t, err, "expect err to be nil") 37 | 38 | err = cs.Remove(cli) 39 | assert.Equalf(t, state.ErrEntryMissing, err, 
"expect err to be ErrEntryMissing") 40 | } 41 | 42 | func TestClientSet_Contains(t *testing.T) { 43 | cs := make(ClientSet) 44 | 45 | cli := newTestClient() 46 | if err := cs.Set(cli); err != nil { 47 | t.Fatalf("test error %v", err) 48 | } 49 | 50 | b := cs.Contains(cli) 51 | assert.Truef(t, b, "expect result to be true") 52 | 53 | if err := cs.Remove(cli); err != nil { 54 | t.Fatalf("test error %v", err) 55 | } 56 | b = cs.Contains(cli) 57 | assert.Falsef(t, b, "expect result to be false") 58 | } 59 | 60 | func TestClientSet_Find(t *testing.T) { 61 | cs := make(ClientSet) 62 | 63 | cli, err := cs.Find("abracadabra") 64 | assert.Equalf(t, state.ErrEntryMissing, err, "expectErrEntryMissing") 65 | 66 | cli = newTestClient() 67 | if err := cs.Set(cli); err != nil { 68 | t.Fatalf("test error %v", err) 69 | } 70 | 71 | resultCli, err := cs.Find(cli.id) 72 | assert.Nilf(t, err, "expect err to be nil") 73 | assert.Equalf(t, cli.id, resultCli.id, "expect client to be found") 74 | } 75 | 76 | func newTestClient() *client { 77 | cli := NewClient(state.TubeName("foo")) 78 | return cli 79 | } 80 | 81 | func TestTubeSet(t *testing.T) { 82 | ts := newTubeSet() 83 | 84 | tubeName := state.TubeName("othello") 85 | err := ts.Set(tubeName) 86 | assert.Nilf(t, err, "expect err to be nil") 87 | 88 | err = ts.Set(tubeName) 89 | assert.Equalf(t, state.ErrEntryExists, err, "expect ErrEntryExists") 90 | 91 | err = ts.Remove(tubeName) 92 | assert.Nilf(t, err, "expect err to be nil") 93 | 94 | err = ts.Remove(tubeName) 95 | assert.Equalf(t, state.ErrEntryMissing, err, "expect ErrEntryExists") 96 | } 97 | -------------------------------------------------------------------------------- /beanstalkd/core/cmd_data.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "github.com/1xyz/coolbeans/state" 6 | log "github.com/sirupsen/logrus" 7 | "regexp" 8 | "strconv" 9 | ) 10 | 11 | // CmdData encapsulates a parsed 
command. 12 | type CmdData struct { 13 | CmdType CmdType 14 | Args string 15 | Data []byte 16 | NeedData bool 17 | } 18 | 19 | func (c CmdData) String() string { 20 | return fmt.Sprintf("CmdType: %v Args:[%v] NeedData:[%v]", 21 | c.CmdType, c.Args, c.NeedData) 22 | } 23 | 24 | var ( 25 | spaceRe = regexp.MustCompile(`(\s{2,}|\s+(^|$))`) 26 | splitRe = regexp.MustCompile(`\s`) 27 | ) 28 | 29 | // ParseCommandLine parses the command line string provided a connected client 30 | // into a valid CmdData struct 31 | func ParseCommandLine(cmdLine string, maxJobDataSizeBytes int) (*CmdData, error) { 32 | s := spaceRe.ReplaceAllLiteralString(cmdLine, "") 33 | tokens := splitRe.Split(s, 2) 34 | if len(tokens) == 0 || (len(tokens) == 1 && tokens[0] == "") { 35 | return nil, ErrCmdTokensMissing 36 | } 37 | 38 | if c, ok := commandTypeStrings[tokens[0]]; !ok { 39 | return nil, ErrCmdNotFound 40 | } else { 41 | var args string 42 | if len(tokens) == 2 { 43 | args = tokens[1] 44 | } 45 | 46 | var data []byte = nil 47 | cd := &CmdData{ 48 | CmdType: c, 49 | Args: args, 50 | Data: data, 51 | NeedData: false, 52 | } 53 | 54 | if c == Put { 55 | cd.Data = make([]byte, 0) 56 | cd.NeedData = true 57 | arg, err := NewPutArg(cd) 58 | if err != nil { 59 | log.Errorf("ParseCommandLine: NewPutArg err = %v", err) 60 | return nil, err 61 | } 62 | if arg.size > maxJobDataSizeBytes { 63 | return nil, ErrJobSizeTooBig 64 | } 65 | } 66 | 67 | return cd, nil 68 | } 69 | } 70 | 71 | type tokenMap map[string]string 72 | 73 | func matchNamedGroups(args string, re *regexp.Regexp) (tokenMap, bool) { 74 | if !re.MatchString(args) { 75 | return nil, false 76 | } 77 | 78 | names := re.SubexpNames() 79 | matches := re.FindAllStringSubmatch(args, -1) 80 | res := make(map[string]string) 81 | for _, e := range matches { 82 | for i, f := range e { 83 | if names[i] == "" { 84 | continue 85 | } 86 | 87 | res[names[i]] = f 88 | } 89 | } 90 | 91 | return res, true 92 | } 93 | 94 | var ( 95 | // put command regex 
-- put 96 | putRe = regexp.MustCompile(`^(?P\d+) (?P\d+) (?P\d+) (?P\d+)$`) 97 | 98 | // tube arg regex -- watch | ignore | use 99 | tubeArgRe = regexp.MustCompile(`(?P^[a-zA-Z0-9+\/;.$_()][a-zA-Z0-9\-+\/;.$_()]{0,199}$)`) 100 | 101 | // id arg regex -- 102 | // delete 103 | // kick-job 104 | idArgRe = regexp.MustCompile(`(?P^\d+$)`) 105 | 106 | // reserve-with-timeo 107 | // ut regex -- reserve-with-timeout 108 | reserveWithTimeoutRe = regexp.MustCompile(`(?P^\d+$)`) 109 | 110 | // bury 111 | buryArgRe = regexp.MustCompile(`^(?P^\d+) (?P\d+)$`) 112 | 113 | // kick 114 | kickNArgRe = regexp.MustCompile(`(?P^\d+$)`) 115 | 116 | // release 117 | releaseArgRe = regexp.MustCompile(`^(?P^\d+) (?P\d+) (?P\d+)$`) 118 | ) 119 | 120 | type putArg struct { 121 | pri uint32 122 | delay int64 123 | ttr int 124 | size int 125 | data []byte 126 | } 127 | 128 | // NewPutArg constructs a pointer to a new PutArg struct. 129 | func NewPutArg(data *CmdData) (*putArg, error) { 130 | ctxLog := log.WithFields(log.Fields{"method": "NewPutArg"}) 131 | tm, ok := matchNamedGroups(data.Args, putRe) 132 | if !ok { 133 | ctxLog.Errorf("matchNamedGroups ok=false") 134 | return nil, ErrBadFormat 135 | } 136 | 137 | ctxLog.Debugf("matchResponse %v", tm) 138 | pri, err := strconv.ParseUint(tm["pri"], 10, 32) 139 | if err != nil { 140 | ctxLog.Errorf("ParseUint(pri) err=%v", err) 141 | return nil, ErrBadFormat 142 | } 143 | 144 | delay, err := strconv.ParseInt(tm["delay"], 10, 64) 145 | if err != nil { 146 | ctxLog.Errorf("strconv.ParseInt(delay) err=%v", err) 147 | return nil, ErrBadFormat 148 | } 149 | 150 | ttr, err := strconv.Atoi(tm["ttr"]) 151 | if err != nil { 152 | ctxLog.Errorf("atoi(ttr) %v", err) 153 | return nil, ErrBadFormat 154 | } 155 | 156 | bytes, err := strconv.Atoi(tm["bytes"]) 157 | if err != nil { 158 | ctxLog.Errorf("atoi(bytes) %v", err) 159 | return nil, ErrBadFormat 160 | } 161 | 162 | return &putArg{ 163 | pri: uint32(pri), 164 | delay: delay, 165 | ttr: ttr, 166 | size: 
bytes, 167 | data: data.Data, 168 | }, nil 169 | } 170 | 171 | type tubeArg struct { 172 | tubeName state.TubeName 173 | } 174 | 175 | // NewTubeArg constructs a pointer to a new tubeArg struct. 176 | func NewTubeArg(data *CmdData) (*tubeArg, error) { 177 | ctxLog := log.WithFields(log.Fields{"method": "NewTubeArg"}) 178 | tm, ok := matchNamedGroups(data.Args, tubeArgRe) 179 | if !ok { 180 | ctxLog.Errorf("matchNamedGroups ok=false") 181 | return nil, ErrBadFormat 182 | } 183 | 184 | ctxLog.Debugf("matchResponse %v", tm) 185 | return &tubeArg{ 186 | tubeName: state.TubeName(tm["tube"]), 187 | }, nil 188 | } 189 | 190 | type idArg struct { 191 | id state.JobID 192 | } 193 | 194 | // NewIDArg constructs a pointer to a new idArg struct. 195 | func NewIDArg(data *CmdData) (*idArg, error) { 196 | ctxLog := log.WithFields(log.Fields{"method": "NewIDArg"}) 197 | tm, ok := matchNamedGroups(data.Args, idArgRe) 198 | if !ok { 199 | ctxLog.Errorf("matchNamedGroups ok=false") 200 | return nil, ErrBadFormat 201 | } 202 | 203 | ctxLog.Debugf("matchResponse %v", tm) 204 | id, err := strconv.ParseUint(tm["id"], 10, 64) 205 | if err != nil { 206 | ctxLog.Errorf("ParseUint(id) err=%v", err) 207 | return nil, ErrBadFormat 208 | } 209 | return &idArg{ 210 | id: state.JobID(id), 211 | }, nil 212 | } 213 | 214 | type reserveWithTimeoutArg struct { 215 | timeoutSeconds int 216 | } 217 | 218 | // NewReserveWithTimeoutArg constructs a pointer to a new reserveWithTimeoutArg struct. 
219 | func NewReserveWithTimeoutArg(data *CmdData) (*reserveWithTimeoutArg, error) { 220 | ctxLog := log.WithFields(log.Fields{"method": "NewReserveWithTimeoutArg"}) 221 | tm, ok := matchNamedGroups(data.Args, reserveWithTimeoutRe) 222 | if !ok { 223 | ctxLog.Errorf("matchNamedGroups ok=false") 224 | return nil, ErrBadFormat 225 | } 226 | 227 | ctxLog.Debugf("matchResponse %v", tm) 228 | timeoutSeconds, err := strconv.Atoi(tm["seconds"]) 229 | if err != nil { 230 | ctxLog.Errorf("atoi(seconds) %v", err) 231 | return nil, ErrBadFormat 232 | } 233 | 234 | return &reserveWithTimeoutArg{ 235 | timeoutSeconds: timeoutSeconds, 236 | }, nil 237 | } 238 | 239 | type buryArg struct { 240 | id state.JobID 241 | pri uint32 242 | } 243 | 244 | // NewBuryArg constructs a pointer to a new buryArg struct. 245 | func NewBuryArg(data *CmdData) (*buryArg, error) { 246 | tm, ok := matchNamedGroups(data.Args, buryArgRe) 247 | if !ok { 248 | log.Errorf("NewBuryArg: matchNamedGroups ok=false") 249 | return nil, ErrBadFormat 250 | } 251 | 252 | id, err := strconv.ParseUint(tm["id"], 10, 64) 253 | if err != nil { 254 | log.Errorf("NewBuryArg: ParseUint(id) err=%v", err) 255 | return nil, ErrBadFormat 256 | } 257 | 258 | pri, err := strconv.ParseUint(tm["pri"], 10, 32) 259 | if err != nil { 260 | log.Errorf("NewBuryArg: ParseUint(pri) err=%v", err) 261 | return nil, ErrBadFormat 262 | } 263 | 264 | return &buryArg{ 265 | id: state.JobID(id), 266 | pri: uint32(pri), 267 | }, nil 268 | } 269 | 270 | type kickNArg struct { 271 | bound int 272 | } 273 | 274 | // NewKickNArg constructs a pointer to a new kickNArg struct. 
275 | func NewKickNArg(data *CmdData) (*kickNArg, error) { 276 | tm, ok := matchNamedGroups(data.Args, kickNArgRe) 277 | if !ok { 278 | log.Errorf("NewKickNArg: matchNamedGroups ok=false") 279 | return nil, ErrBadFormat 280 | } 281 | 282 | bound, err := strconv.Atoi(tm["bound"]) 283 | if err != nil { 284 | log.Errorf("NewKickNArg: atoi(bound) %v", err) 285 | return nil, ErrBadFormat 286 | } 287 | return &kickNArg{ 288 | bound: bound, 289 | }, nil 290 | } 291 | 292 | type releaseArg struct { 293 | id state.JobID 294 | pri uint32 295 | delay int64 296 | } 297 | 298 | // NewReleaseArg constructs a pointer to a new releaseArg struct. 299 | func NewReleaseArg(data *CmdData) (*releaseArg, error) { 300 | tm, ok := matchNamedGroups(data.Args, releaseArgRe) 301 | if !ok { 302 | log.Errorf("NewReleaseArg: matchNamedGroups ok=false") 303 | return nil, ErrBadFormat 304 | } 305 | 306 | id, err := strconv.ParseUint(tm["id"], 10, 64) 307 | if err != nil { 308 | log.Errorf("NewReleaseArg: ParseUint(id) err=%v", err) 309 | return nil, ErrBadFormat 310 | } 311 | 312 | pri, err := strconv.ParseUint(tm["pri"], 10, 32) 313 | if err != nil { 314 | log.Errorf("NewReleaseArg: ParseUint(pri) err=%v", err) 315 | return nil, ErrBadFormat 316 | } 317 | 318 | delay, err := strconv.ParseInt(tm["delay"], 10, 64) 319 | if err != nil { 320 | log.Errorf("NewReleaseArg: ParseInt(delay) err=%v", err) 321 | return nil, ErrBadFormat 322 | } 323 | 324 | return &releaseArg{ 325 | id: state.JobID(id), 326 | pri: uint32(pri), 327 | delay: delay, 328 | }, nil 329 | } 330 | -------------------------------------------------------------------------------- /beanstalkd/core/cmd_data_test.go: -------------------------------------------------------------------------------- 1 | package core 2 | 3 | import ( 4 | "fmt" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | const maxTestJobDataSizeBytes = 2 << 15 10 | 11 | func TestParseCommandLine(t *testing.T) { 12 | // put 13 | var entries = []struct 
{ 14 | inCmdLine string 15 | outCmData *CmdData 16 | err error 17 | msg string 18 | }{ 19 | {"put 0 100 30 32898", 20 | &CmdData{Put, "0 100 30 32898", []byte{}, true}, 21 | nil, 22 | "expect valid put parsing"}, 23 | {"split 0 100 30 32898", 24 | nil, 25 | ErrCmdNotFound, 26 | "expect ErrCmdNotFound to be returned"}, 27 | {"put", 28 | nil, 29 | ErrBadFormat, 30 | "expect cmdline to return ErrBadFormat for Put if there are no args"}, 31 | {"stats-tube", 32 | &CmdData{StatsTube, "", nil, false}, 33 | nil, 34 | "expect cmdline to return nil for other commands if there are no args"}, 35 | {" ", 36 | nil, 37 | ErrCmdTokensMissing, 38 | "expect ErrCmdTokensMissing to be returned"}, 39 | {fmt.Sprintf("put 0 100 30 %d", maxTestJobDataSizeBytes+1), 40 | nil, 41 | ErrJobSizeTooBig, 42 | "expect valid put parsing"}, 43 | } 44 | 45 | for _, e := range entries { 46 | cmdData, err := ParseCommandLine(e.inCmdLine, maxTestJobDataSizeBytes) 47 | assert.Equalf(t, e.err, err, e.msg) 48 | assert.Equalf(t, e.outCmData, cmdData, e.msg) 49 | } 50 | } 51 | 52 | func TestPutArg(t *testing.T) { 53 | // put 54 | var entries = []struct { 55 | inArg string 56 | outArg *putArg 57 | err error 58 | msg string 59 | }{ 60 | {"0 100 30 32898", &putArg{pri: 0, delay: 100, ttr: 30, size: 32898}, nil, 61 | "expect valid put arg"}, 62 | {"0 100 30 32898 892", nil, ErrBadFormat, 63 | "put args must have exact arg count"}, 64 | {"0 100 30 ", nil, ErrBadFormat, 65 | "put args must have exact arg count"}, 66 | {"0 100 30 32898abc", nil, ErrBadFormat, 67 | "put args must have numeric args"}, 68 | } 69 | 70 | for _, e := range entries { 71 | d := &CmdData{ 72 | CmdType: Unknown, 73 | Args: e.inArg, 74 | Data: nil, 75 | NeedData: false, 76 | } 77 | pa, err := NewPutArg(d) 78 | assert.Equalf(t, e.err, err, e.msg) 79 | assert.Equalf(t, e.outArg, pa, e.msg) 80 | } 81 | } 82 | 83 | func TestTubeArg(t *testing.T) { 84 | var entries = []struct { 85 | inArg string 86 | tubeArg *tubeArg 87 | err error 88 | msg string 
89 | }{ 90 | {"foo", &tubeArg{tubeName: "foo"}, nil, 91 | "expect valid tubename"}, 92 | {"pizza day", nil, ErrBadFormat, 93 | "tube cannot have spaces"}, 94 | {strN('a', 201), nil, ErrBadFormat, 95 | "tube name cannot exceed 200 bytes"}, 96 | {"-abc", nil, ErrBadFormat, 97 | "tube cannot start with '-'"}, 98 | {"asd-AAZ213;$_/.(+)", &tubeArg{tubeName: "asd-AAZ213;$_/.(+)"}, nil, 99 | "expect valid tubename"}, 100 | {"x", &tubeArg{tubeName: "x"}, nil, 101 | "expect valid tubename"}, 102 | } 103 | 104 | for _, e := range entries { 105 | d := &CmdData{ 106 | CmdType: Unknown, 107 | Args: e.inArg, 108 | Data: nil, 109 | NeedData: false, 110 | } 111 | tc, err := NewTubeArg(d) 112 | assert.Equalf(t, e.err, err, e.msg) 113 | assert.Equalf(t, e.tubeArg, tc, e.msg) 114 | } 115 | } 116 | 117 | func TestIDArg(t *testing.T) { 118 | var entries = []struct { 119 | inArg string 120 | idArg *idArg 121 | err error 122 | msg string 123 | }{ 124 | {"123456", &idArg{id: 123456}, nil, 125 | "expect valid job id"}, 126 | {"pizza", nil, ErrBadFormat, 127 | "id has to be numeric"}, 128 | {"12345 ", nil, ErrBadFormat, 129 | "id can't have spaces"}, 130 | {"12345 678", nil, ErrBadFormat, 131 | "id field has only one word"}, 132 | {strN('1', 200), nil, ErrBadFormat, 133 | "id field has only one word"}, 134 | {"-12234", nil, ErrBadFormat, 135 | "id has to be unsigned"}, 136 | } 137 | 138 | for _, e := range entries { 139 | d := &CmdData{ 140 | CmdType: Unknown, 141 | Args: e.inArg, 142 | Data: nil, 143 | NeedData: false, 144 | } 145 | tc, err := NewIDArg(d) 146 | assert.Equalf(t, e.err, err, e.msg) 147 | assert.Equalf(t, e.idArg, tc, e.msg) 148 | } 149 | } 150 | 151 | func strN(c byte, n int) string { 152 | b := make([]byte, n) 153 | for i := range b { 154 | b[i] = c 155 | } 156 | return string(b) 157 | } 158 | 159 | func TestReserveWithTimeoutArg(t *testing.T) { 160 | var entries = []struct { 161 | inArg string 162 | outArg *reserveWithTimeoutArg 163 | err error 164 | msg string 165 | }{ 
		{"123456", &reserveWithTimeoutArg{timeoutSeconds: 123456}, nil,
			"expect valid argument"},
		{"0", &reserveWithTimeoutArg{timeoutSeconds: 0}, nil,
			"expect valid argument"},
		{"123456 3287", nil, ErrBadFormat,
			"timeoutSeconds has to be a single numeric value"},
		{"-123456", nil, ErrBadFormat,
			"timeoutSeconds cannot be negative"},
		{strN('2', 200), nil, ErrBadFormat,
			"timeoutSeconds cannot exceed max int"},
		{"pizza", nil, ErrBadFormat,
			"timeoutSeconds has to be numeric"},
	}

	for _, e := range entries {
		d := &CmdData{
			CmdType:  Unknown,
			Args:     e.inArg,
			Data:     nil,
			NeedData: false,
		}
		tc, err := NewReserveWithTimeoutArg(d)
		assert.Equalf(t, e.err, err, e.msg)
		assert.Equalf(t, e.outArg, tc, e.msg)
	}
}

// TestBuryArg verifies NewBuryArg parsing of "bury <id> <pri>".
func TestBuryArg(t *testing.T) {
	// bury
	var entries = []struct {
		inArg  string
		outArg *buryArg
		err    error
		msg    string
	}{
		{"101 255", &buryArg{pri: 255, id: 101}, nil,
			"expect valid bury arg"},
		{"100", nil, ErrBadFormat,
			"bury args must have exact arg count"},
		{"10 100 30 ", nil, ErrBadFormat,
			"bury args must have exact arg count"},
		{"32898abc 11", nil, ErrBadFormat,
			"bury args must have numeric args"},
	}

	for _, e := range entries {
		d := &CmdData{
			CmdType:  Unknown,
			Args:     e.inArg,
			Data:     nil,
			NeedData: false,
		}
		pa, err := NewBuryArg(d)
		assert.Equalf(t, e.err, err, e.msg)
		assert.Equalf(t, e.outArg, pa, e.msg)
	}
}

// TestKickArg verifies NewKickNArg parsing of "kick <bound>".
func TestKickArg(t *testing.T) {
	// kick (was mislabelled "bury" — copy-paste leftover)
	var entries = []struct {
		inArg  string
		outArg *kickNArg
		err    error
		msg    string
	}{
		{"101", &kickNArg{bound: 101}, nil,
			"expect valid arg"},
		{"10 100 30 ", nil, ErrBadFormat,
			"must have exact arg count"},
		{"32898abc", nil, ErrBadFormat,
			"args must have numeric args"},
	}

	for _, e := range entries {
		d := &CmdData{
			CmdType:  Unknown,
			Args:     e.inArg,
			Data:     nil,
			NeedData: false,
		}
		pa, err := NewKickNArg(d)
		assert.Equalf(t, e.err, err, e.msg)
		assert.Equalf(t, e.outArg, pa, e.msg)
	}
}

// TestReleaseArg verifies NewReleaseArg parsing of "release <id> <pri> <delay>".
func TestReleaseArg(t *testing.T) {
	// release (was mislabelled "bury" — copy-paste leftover)
	var entries = []struct {
		inArg  string
		outArg *releaseArg
		err    error
		msg    string
	}{
		{"12328 1 10", &releaseArg{id: 12328, pri: 1, delay: 10}, nil,
			"expect valid arg"},
		{"10 100 ", nil, ErrBadFormat,
			"must have exact arg count"},
		{"32898abc", nil, ErrBadFormat,
			"args must have numeric args"},
	}

	for _, e := range entries {
		d := &CmdData{
			CmdType:  Unknown,
			Args:     e.inArg,
			Data:     nil,
			NeedData: false,
		}
		pa, err := NewReleaseArg(d)
		assert.Equalf(t, e.err, err, e.msg)
		assert.Equalf(t, e.outArg, pa, e.msg)
	}
}
-------------------------------------------------------------------------------- /beanstalkd/core/cmd_type.go: --------------------------------------------------------------------------------
package core

import (
	"unicode"
	"unicode/utf8"
)

// CmdType refers to the type of command in beanstalkd context

//go:generate stringer -type=CmdType --output cmd_type_string.go
type CmdType int

const (
	Unknown CmdType = iota
	Bury
	Delete
	Ignore
	Kick
	KickJob
	ListTubes
	ListTubeUser
	PauseTube
	Peek
	PeekBuried
	PeekDelayed
	PeekReady
	Put
	Quit
	Release
	Reserve
	ReserveJob
	ReserveWithTimeout
	Stats
	StatsJob
	StatsTube
	Touch
	Use
	Watch
	Max
)

// commandTypeStrings maps the kebab-case wire name of each command
// (e.g. "reserve-with-timeout") back to its CmdType; built once in init.
var commandTypeStrings map[string]CmdType

func init() {
	commandTypeStrings =
make(map[string]CmdType) 46 | for c := Unknown + 1; c < Max; c++ { 47 | commandTypeStrings[kebabCase(c.String())] = c 48 | } 49 | } 50 | 51 | func kebabCase(s string) string { 52 | result := make([]byte, 0, len(s)) 53 | for i, ch := range s { 54 | if unicode.IsUpper(ch) && i > 0 { 55 | result = append(result, '-') 56 | } 57 | 58 | ch = unicode.ToLower(ch) 59 | eLen := utf8.RuneLen(ch) 60 | b := make([]byte, eLen, eLen) 61 | n := utf8.EncodeRune(b, ch) 62 | for j := 0; j < n; j++ { 63 | result = append(result, b[j]) 64 | } 65 | } 66 | 67 | return string(result) 68 | } 69 | -------------------------------------------------------------------------------- /beanstalkd/core/cmd_type_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=CmdType --output cmd_type_string.go"; DO NOT EDIT. 2 | 3 | package core 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 
	var x [1]struct{}
	_ = x[Unknown-0]
	_ = x[Bury-1]
	_ = x[Delete-2]
	_ = x[Ignore-3]
	_ = x[Kick-4]
	_ = x[KickJob-5]
	_ = x[ListTubes-6]
	_ = x[ListTubeUser-7]
	_ = x[PauseTube-8]
	_ = x[Peek-9]
	_ = x[PeekBuried-10]
	_ = x[PeekDelayed-11]
	_ = x[PeekReady-12]
	_ = x[Put-13]
	_ = x[Quit-14]
	_ = x[Release-15]
	_ = x[Reserve-16]
	_ = x[ReserveJob-17]
	_ = x[ReserveWithTimeout-18]
	_ = x[Stats-19]
	_ = x[StatsJob-20]
	_ = x[StatsTube-21]
	_ = x[Touch-22]
	_ = x[Use-23]
	_ = x[Watch-24]
	_ = x[Max-25]
}

const _CmdType_name = "UnknownBuryDeleteIgnoreKickKickJobListTubesListTubeUserPauseTubePeekPeekBuriedPeekDelayedPeekReadyPutQuitReleaseReserveReserveJobReserveWithTimeoutStatsStatsJobStatsTubeTouchUseWatchMax"

var _CmdType_index = [...]uint8{0, 7, 11, 17, 23, 27, 34, 43, 55, 64, 68, 78, 89, 98, 101, 105, 112, 119, 129, 147, 152, 160, 169, 174, 177, 182, 185}

func (i CmdType) String() string {
	if i < 0 || i >= CmdType(len(_CmdType_index)-1) {
		return "CmdType(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _CmdType_name[_CmdType_index[i]:_CmdType_index[i+1]]
}
-------------------------------------------------------------------------------- /beanstalkd/core/core.go: --------------------------------------------------------------------------------
package core

import (
	"errors"
	"github.com/1xyz/coolbeans/state"
	"regexp"
	"time"
)

var (
	// Delimiter for commands and data
	DelimRe = regexp.MustCompile(`\r\n`)
)

// Config carries the beanstalkd server settings.
type Config struct {
	// Address to listen on
	ListenAddr string

	// TCP port to listen on
	ListenPort int

	// Comma-separated list of upstream cluster server addresses;
	// empty means run with an in-process state machine (see proto.NewJSM)
	UpstreamAddrs string

	// Upstream connect timeout in seconds
	ConnectTimeout int

	// Maximum allowed job body size
	// (units presumably bytes — TODO confirm against cmd wiring)
	MaxJobSize int

	// Upper bound on reservation timeouts
	// (units presumably seconds — TODO confirm against cmd wiring)
	MaxReservationTimeout int
}

const (
	TickDuration = 1000 * time.Millisecond

	// Max. Command size in bytes (inclusive of 2 byte delimiter)
	MaxCmdSizeBytes = 226

	// Max. Job Data size in bytes (inclusive of 2 byte delimiter)
	MaxJobDataSizeBytes = (128 * 1024) + 2

	// The size of a read buffer
	readBufferSizeBytes = 4 * 1024

	// Default tube name
	defaultTubeName = state.TubeName("default")
)

const (
	// Error message response indicating an internal server error. Typically, indicative
	// of a bug in the implementation.
	MsgInternalError = "INTERNAL_ERROR"

	// The client sent a command line that was not well-formed.
	// This can happen if the line's length exceeds 224 bytes including \r\n,
	// if the name of a tube exceeds 200 bytes, if non-numeric
	// characters occur where an integer is expected, if the wrong number of
	// arguments are present, or if the command line is mal-formed in any other way.
	MsgBadFormat = "BAD_FORMAT"

	// The client sent a command that the server does not know.
	MsgUnknownCommand = "UNKNOWN_COMMAND"

	// Error message if the client attempts to ignore the only tube in its watch list.
	MsgCannotIgnoreTube = "NOT_IGNORED"

	// Error message to indicate if a reservation request timed out
	MsgTimedOut = "TIMED_OUT"

	// Error message to indicate if a reservation request is in DeadlineSoon
	MsgDeadlineSoon = "DEADLINE_SOON"

	// Error message to indicate if the entity (job etc) cannot be found
	MsgNotFound = "NOT_FOUND"

	// Error message to indicate that the job is too big
	MsgJobTooBig = "JOB_TOO_BIG"

	// Error message to indicate that CRLF is needed
	MsgExpectCRLF = "EXPECTED_CRLF"
)

var (
	// ErrNoData - when the input stream has no data this can be the case
	// if the underlying reader return zero bytes and is however not at EOF
	ErrNoData = errors.New("no data")

	// ErrDelimiterMissing - when the input stream has no newlines (\r\n)
	ErrDelimiterMissing = errors.New("delimiter (\\r\\n) missing in command")

	// ErrBadFormat The client sent a command line that was not well-formed.
	// This can happen if the line's length exceeds 224 bytes including \r\n,
	// if the name of a tube exceeds 200 bytes, if non-numeric
	// characters occur where an integer is expected, if the wrong number of
	// arguments are present, or if the command line is mal-formed in any other way.
	ErrBadFormat = errors.New("bad format command")

	// ErrJobSizeTooBig The client has requested to put a job with a body larger than the configured limit
	ErrJobSizeTooBig = errors.New("job size too big")

	// ErrCmdTokensMissing - when the provided command has no tokens
	// (note: the "atleast" typo in the message text is kept — it is a
	// runtime string that callers/logs may depend on)
	ErrCmdTokensMissing = errors.New("bad command, cannot find atleast one token")

	// ErrCmdNotFound - the provided command is not found or supported
	ErrCmdNotFound = errors.New("command not found")
)

// nowSeconds returns the current UTC wall-clock time as Unix seconds.
func nowSeconds() int64 {
	return time.Now().UTC().Unix()
}

// addToNow returns the Unix time (UTC seconds) delaySeconds from now.
func addToNow(delaySeconds int) int64 {
	return nowSeconds() + int64(delaySeconds)
}
-------------------------------------------------------------------------------- /beanstalkd/core/parse.go: --------------------------------------------------------------------------------
package core

import (
	"bytes"
	"io"
)

// Discard reads and throws away bytes — first the leftover bytes in b, then
// bytes from rdr — until a newline (\r\n) delimiter is found. It returns any
// bytes read after the delimiter, ErrNoData if a read yields zero bytes, or
// the reader's error (e.g. io.EOF) if no delimiter is ever seen.
func Discard(rdr io.Reader, b []byte) ([]byte, error) {
	mr := io.MultiReader(bytes.NewReader(b), rdr)
	// tracks whether the previous chunk ended in '\r', so a delimiter split
	// across two reads is still recognized
	isLastByteCarriageReturn := false

	for {
		b := make([]byte, 1024)
		n, err := mr.Read(b)
		if err != nil {
			return nil, err
		}

		if n < 0 {
			panic("un-expected: negative value for n")
		} else if n == 0 {
			return nil, ErrNoData
		} else {
			// check the case when \r\n encounters on a buffer boundary
			// check to see if the first byte is a newline
			if isLastByteCarriageReturn && b[0] == '\n' {
				// discard the first byte (\n) and return the rest
				return b[1:n], nil
			}

			if _, right, ok := split(b[0:n]); ok {
				return right, nil
			}

			isLastByteCarriageReturn = b[n-1] == '\r'
		}
	}
}

// Scan searches the leftover bytes b, then the reader rdr, for a newline
// (\r\n) delimiter, buffering at most limitBytes bytes. It returns
// (bytes-before-delimiter, bytes-after-delimiter, nil) on success,
// ErrDelimiterMissing if limitBytes is reached without finding one, or the
// reader's error. A delimiter split across two reads is handled via the
// carriage-return tracking below.
func Scan(rdr io.Reader, b []byte, limitBytes int) ([]byte, []byte, error) {
	buf := make([]byte, 0)
	if len(b) > 0 {
		if left, right, ok := split(b); ok {
			return clone(left), clone(right), nil
		} else {
			buf = append(buf, b...)
		}
	}

	// set to true if the last byte in last read call scanned is \r
	isLastByteCarriageReturn := len(buf) > 0 && buf[len(buf)-1] == '\r'
	// per-read chunk size, capped at limitBytes
	bufSize := readBufferSizeBytes
	if bufSize > limitBytes {
		bufSize = limitBytes
	}

	for len(buf) < limitBytes {
		b := make([]byte, bufSize)
		n, err := rdr.Read(b)
		if err != nil {
			return nil, nil, err
		}

		if n < 0 {
			panic("un-expected: negative value for n")
		} else if n == 0 {
			return nil, buf, ErrNoData
		} else {
			// check the case when \r\n encounters on a buffer boundary
			// check to see if the first byte is a newline
			if isLastByteCarriageReturn && b[0] == '\n' {
				// discard the first byte (\n) and return the rest;
				// drop the trailing '\r' already buffered
				return buf[:len(buf)-1], b[1:n], nil
			}

			if left, right, ok := split(b[0:n]); ok {
				buf = append(buf, left...)
				return buf, right, nil
			}

			buf = append(buf, b[0:n]...)
			isLastByteCarriageReturn = b[n-1] == '\r'
		}
	}

	return nil, nil, ErrDelimiterMissing
}

// ScanJobData - Scans the provided "b" byte slice in search of a newline
// (\\r\\n) delimiter. If the provided slice doesn't have a newline, then
// scan the reader upto "maxJobDataSizeBytes-len(b)" bytes in search of a
// delimiter
// Returns a triple of command, extra byte slice and an error
// The extra byte slice are any additional bytes read after encountering the
// delimiter.
func ScanJobData(rdr io.Reader, b []byte, maxJobDataSizeBytes int) ([]byte, []byte, error) {
	// Scan reads in chunks of up to readBufferSizeBytes and may buffer up to
	// maxJobDataSizeBytes in total before reporting ErrDelimiterMissing.
	return Scan(rdr, b, maxJobDataSizeBytes)
}

// ScanCmdLine - Scans the provided "b" byte slice in search of a newline
// (\\r\\n) delimiter. If the provided slice does not have a newline,
// then scan the reader upto "MaxCmdSizeBytes-len(b)" bytes in search of
// the delimiter.
// Returns a triple of command, extra byte slice and an error
// The extra byte slice are any additional bytes read after encountering the
// delimiter.
func ScanCmdLine(rdr io.Reader, b []byte) ([]byte, []byte, error) {
	return Scan(rdr, b, MaxCmdSizeBytes)
}

// scans the input byte slice in search of a newline (\\r\\n) delimiter
// returns a triple of left and right hand slices and bool indicating if
// a result was found
func split(b []byte) ([]byte, []byte, bool) {
	if loc := DelimRe.FindIndex(b); loc != nil {
		return b[0:loc[0]], b[loc[1]:], true
	} else {
		return nil, nil, false
	}
}

// return a clone of the src byte slice
func clone(src []byte) []byte {
	// https://github.com/go101/go101/wiki/How-to-efficiently-clone-a-slice%3F
	return append(src[:0:0], src...)
}
-------------------------------------------------------------------------------- /beanstalkd/core/parse_test.go: --------------------------------------------------------------------------------
package core

import (
	"bytes"
	. "github.com/smartystreets/goconvey/convey"
	"io"
	"testing"
)

// TestDiscard covers Discard with empty/non-empty leftover buffers, including
// the case where the \r\n delimiter straddles the buffer/stream boundary.
func TestDiscard(t *testing.T) {
	Convey("when Discard is called", t, func() {
		Convey("with an empty buffer", func() {
			callDiscard := func(s string) ([]byte, error) {
				return Discard(bytes.NewReader([]byte(s)), make([]byte, 0))
			}

			Convey("and an input stream with a delimiter", func() {
				rt, err := callDiscard("peek example_tube\r\n peek a boo")

				Convey("all bytes before delim are discarded", func() {
					So(rt, ShouldResemble, []byte(" peek a boo"))
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream ending with a delimiter", func() {
				rt, err := callDiscard("peek example_tube\r\n")

				Convey("all bytes before delim are discarded", func() {
					So(rt, ShouldResemble, []byte(""))
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream with no delimiter", func() {
				rt, err := callDiscard("peek example_tube ....")

				Convey("all bytes are discarded until EOF", func() {
					So(rt, ShouldBeNil)
					So(err, ShouldEqual, io.EOF)
				})
			})
		})

		Convey("with a non-empty buffer", func() {
			callDiscard := func(s string, buf string) ([]byte, error) {
				return Discard(bytes.NewReader([]byte(s)), []byte(buf))
			}

			Convey("which has a delimiter", func() {
				buf := "peek \r\n hello"

				Convey("and an input stream", func() {
					rt, err := callDiscard("peek example_tube\r\n peek a boo", buf)

					Convey("all bytes before delim are discarded", func() {
						So(rt, ShouldResemble, []byte(" hello"))
						So(err, ShouldBeNil)
					})
				})
			})

			Convey("which has no delimiter", func() {
				buf := "peek hello"

				Convey("and an input stream with a delimiter", func() {
					rt, err := callDiscard("peek example_tube\r\n peek a boo", buf)

					Convey("all bytes before delim are discarded", func() {
						So(string(rt), ShouldResemble, " peek a boo")
						So(err, ShouldBeNil)
					})
				})

				Convey("and an input stream with no delimiter", func() {
					rt, err := callDiscard("peek example_tube peek a boo", buf)

					Convey("all bytes before delim are discarded", func() {
						So(rt, ShouldBeNil)
						So(err, ShouldEqual, io.EOF)
					})
				})
			})

			Convey("which ends with carriage return", func() {
				buf := "peek \r"

				Convey("and an input stream starting with newline", func() {
					rt, err := callDiscard("\npeek example_tube\r\n peek a boo", buf)

					Convey("all bytes before delim are discarded", func() {
						So(string(rt), ShouldResemble, "peek example_tube\r\n peek a boo")
						So(err, ShouldBeNil)
					})
				})
			})
		})
	})
}

// TestScan covers Scan with and without leftover buffers, limit handling,
// and delimiters that straddle the buffer/stream boundary.
func TestScan(t *testing.T) {
	Convey("when Scan is called", t, func() {
		Convey("with an empty buffer and a limit", func() {
			callScan := func(s string) ([]byte, []byte, error) {
				return Scan(bytes.NewReader([]byte(s)), make([]byte, 0), 30)
			}

			Convey("and an input stream ending with the delimiter", func() {
				lt, rt, err := callScan("peek example_tube\r\n")

				Convey("scans the stream across the delimiter", func() {
					So(string(lt), ShouldResemble, "peek example_tube")
					So(rt, ShouldResemble, []byte(""))
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream containing the delimiter", func() {
				lt, rt, err := callScan("peek example_tube\r\n hello ")

				Convey("scans the stream across the delimiter", func() {
					So(string(lt), ShouldResemble, "peek example_tube")
					So(string(rt), ShouldResemble, " hello ")
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream containing just the delimiter", func() {
				lt, rt, err := callScan("\r\n")

				Convey("scans the stream as expected", func() {
					So(lt, ShouldBeEmpty)
					So(rt, ShouldBeEmpty)
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream not containing a delimiter within a limit", func() {
				lt, rt, err := callScan("peek example")

				Convey("scans results in a EOF error", func() {
					So(lt, ShouldBeNil)
					So(rt, ShouldBeNil)
					So(err, ShouldEqual, io.EOF)
				})
			})

			Convey("and an input stream with no delimiter and size exceeding the limit", func() {
				lt, rt, err := callScan("peek a large line with no end in site hello")

				Convey("scan results in ErrDelimiterMissing error", func() {
					So(lt, ShouldBeNil)
					So(rt, ShouldBeNil)
					So(err, ShouldEqual, ErrDelimiterMissing)
				})
			})

			Convey("and an input stream with a delimiter and size exceeding the limit", func() {
				lt, rt, err := callScan("peek a large line\r\n with no end in site hello")

				Convey("scan the stream as expected upto only limit bytes", func() {
					So(string(lt), ShouldEqual, "peek a large line")
					So(string(rt), ShouldEqual, " with no en")
					So(err, ShouldBeNil)
				})
			})

			Convey("and an input stream with a delimiter at the limit boundary", func() {
				lt, rt, err := callScan("peek a large line with no en\r\n in site hello")

				Convey("scan the stream as expected upto only limit bytes", func() {
					So(string(lt), ShouldEqual, "peek a large line with no en")
					So(string(rt), ShouldEqual, "")
					So(err, ShouldBeNil)
				})
			})
		})

		Convey("with a fixed limit", func() {
			callScan := func(s string, buf string) ([]byte, []byte, error) {
				return Scan(bytes.NewReader([]byte(s)), []byte(buf), 30)
			}

			Convey("and a non-empty buffer with a part of the delimiter", func() {
				buf := "peek \r"

				Convey("and an input stream with the second half of the delimiter", func() {
					lt, rt, err := callScan("\nexample_tube\r\nhello", buf)

					Convey("results in a success", func() {
						So(string(lt), ShouldResemble, "peek ")
						So(string(rt), ShouldResemble, "example_tube\r\nhello")
						So(err, ShouldBeNil)
					})
				})

				Convey("and an input stream with only the second half of the delimiter", func() {
					lt, rt, err := callScan("\n", buf)

					Convey("results in a success", func() {
						So(string(lt), ShouldResemble, "peek ")
						So(string(rt), ShouldResemble, "")
						So(err, ShouldBeNil)
					})
				})
			})

			Convey("and a non-empty buffer with no delimiter", func() {
				buf := "peek "

				Convey("and an input stream with a delimiter", func() {
					lt, rt, err := callScan("example_tube\r\nhello", buf)

					Convey("results in a success", func() {
						So(string(lt), ShouldResemble, "peek example_tube")
						So(rt, ShouldResemble, []byte("hello"))
						So(err, ShouldBeNil)
					})
				})

				Convey("and an input stream with no delimiter exceeding the limit", func() {
					lt, rt, err := callScan("example_tube hello word this crazy cool", buf)

					Convey("results in a ErrDelimiterMissing error", func() {
						So(lt, ShouldBeNil)
						So(rt, ShouldBeNil)
						So(err, ShouldEqual, ErrDelimiterMissing)
					})
				})
			})

			Convey("and a non-empty buffer with delimiter", func() {
				buf := "peek queue \r\n"

				Convey("and an input stream", func() {
					lt, rt, err := callScan("example_tube\r\nhello", buf)

					Convey("results in a success", func() {
						So(string(lt), ShouldResemble, "peek queue ")
						So(string(rt), ShouldResemble, "")
						So(err, ShouldBeNil)
					})
				})

				Convey("and an empty input stream", func() {
					lt, rt, err := callScan("l", buf)

					Convey("results in a success", func() {
						So(string(lt), ShouldResemble, "peek queue ")
						So(rt, ShouldBeEmpty)
So(err, ShouldBeNil) 252 | }) 253 | }) 254 | }) 255 | }) 256 | }) 257 | } 258 | -------------------------------------------------------------------------------- /beanstalkd/proto/conn.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | import ( 4 | "fmt" 5 | "github.com/1xyz/coolbeans/beanstalkd/core" 6 | log "github.com/sirupsen/logrus" 7 | "io" 8 | "net" 9 | ) 10 | 11 | // Conn encapsulates the connection (stream) with a client 12 | type Conn struct { 13 | // clientRegistration refers to client's unique registration 14 | clientReg *core.ClientReg 15 | 16 | // Reference to the command processor to execute client commands 17 | cmdProcessor core.CommandProcessor 18 | 19 | // represents the underlying client network stream 20 | conn net.Conn 21 | 22 | // reader for the incoming connection 23 | reader io.Reader 24 | 25 | // Current state of this connection 26 | state ConnState 27 | 28 | // Buffer of un-processed bytes read from the connection/reader 29 | buffer []byte 30 | 31 | // Represents the last parsed commandData. Used primarily 32 | // in the case, when the commandData need additional data 33 | // Currently, only the PUT command works makes use of this. 34 | lastCmd *core.CmdData 35 | } 36 | 37 | // NewConn returns a pointer to a new Conn struct 38 | func NewConn(conn net.Conn, clientReg *core.ClientReg, cmdProcessor core.CommandProcessor) *Conn { 39 | return &Conn{ 40 | clientReg: clientReg, 41 | cmdProcessor: cmdProcessor, 42 | conn: conn, 43 | reader: conn, 44 | state: WantCommand, 45 | buffer: make([]byte, 0, 1024), 46 | lastCmd: nil, 47 | } 48 | } 49 | 50 | func (c *Conn) reply(s string) { 51 | c.replyBytes([]byte(s)) 52 | } 53 | 54 | func (c *Conn) replyBytes(b []byte) { 55 | if n, err := c.conn.Write(b); err != nil { 56 | log.Warnf("conn.replyBytes: c.conn.Write. 
err = %v", err) 57 | } else if n != len(b) { 58 | log.Warnf("conn.replyBytes: n=%d != len(b) = %d", n, len(b)) 59 | } 60 | if _, err := c.conn.Write([]byte("\r\n")); err != nil { 61 | log.Warnf("conn.replyBytes: c.conn.Write (newline). err = %v", err) 62 | } 63 | } 64 | 65 | func (c *Conn) dispatchCommand() { 66 | ctxLog := log.WithFields(log.Fields{ 67 | "method": "conn.dispatchCommand", 68 | "clientID": c.clientReg.ID}) 69 | 70 | if c.lastCmd == nil { 71 | ctxLog.Panicf("inconsistent state: expected lastCmd to not be nil") 72 | } 73 | 74 | defer func() { c.lastCmd = nil }() 75 | 76 | if c.lastCmd.NeedData { 77 | ctxLog.Debugf("Command Data %v", string(c.lastCmd.Data)) 78 | } 79 | 80 | req, err := core.NewCmdRequest(c.lastCmd, c.clientReg.ID) 81 | log.Infof("conn.dispatchCommand: CmdRequest %v", req) 82 | if err != nil { 83 | ctxLog.Errorf("core.NewCmdRequest err=%v", err) 84 | if err == core.ErrCmdNotFound { 85 | c.reply(core.MsgUnknownCommand) 86 | } else if err == core.ErrBadFormat { 87 | ctxLog.Errorf("Badformat") 88 | c.reply(core.MsgBadFormat) 89 | } else { 90 | c.reply(core.MsgInternalError) 91 | } 92 | return 93 | } 94 | 95 | ctxLog = ctxLog.WithField("requestID", req.ID) 96 | ctxLog.Debugf("Dispatching cmdRequest req %v", req) 97 | c.cmdProcessor.DispatchRequest(req) 98 | for { 99 | reply := <-c.clientReg.ResponseCh 100 | if reply.Response != nil { 101 | c.replyBytes(reply.Response) 102 | } 103 | 104 | if !reply.HasMore { 105 | break 106 | } 107 | } 108 | 109 | // Note we have to drain all messages and stop this SM 110 | if req.CmdType == core.Quit { 111 | for e := range c.clientReg.ResponseCh { 112 | ctxLog.Debugf("Discarding response %v", e) 113 | } 114 | c.state = Stopped 115 | } 116 | } 117 | 118 | func (c *Conn) close() { 119 | if c.state != Close { 120 | return 121 | } 122 | 123 | c.lastCmd = &core.CmdData{ 124 | CmdType: core.Quit, 125 | } 126 | c.dispatchCommand() 127 | } 128 | 129 | func (c *Conn) stop() { 130 | if c.state != Stopped { 131 | 
return 132 | } 133 | 134 | log.WithField("method", "conn.stop").Debugf("closing connection") 135 | if err := c.conn.Close(); err != nil { 136 | log.Warnf("conn.stop: c.conn.close(): err = %v", err) 137 | } 138 | } 139 | 140 | func (c *Conn) wantEndLine() { 141 | ctxLog := log.WithField("method", "conn.wantEndLine") 142 | if c.state != WantEndLine { 143 | return 144 | } 145 | 146 | extraBytes, err := core.Discard(c.reader, c.buffer) 147 | c.buffer = extraBytes 148 | if err != nil { 149 | if err == io.EOF { 150 | c.state = Close 151 | } else { 152 | ctxLog.Errorf("internal-error %v", err) 153 | c.reply(core.MsgInternalError) 154 | } 155 | 156 | return 157 | } 158 | 159 | c.state = WantCommand 160 | } 161 | 162 | func (c *Conn) wantData() { 163 | if c.state != WantData { 164 | return 165 | } 166 | 167 | if c.lastCmd == nil { 168 | panic("inconsistent state: expected lastCmd to be nil") 169 | } 170 | 171 | ctxLog := log.WithField("method", "conn.wantData") 172 | dataBytes, extraBytes, err := core.ScanJobData(c.reader, c.buffer, c.cmdProcessor.MaxJobDataSize()) 173 | c.buffer = extraBytes 174 | if err != nil { 175 | if err == io.EOF { 176 | ctxLog.Errorf("EOF detected") 177 | c.state = Close 178 | } else if err == core.ErrDelimiterMissing { 179 | ctxLog.Errorf("Delimiter not detected") 180 | c.reply(core.MsgExpectCRLF) 181 | c.state = WantEndLine 182 | } else { 183 | ctxLog.Errorf("internal-error %v", err) 184 | c.reply(core.MsgInternalError) 185 | } 186 | 187 | return 188 | } 189 | 190 | c.lastCmd.Data = dataBytes 191 | c.dispatchCommand() 192 | c.state = WantCommand 193 | } 194 | 195 | func (c *Conn) wantCommand() { 196 | if c.state != WantCommand { 197 | return 198 | } 199 | 200 | ctxLog := log.WithField("method", "conn.wantCommand") 201 | cmdBytes, extraBytes, err := core.ScanCmdLine(c.reader, c.buffer) 202 | c.buffer = extraBytes 203 | if err != nil { 204 | if err == io.EOF { 205 | ctxLog.Infof("EOF detected") 206 | c.state = Close 207 | } else if err == 
core.ErrDelimiterMissing { 208 | ctxLog.Errorf("Badformat") 209 | c.reply(core.MsgBadFormat) 210 | c.state = WantEndLine 211 | } else { 212 | ctxLog.Errorf("internal-error %v", err) 213 | c.reply(core.MsgInternalError) 214 | } 215 | 216 | return 217 | } 218 | 219 | cmdData, err := core.ParseCommandLine(string(cmdBytes), 220 | c.cmdProcessor.MaxJobDataSize()) 221 | if err != nil { 222 | if err == core.ErrCmdNotFound || err == core.ErrCmdTokensMissing { 223 | ctxLog.Errorf("Badformat") 224 | c.reply(core.MsgBadFormat) 225 | } else if err == core.ErrJobSizeTooBig { 226 | ctxLog.Errorf("ErrJobSizeTooBig") 227 | c.reply(core.MsgJobTooBig) 228 | c.state = WantEndLine 229 | } else { 230 | ctxLog.Errorf("internal-error %v", err) 231 | c.reply(core.MsgInternalError) 232 | } 233 | 234 | return 235 | } 236 | 237 | c.lastCmd = cmdData 238 | if cmdData.NeedData { 239 | c.state = WantData 240 | return 241 | } 242 | 243 | ctxLog.Debugf("Cmddata = %v", cmdData) 244 | c.dispatchCommand() 245 | } 246 | 247 | // Tick run the underlying state machine 248 | func (c *Conn) Tick() { 249 | ctxLog := log.WithField("method", "conn.Tick") 250 | for c.state != Stopped { 251 | ctxLog.Debugf("current state = %v", c.state) 252 | c.wantCommand() 253 | c.wantData() 254 | c.wantEndLine() 255 | c.close() 256 | c.stop() 257 | } 258 | } 259 | 260 | func (c *Conn) String() string { 261 | return fmt.Sprintf("Client: %v State: %v conn.localAddr: %v conn.remoteAddr: %v", 262 | c.clientReg, c.state, c.conn.LocalAddr(), c.conn.RemoteAddr()) 263 | } 264 | -------------------------------------------------------------------------------- /beanstalkd/proto/conn_state.go: -------------------------------------------------------------------------------- 1 | package proto 2 | 3 | type ConnState int 4 | 5 | const ( 6 | // Conn waiting for a command from the client 7 | WantCommand ConnState = iota 8 | 9 | // Conn ready to dispatch 10 | ReadyToProcess 11 | 12 | // Conn waiting for job data from the client 13 | WantData 

	// Conn is sending a reservation job data to the client
	SendJob

	// Conn is sending a response to the client
	SendWord

	// Conn is waiting for an available job reservation
	WaitReservation

	// Conn is discarding job data (due to an error)
	BitBucket

	// Conn is closing & cleaning up (a synthetic Quit is dispatched)
	Close

	// Conn is discarding data until it gets to \r\n (error recovery)
	WantEndLine

	// The connection is cleaned up and shutdown; terminal state
	Stopped
)
-------------------------------------------------------------------------------- /beanstalkd/proto/conn_state_string.go: --------------------------------------------------------------------------------
// Code generated by "stringer -type=ConnState"; DO NOT EDIT.

package proto

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[WantCommand-0]
	_ = x[ReadyToProcess-1]
	_ = x[WantData-2]
	_ = x[SendJob-3]
	_ = x[SendWord-4]
	_ = x[WaitReservation-5]
	_ = x[BitBucket-6]
	_ = x[Close-7]
	_ = x[WantEndLine-8]
	_ = x[Stopped-9]
}

const _ConnState_name = "WantCommandReadyToProcessWantDataSendJobSendWordWaitReservationBitBucketCloseWantEndLineStopped"

var _ConnState_index = [...]uint8{0, 11, 25, 33, 40, 48, 63, 72, 77, 88, 95}

func (i ConnState) String() string {
	if i < 0 || i >= ConnState(len(_ConnState_index)-1) {
		return "ConnState(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ConnState_name[_ConnState_index[i]:_ConnState_index[i+1]]
}
-------------------------------------------------------------------------------- /beanstalkd/proto/conn_test.go: --------------------------------------------------------------------------------
package proto
-------------------------------------------------------------------------------- /beanstalkd/proto/tcp_server.go: --------------------------------------------------------------------------------
package proto

import (
	"errors"
	"fmt"
	"github.com/1xyz/coolbeans/beanstalkd/core"
	"github.com/1xyz/coolbeans/beanstalkd/proxy"
	"github.com/1xyz/coolbeans/state"
	"github.com/google/uuid"
	log "github.com/sirupsen/logrus"
	"net"
	"strings"
	"sync"
	"time"
)

// TcpServer accepts beanstalkd client connections and hands each one to a
// Conn state machine driven by a shared command processor.
type TcpServer struct {
	// Address:Port to open the connection
	address string

	// Command processor reference
	cmdProc core.CommandProcessor

	// Channel used to signal Listen to stop; Shutdown sends to it and
	// then closes it (was inaccurately described as a "bool")
	doneCh chan bool

	// waitgroup to wait for shutdown
	shutdownWG *sync.WaitGroup
}

// NewJSM returns the job state machine to use: an in-process JSM when no
// upstream addresses are configured, otherwise a proxy client connected to
// the comma-separated upstream cluster addresses. Panics on failure.
func NewJSM(upstreamAddrs string, connTimeout time.Duration) state.JSM {
	if len(upstreamAddrs) == 0 {
		jsm, err := state.NewJSM()
		if err != nil {
log.Panicf("NewJSM: err=%v", err) 36 | } 37 | return jsm 38 | } 39 | 40 | s := strings.Split(upstreamAddrs, ",") 41 | for i, e := range s { 42 | log.Debugf("NewJSM: jsm server addr %d = %v", i, e) 43 | } 44 | 45 | nc := proxy.NewClient(uuid.New().URN(), s, connTimeout) 46 | if err := nc.Open(); err != nil { 47 | log.Panicf("NewJSM: proxyClient.Open(..). err=%v", err) 48 | } 49 | log.Infof("NewJSM: proxy %s connected to %v", nc.ProxyID, nc.ServerAddrs) 50 | return nc 51 | } 52 | 53 | func NewTcpServer(cfg *core.Config) *TcpServer { 54 | addr := fmt.Sprintf("%s:%v", cfg.ListenAddr, cfg.ListenPort) 55 | connectTimeout := time.Duration(cfg.ConnectTimeout) * time.Second 56 | jsm := NewJSM(cfg.UpstreamAddrs, connectTimeout) 57 | return &TcpServer{ 58 | address: addr, 59 | cmdProc: core.NewCommandProcessor(jsm, cfg), 60 | doneCh: make(chan bool), 61 | shutdownWG: &sync.WaitGroup{}, 62 | } 63 | } 64 | 65 | const deadline = time.Second 66 | 67 | func (srv *TcpServer) Listen() error { 68 | srv.shutdownWG.Add(1) 69 | ctxLog := log.WithFields(log.Fields{"method": "TcpServer.Listen", "addr": srv.address}) 70 | tcpAddr, err := net.ResolveTCPAddr("tcp", srv.address) 71 | if err != nil { 72 | return fmt.Errorf("resolveTcpAddr failed %v", err) 73 | } 74 | 75 | listener, err := net.ListenTCP("tcp", tcpAddr) 76 | if err != nil { 77 | return fmt.Errorf("creating listener failed %v", err) 78 | } 79 | ctxLog.Debugf("Listener started") 80 | go srv.cmdProc.Run() 81 | for { 82 | select { 83 | // check to see if the doneCh has been signalled 84 | case <-srv.doneCh: 85 | if err := listener.Close(); err != nil { 86 | log.Errorf("listener.close err=%v", err) 87 | } 88 | srv.cmdProc.Shutdown() 89 | log.Infof("waiting for server shutdown") 90 | srv.shutdownWG.Done() 91 | return nil 92 | default: 93 | // Nothing to do here 94 | } 95 | 96 | // set the deadline for the TCP listener; forces accept to timeout 97 | if err := listener.SetDeadline(time.Now().Add(deadline)); err != nil { 98 | return 
fmt.Errorf("setDeadline err=%v", err) 99 | } 100 | 101 | conn, err := listener.Accept() 102 | var opErr *net.OpError 103 | if errors.As(err, &opErr) && opErr.Timeout() { 104 | continue 105 | } else if err != nil { 106 | ctxLog.Errorf("listener.Accept err %v", err) 107 | continue 108 | } 109 | 110 | clientReg := srv.cmdProc.RegisterClient() 111 | if clientReg.Error != nil { 112 | ctxLog.Errorf("Unable to register client %v", clientReg.Error) 113 | if err := conn.Close(); err != nil { 114 | ctxLog.Errorf("conn.close err=%v", err) 115 | } 116 | continue 117 | } 118 | 119 | c := NewConn(conn, clientReg, srv.cmdProc) 120 | ctxLog.Debugf("Connected a new client connection %v", c) 121 | go c.Tick() 122 | } 123 | } 124 | 125 | func (srv *TcpServer) Shutdown() { 126 | srv.doneCh <- true 127 | close(srv.doneCh) 128 | srv.shutdownWG.Wait() 129 | } 130 | -------------------------------------------------------------------------------- /beanstalkd/proxy/bool.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import "sync/atomic" 4 | 5 | // Encapsulates a boolean value that is safe to perform 6 | // conditional set/resets across go-routines. 7 | type AtomicBool int32 8 | 9 | // Construct a new AtomicBool with a value of false 10 | func NewAtomicBool() *AtomicBool { 11 | a := new(AtomicBool) 12 | atomic.StoreInt32((*int32)(a), 0) 13 | return a 14 | } 15 | 16 | // Value returns the current boolean value 17 | func (a *AtomicBool) Value() bool { 18 | return atomic.LoadInt32((*int32)(a)) == 1 19 | } 20 | 21 | // SetIfFalse updates the boolean value to true from false. 22 | // Returns the boolean result of this transition operation, true if successful, false otherwise 23 | func (a *AtomicBool) SetIfFalse() bool { 24 | return atomic.CompareAndSwapInt32((*int32)(a), 0, 1) 25 | } 26 | 27 | // ResetIfTrue updates the boolean value to false from true. 
28 | // Returns the boolean result of this transition operation, true if successful, false otherwise 29 | func (a *AtomicBool) ResetIfTrue() bool { 30 | return atomic.CompareAndSwapInt32((*int32)(a), 1, 0) 31 | } 32 | -------------------------------------------------------------------------------- /beanstalkd/proxy/bool_test.go: -------------------------------------------------------------------------------- 1 | package proxy 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | ) 7 | 8 | func TestNewAtomicBool(t *testing.T) { 9 | ab := NewAtomicBool() 10 | assert.Falsef(t, ab.Value(), "expect ab.Value to be initialized to false") 11 | 12 | assert.Falsef(t, ab.ResetIfTrue(), "expect ab.ResetIfTrue to return false") 13 | assert.Truef(t, ab.SetIfFalse(), "expect ab.SetIfFalse to return true") 14 | 15 | assert.Truef(t, ab.Value(), "expect ab.Value to be true") 16 | 17 | assert.Falsef(t, ab.SetIfFalse(), "expect ab.SetIfFalse to return false") 18 | assert.Truef(t, ab.ResetIfTrue(), "expect ab.ResetIfTrue to return true") 19 | 20 | assert.Falsef(t, ab.Value(), "expect ab.Value to be false") 21 | } 22 | -------------------------------------------------------------------------------- /cluster/client/cluster_client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | v1 "github.com/1xyz/coolbeans/api/v1" 6 | log "github.com/sirupsen/logrus" 7 | "golang.org/x/net/context" 8 | "google.golang.org/grpc" 9 | _ "google.golang.org/grpc/health" 10 | "google.golang.org/grpc/resolver" 11 | "google.golang.org/grpc/resolver/manual" 12 | "strings" 13 | "time" 14 | ) 15 | 16 | var ( 17 | serviceConfig = `{ 18 | "loadBalancingPolicy": "round_robin", 19 | "healthCheckConfig": { 20 | "serviceName": "" 21 | } 22 | }` 23 | ) 24 | 25 | type ClusterNodeClient struct { 26 | v1.ClusterClient 27 | timeout time.Duration 28 | conn *grpc.ClientConn 29 | HostAddrs []string 30 | DispHostAddr string 
31 | } 32 | 33 | func NewClusterNodeClient(hostAddr string, connTimeout time.Duration) (*ClusterNodeClient, error) { 34 | conn, err := grpc.Dial(hostAddr, grpc.WithInsecure()) 35 | if err != nil { 36 | log.Errorf("NewClusterNodeClient: grpc.Dial err=%v", err) 37 | return nil, err 38 | } 39 | return &ClusterNodeClient{ 40 | HostAddrs: []string{hostAddr}, 41 | DispHostAddr: hostAddr, 42 | conn: conn, 43 | ClusterClient: v1.NewClusterClient(conn), 44 | timeout: connTimeout, 45 | }, nil 46 | } 47 | 48 | func NewClusterNodeClientWithLB(hostAddrs []string, connTimeout time.Duration, opts ...grpc.DialOption) (*ClusterNodeClient, error) { 49 | conn, err := connectWithLB(hostAddrs) 50 | if err != nil { 51 | log.Errorf("NewClusterNodeClientWithLB: connectWithLB: err = %v", err) 52 | return nil, err 53 | } 54 | return &ClusterNodeClient{ 55 | ClusterClient: v1.NewClusterClient(conn), 56 | timeout: connTimeout, 57 | conn: conn, 58 | HostAddrs: hostAddrs, 59 | DispHostAddr: strings.Join(hostAddrs, ","), 60 | }, nil 61 | } 62 | 63 | func connectWithLB(hostAddrs []string) (*grpc.ClientConn, error) { 64 | r, cleanup := manual.GenerateAndRegisterManualResolver() 65 | defer cleanup() 66 | 67 | addresses := make([]resolver.Address, len(hostAddrs)) 68 | for i, s := range hostAddrs { 69 | addresses[i] = resolver.Address{Addr: s} 70 | } 71 | 72 | log.Debugf("ClusterNodeClient: connectWithLB: Addresses: %v", addresses) 73 | r.InitialState(resolver.State{Addresses: addresses}) 74 | address := fmt.Sprintf("%s:///unused", r.Scheme()) 75 | options := []grpc.DialOption{ 76 | grpc.WithInsecure(), 77 | grpc.WithDefaultServiceConfig(serviceConfig), 78 | } 79 | 80 | conn, err := grpc.Dial(address, options...) 
81 | return conn, err 82 | } 83 | 84 | func (c *ClusterNodeClient) Close() error { 85 | return c.conn.Close() 86 | } 87 | 88 | func (c *ClusterNodeClient) newCtx() (context.Context, context.CancelFunc) { 89 | ctx, cancel := context.WithTimeout(context.Background(), c.timeout) 90 | return ctx, cancel 91 | } 92 | 93 | func (c *ClusterNodeClient) LeaveCluster(nodeID string) error { 94 | ctx, cancel := c.newCtx() 95 | defer cancel() 96 | if _, err := c.Leave(ctx, &v1.LeaveRequest{NodeId: nodeID}); err != nil { 97 | log.Errorf("LeaveCluster: error %v", err) 98 | return err 99 | } 100 | 101 | log.Debugf("LeaveCluster: nodeId:%v complete", nodeID) 102 | return nil 103 | } 104 | 105 | func (c *ClusterNodeClient) IsNodeLeader() (bool, error) { 106 | ctx, cancel := c.newCtx() 107 | defer cancel() 108 | b, err := c.ClusterClient.IsNodeLeader(ctx, &v1.Empty{}) 109 | if err != nil { 110 | log.Errorf("IsNodeLeader: error %v", err) 111 | return false, err 112 | } 113 | 114 | log.Debugf("IsNodeLeader:%v ", b.IsLeader) 115 | return b.IsLeader, nil 116 | } 117 | 118 | func (c *ClusterNodeClient) JoinCluster(nodeID, raftAddr string) error { 119 | req := &v1.JoinRequest{NodeId: nodeID, Addr: raftAddr} 120 | ctx, cancel := c.newCtx() 121 | defer cancel() 122 | if _, err := c.Join(ctx, req); err != nil { 123 | log.Errorf("JoinCluster: error %v", err) 124 | return err 125 | } 126 | 127 | log.Debugf("JoinCluster: nodeId:%v raftAddr:%v complete", nodeID, raftAddr) 128 | return nil 129 | } 130 | -------------------------------------------------------------------------------- /cluster/cmd/cmd_client.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "github.com/1xyz/coolbeans/cluster/client" 6 | "github.com/1xyz/coolbeans/tools" 7 | "github.com/docopt/docopt-go" 8 | log "github.com/sirupsen/logrus" 9 | "os" 10 | "time" 11 | ) 12 | 13 | func CmdClusterClient(argv []string, version string) { 14 | usage := ` 15 | 
Usage: 16 | cluster-client is_leader [--node-addr=] [--timeout=] 17 | 18 | options: 19 | -h, --help 20 | --node-addr= Address of a cluster node [default: 127.0.0.1:11000]. 21 | --timeout= Connect timeout in seconds [default: 30] 22 | ` 23 | opts, err := docopt.ParseArgs(usage, argv[1:], version) 24 | if err != nil { 25 | log.Fatalf("CmdClusterClient: error parsing arguments. err=%v", err) 26 | } 27 | 28 | ldr := tools.OptsBool(opts, "is_leader") 29 | if ldr { 30 | cmdLeader( 31 | tools.OptsStr(opts, "--node-addr"), 32 | tools.OptsSeconds(opts, "--timeout")) 33 | return 34 | } 35 | log.Infof("Unknown command") 36 | } 37 | 38 | func cmdLeader(nodeAddr string, timeout time.Duration) { 39 | c, err := client.NewClusterNodeClient(nodeAddr, timeout) 40 | if err != nil { 41 | log.Fatalf("cmdLeader: err = %v", err) 42 | } 43 | defer c.Close() 44 | 45 | b, err := c.IsNodeLeader() 46 | if err != nil { 47 | log.Fatalf("cmdLeader err %v", err) 48 | } 49 | fmt.Printf("isNodeLeader: %v\n", b) 50 | if !b { 51 | os.Exit(1) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /cluster/server/cluster_server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "errors" 5 | v1 "github.com/1xyz/coolbeans/api/v1" 6 | "github.com/1xyz/coolbeans/store" 7 | log "github.com/sirupsen/logrus" 8 | "golang.org/x/net/context" 9 | "google.golang.org/grpc/codes" 10 | "google.golang.org/grpc/status" 11 | ) 12 | 13 | type RaftCluster interface { 14 | // Join, joins this node, identified by nodeID and reachable at addr, 15 | // to an existing Raft cluster. 16 | Join(nodeID, addr string) error 17 | 18 | // Leave, leave this specified node, identified by nodeID from 19 | // an existing Raft cluster. 
20 | Leave(nodeID string) error 21 | 22 | // Returns true if this specified node is a Leader 23 | IsLeader() bool 24 | 25 | // Ask the node to take a snapshot 26 | Snapshot() error 27 | } 28 | 29 | type ClusterServer struct { 30 | v1.UnimplementedClusterServer 31 | rc RaftCluster 32 | } 33 | 34 | func NewClusterServer(rc RaftCluster) *ClusterServer { 35 | return &ClusterServer{ 36 | rc: rc, 37 | } 38 | } 39 | 40 | // Join joins a node, identified by nodeID and located at addr, to this cluster. 41 | // The node must be ready to respond to Raft communications at that address. 42 | // 43 | // It is required that the node that this is called into is a leader node. 44 | func (c *ClusterServer) Join(ctx context.Context, req *v1.JoinRequest) (*v1.Empty, error) { 45 | logc := log.WithFields(log.Fields{"method": "Join", "nodeID": req.NodeId, "addr": req.Addr}) 46 | if err := c.rc.Join(req.NodeId, req.Addr); err != nil { 47 | logc.Errorf("c.rc.Join. err = %v", err) 48 | if errors.Is(err, store.ErrNotRaftLeader) { 49 | return nil, status.Errorf(codes.FailedPrecondition, "%s", store.ErrNotRaftLeader) 50 | } 51 | 52 | return nil, err 53 | } else { 54 | logc.Infof("join completed success") 55 | return &v1.Empty{}, nil 56 | } 57 | } 58 | 59 | // Leave leaves a node, identified by nodeID and located at addr, to this store. 60 | // 61 | // It is required that the node that this is called into is a leader node. 62 | func (c *ClusterServer) Leave(ctx context.Context, req *v1.LeaveRequest) (*v1.Empty, error) { 63 | logc := log.WithFields(log.Fields{"method": "Leave", "nodeID": req.NodeId}) 64 | if err := c.rc.Leave(req.NodeId); err != nil { 65 | logc.Errorf("c.rc.Leave. 
err = %v", err) 66 | if errors.Is(err, store.ErrNotRaftLeader) { 67 | return nil, status.Errorf(codes.FailedPrecondition, "%s", store.ErrNotRaftLeader) 68 | } 69 | 70 | return nil, err 71 | } else { 72 | logc.Infof("leaved completed success") 73 | return &v1.Empty{}, nil 74 | } 75 | } 76 | 77 | func (c *ClusterServer) IsNodeLeader(ctx context.Context, req *v1.Empty) (*v1.IsNodeLeaderResponse, error) { 78 | return &v1.IsNodeLeaderResponse{ 79 | IsLeader: c.rc.IsLeader(), 80 | }, nil 81 | } 82 | 83 | func (c *ClusterServer) Snapshot(ctx context.Context, req *v1.Empty) (*v1.Empty, error) { 84 | if err := c.rc.Snapshot(); err != nil { 85 | return nil, err 86 | } 87 | 88 | return &v1.Empty{}, nil 89 | } 90 | -------------------------------------------------------------------------------- /cluster/server/health_server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "golang.org/x/net/context" 6 | healthV1 "google.golang.org/grpc/health/grpc_health_v1" 7 | "time" 8 | ) 9 | 10 | type ServiceReadiness interface { 11 | // Ready indicates if this service is ready to accept traffic 12 | Ready() bool 13 | } 14 | 15 | type HealthCheckServer struct { 16 | healthV1.UnimplementedHealthServer 17 | tickDuration time.Duration 18 | shutdownCh chan bool 19 | serviceReadiness ServiceReadiness 20 | } 21 | 22 | func (h *HealthCheckServer) Check(ctx context.Context, 23 | req *healthV1.HealthCheckRequest) (*healthV1.HealthCheckResponse, error) { 24 | s := healthV1.HealthCheckResponse_UNKNOWN 25 | if h.serviceReadiness == nil { 26 | s = healthV1.HealthCheckResponse_UNKNOWN 27 | } else if h.serviceReadiness.Ready() { 28 | s = healthV1.HealthCheckResponse_SERVING 29 | } else { 30 | s = healthV1.HealthCheckResponse_NOT_SERVING 31 | } 32 | 33 | log.Debugf("check: req svc: %v status= %v", req.Service, s) 34 | 35 | return &healthV1.HealthCheckResponse{ 36 | Status: s, 37 | }, nil 38 | } 
39 | 40 | func (h *HealthCheckServer) Watch(req *healthV1.HealthCheckRequest, stream healthV1.Health_WatchServer) error { 41 | ticker := time.NewTicker(h.tickDuration) 42 | defer ticker.Stop() 43 | 44 | for { 45 | select { 46 | case t := <-ticker.C: 47 | resp, err := h.Check(stream.Context(), req) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | log.Debugf("watch: sending resp at %v resp=%v", t, resp) 53 | stream.Send(resp) 54 | 55 | case <-h.shutdownCh: 56 | log.Debugf("watch: shutdown signaled") 57 | return nil 58 | } 59 | } 60 | } 61 | 62 | func NewHealthCheckServer(s ServiceReadiness) *HealthCheckServer { 63 | return &HealthCheckServer{ 64 | tickDuration: time.Second, 65 | shutdownCh: make(chan bool), 66 | serviceReadiness: s, 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /cluster/server/reservations_controller.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "errors" 5 | v1 "github.com/1xyz/coolbeans/api/v1" 6 | log "github.com/sirupsen/logrus" 7 | "time" 8 | ) 9 | 10 | // reservationsController provides the ability to stream 11 | // reservation updates from the job state machine (jsm) 12 | // back to the connected clients (aka. "proxy" clients) 13 | // 14 | // A high level overview: 15 | // 16 | // ┌----------------┐ ┌----------------┐ 17 | // │ State Proxy │ │ State Proxy │ 18 | // │ Client │ ...... 
│ Client │ 19 | // └----------------┘ └----------------┘ 20 | // ^ ^ 21 | // | | 22 | // | | (stream Reservations) 23 | // | | 24 | // ┌---------------------------------------------------┐ 25 | // │ reservationsController │ 26 | // └---------------------------------------------------┘ 27 | // | ^ 28 | // | (every 1s) | Reservations 29 | // | | 30 | // V | 31 | // ┌---------------------------------------------------┐ 32 | // │ JSM.Tick() │ 33 | // └---------------------------------------------------┘ 34 | type ReservationsController struct { 35 | // connProxies, represents connected proxy clients which 36 | // can receive reservation updates. 37 | // the map is keyed of the proxyID and the value is 38 | // channel where specific proxies receive their updates 39 | connProxies map[string]chan *ProxyResp 40 | 41 | // proxyJoinCh, all proxy join requests are sent here 42 | proxyJoinCh chan *ProxyJoinReq 43 | 44 | // proxyLeaveCh, all proxy leave requests are sent here 45 | proxyLeaveCh chan *ProxyLeaveReq 46 | 47 | // interface allowing the controller to access periodic 48 | // tick functionality 49 | jsmTick JsmTick 50 | 51 | // Channel to signal a stop 52 | doneCh chan bool 53 | } 54 | 55 | var ( 56 | // Returned if the same proxy client attempts to connect with the controller 57 | ErrProxyExists = errors.New("proxy with id exists") 58 | ErrNotLeader = errors.New("current node is not a leader") 59 | ) 60 | 61 | func NewReservationsController(jsmTick JsmTick) *ReservationsController { 62 | return &ReservationsController{ 63 | connProxies: make(map[string]chan *ProxyResp), 64 | proxyJoinCh: make(chan *ProxyJoinReq), 65 | proxyLeaveCh: make(chan *ProxyLeaveReq), 66 | jsmTick: jsmTick, 67 | doneCh: make(chan bool), 68 | } 69 | } 70 | 71 | // Register makes a request to add this proxy client (identified by proxyID) 72 | // 73 | // Register returns back a read only channel to receive updates 74 | func (rctrl *ReservationsController) Register(proxyID string) (<-chan 
*ProxyResp, error) { 75 | logc := log.WithField("method", "jsmController.Register") 76 | 77 | req := &ProxyJoinReq{ 78 | proxyID: proxyID, 79 | respCh: make(chan *ProxyResp), 80 | } 81 | 82 | rctrl.proxyJoinCh <- req 83 | resp := <-req.respCh 84 | if resp.Err != nil { 85 | logc.Errorf("resp.Err with proxyID %v. Err %v", proxyID, resp.Err) 86 | return nil, resp.Err 87 | } 88 | 89 | logc.Infof("Register for proxyID=%v done", proxyID) 90 | return req.respCh, nil 91 | } 92 | 93 | // UnRegister makes a request to remove this proxy client (identified by the proxyID) 94 | // 95 | // Additionally, once the unRegister is complete, it drains the response channel 96 | func (rctrl *ReservationsController) UnRegister(proxyID string, respCh <-chan *ProxyResp) { 97 | logc := log.WithField("method", "jsmController.UnRegister") 98 | rctrl.proxyLeaveCh <- &ProxyLeaveReq{ 99 | proxyID: proxyID, 100 | } 101 | 102 | // drain all the responses & log errors 103 | for resp := range respCh { 104 | if resp.Err != nil { 105 | logc.Errorf("resp.Err with proxyID %v. Err %v", proxyID, resp.Err) 106 | } 107 | } 108 | 109 | logc.Infof("UnRegister for proxyID=%v done", proxyID) 110 | } 111 | 112 | // Run, runs this controller. the control loop does not return immediately (unless there is an error) 113 | // 114 | // Run performs the following functions 115 | // 1) Periodically (for every second), queries the underlying JSM (job state machine) for any 116 | // reservation updates. (These could include newly assigned jobs, timeouts or deadline-soon to any 117 | // Reservations request). These updates are dispatched to the appropriate client proxy (if they are 118 | // connected. 
119 | // 2) Processes any register (join) or un-register (leave) requests from proxies 120 | func (rctrl *ReservationsController) Run() error { 121 | logc := log.WithField("method", "reservationsController.Run") 122 | 123 | ticker := time.NewTicker(time.Second) 124 | defer ticker.Stop() 125 | 126 | for { 127 | select { 128 | case <-rctrl.doneCh: 129 | logc.Infof("Done signalled") 130 | return nil 131 | 132 | case t := <-ticker.C: 133 | logc.Debugf("tick-req at %v", t) 134 | if tickResp, err := rctrl.jsmTick.Tick(); err != nil { 135 | logc.Debugf("jsmTick.Tick() Err=%v", err) 136 | continue 137 | } else { 138 | // map of proxies responded 139 | responded := make(map[string]bool) 140 | for proxyId, reservation := range tickResp.ProxyReservations { 141 | if respCh, ok := rctrl.connProxies[proxyId]; !ok { 142 | logc.Warnf("no entry with proxyID=%v discarding forwarding", proxyId) 143 | } else { 144 | respCh <- &ProxyResp{ 145 | RespType: Reservation, 146 | Reservations: reservation.Entries, 147 | Err: nil, 148 | } 149 | } 150 | 151 | responded[proxyId] = true 152 | } 153 | 154 | // send a nil response back to proxies which don't have 155 | // any responses to handle. 
156 | for proxyId, respCh := range rctrl.connProxies { 157 | if _, ok := responded[proxyId]; !ok { 158 | respCh <- &ProxyResp{ 159 | RespType: Reservation, 160 | Reservations: nil, 161 | Err: nil, 162 | } 163 | } 164 | } 165 | } 166 | 167 | case jreq := <-rctrl.proxyJoinCh: 168 | logc.Debugf("join-req proxyID=%v", jreq.proxyID) 169 | if _, ok := rctrl.connProxies[jreq.proxyID]; ok { 170 | logc.Errorf("join-req proxy with id = %v exists", jreq.proxyID) 171 | jreq.respCh <- &ProxyResp{ 172 | RespType: Join, 173 | Reservations: nil, 174 | Err: ErrProxyExists, 175 | } 176 | close(jreq.respCh) 177 | } else { 178 | rctrl.connProxies[jreq.proxyID] = jreq.respCh 179 | jreq.respCh <- &ProxyResp{ 180 | RespType: Join, 181 | Reservations: nil, 182 | Err: nil, 183 | } 184 | } 185 | 186 | case lreq := <-rctrl.proxyLeaveCh: 187 | logc.Debugf("leave-req proxyID=%v", lreq.proxyID) 188 | if respCh, ok := rctrl.connProxies[lreq.proxyID]; !ok { 189 | logc.Errorf("leave-req proxy with id = %v does not exists", lreq.proxyID) 190 | } else { 191 | respCh <- &ProxyResp{ 192 | RespType: Leave, 193 | Reservations: nil, 194 | Err: nil, 195 | } 196 | close(respCh) 197 | delete(rctrl.connProxies, lreq.proxyID) 198 | logc.Infof("leave-req deleted entry with proxyID=%v", lreq.proxyID) 199 | } 200 | } 201 | 202 | } 203 | } 204 | 205 | func (rctrl *ReservationsController) Stop() { 206 | rctrl.doneCh <- true 207 | close(rctrl.doneCh) 208 | } 209 | 210 | // ////////////////////////////////////////////////////////////////// 211 | 212 | type ProxyRespType int 213 | 214 | const ( 215 | Unknown ProxyRespType = iota 216 | Join 217 | Leave 218 | Reservation 219 | ) 220 | 221 | func (p ProxyRespType) String() string { 222 | return [...]string{"Unknown", "Join", "Leave", "Reservation"}[p] 223 | } 224 | 225 | type ProxyResp struct { 226 | RespType ProxyRespType 227 | Reservations []*v1.Reservation 228 | Err error 229 | } 230 | 231 | type ProxyJoinReq struct { 232 | proxyID string 233 | respCh chan *ProxyResp 
234 | } 235 | 236 | type ProxyLeaveReq struct { 237 | proxyID string 238 | } 239 | 240 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . JsmTick 241 | 242 | type JsmTick interface { 243 | Tick() (*v1.TickResponse, error) 244 | } 245 | -------------------------------------------------------------------------------- /cluster/server/reservations_controller_test.go: -------------------------------------------------------------------------------- 1 | package server_test 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | v1 "github.com/1xyz/coolbeans/api/v1" 7 | "github.com/1xyz/coolbeans/cluster/server" 8 | "github.com/1xyz/coolbeans/cluster/server/serverfakes" 9 | "github.com/stretchr/testify/assert" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func TestReservationsController_RunStop(t *testing.T) { 15 | tests := []struct { 16 | tr *v1.TickResponse 17 | err error 18 | }{ 19 | {&v1.TickResponse{ 20 | ProxyReservations: map[string]*v1.Reservations{}}, nil}, 21 | {nil, errors.New("hello")}, 22 | } 23 | for _, test := range tests { 24 | jsmTick := &serverfakes.FakeJsmTick{} 25 | jsmTick.TickReturns(test.tr, test.err) 26 | rctrl := server.NewReservationsController(jsmTick) 27 | 28 | doneCh := make(chan bool) 29 | go func() { 30 | err := rctrl.Run() 31 | assert.Nilf(t, err, "expect Err to be nil") 32 | doneCh <- true 33 | }() 34 | 35 | time.Sleep(2 * time.Second) 36 | assert.Truef(t, jsmTick.TickCallCount() > 0, 37 | "expect TickCallCount > 0, actual = %v", jsmTick.TickCallCount()) 38 | rctrl.Stop() 39 | <-doneCh 40 | } 41 | } 42 | 43 | func TestReservationsController_Register(t *testing.T) { 44 | doneCh := make(chan bool) 45 | rctrl := runTestController(t, doneCh) 46 | defer func() { 47 | rctrl.Stop() 48 | <-doneCh 49 | }() 50 | 51 | respCh, err := rctrl.Register("foobar") 52 | assert.Nilf(t, err, "expect Err to be nil") 53 | assert.NotNilf(t, respCh, "expect respCh to not be nil") 54 | 55 | respCh, err = rctrl.Register("foobar") 56 | assert.Equalf(t, 
server.ErrProxyExists, err, "expect Err to be ErrProxyExists") 57 | assert.Nilf(t, respCh, "expect respCh to be nil") 58 | } 59 | 60 | func TestReservationsController_UnRegister(t *testing.T) { 61 | doneCh := make(chan bool) 62 | rctrl := runTestController(t, doneCh) 63 | defer func() { 64 | rctrl.Stop() 65 | <-doneCh 66 | }() 67 | 68 | respCh, _ := rctrl.Register("foobar") 69 | rctrl.UnRegister("foobar", respCh) 70 | rctrl.UnRegister("foobar", respCh) 71 | 72 | respCh, err := rctrl.Register("foobar") 73 | assert.Nilf(t, err, "expect Err to be nil") 74 | assert.NotNilf(t, respCh, "expect respCh to not be nil") 75 | 76 | } 77 | 78 | func TestReservationsController_Run_With_Assinged_Reservations(t *testing.T) { 79 | doneCh := make(chan bool) 80 | proxyID := "proxy1" 81 | nReservations := 3 82 | rctrl := runtestControllerWithResponses(t, doneCh, proxyID, nReservations) 83 | defer func() { 84 | rctrl.Stop() 85 | <-doneCh 86 | }() 87 | 88 | respCh, _ := rctrl.Register(proxyID) 89 | for r := range respCh { 90 | assert.Equalf(t, server.Reservation, r.RespType, "expect respType to be reservation") 91 | assert.Equalf(t, nReservations, len(r.Reservations), "expect count of reservation to be %v actual=%v", 92 | nReservations, len(r.Reservations)) 93 | break 94 | } 95 | } 96 | 97 | func TestReservationsController_Run_With_NoAssigned_Reservations(t *testing.T) { 98 | doneCh := make(chan bool) 99 | rctrl := runtestControllerWithResponses(t, doneCh, "proxy1", 1) 100 | defer func() { 101 | rctrl.Stop() 102 | <-doneCh 103 | }() 104 | 105 | respCh, _ := rctrl.Register("proxy2") 106 | for r := range respCh { 107 | assert.Equalf(t, server.Reservation, r.RespType, 108 | "expect respType to be of type %v got %v", server.Reservation, r.RespType) 109 | assert.Nilf(t, r.Reservations, "expect reservations to be nil") 110 | break 111 | } 112 | 113 | rctrl.UnRegister("proxy2", respCh) 114 | } 115 | 116 | func runTestController(t *testing.T, doneCh chan<- bool) *server.ReservationsController { 
117 | jsmTick := &serverfakes.FakeJsmTick{} 118 | jsmTick.TickReturns(nil, errors.New("foo")) 119 | rctrl := server.NewReservationsController(jsmTick) 120 | 121 | go func() { 122 | if err := rctrl.Run(); err != nil { 123 | t.Fatalf("expect Err=%v to be nil", err) 124 | } 125 | 126 | doneCh <- true 127 | }() 128 | 129 | return rctrl 130 | } 131 | 132 | func runtestControllerWithResponses(t *testing.T, doneCh chan<- bool, proxyID string, nReservations int) *server.ReservationsController { 133 | resvn := make([]*v1.Reservation, nReservations) 134 | for i := 0; i < nReservations; i++ { 135 | resvn[i] = &v1.Reservation{ 136 | RequestId: fmt.Sprintf("test-request-%d", i), 137 | ClientId: fmt.Sprintf("client://%s/test-clientid-%d", proxyID, i), 138 | Status: v1.ReservationStatus_Timeout, 139 | JobId: 0, 140 | BodySize: 0, 141 | Body: nil, 142 | ErrorMsg: "", 143 | ProxyId: proxyID, 144 | } 145 | } 146 | 147 | resp := v1.TickResponse{ 148 | ProxyReservations: map[string]*v1.Reservations{ 149 | proxyID: {Entries: resvn}, 150 | }, 151 | } 152 | 153 | jsmTick := &serverfakes.FakeJsmTick{} 154 | jsmTick.TickReturns(&resp, nil) 155 | rctrl := server.NewReservationsController(jsmTick) 156 | 157 | go func() { 158 | if err := rctrl.Run(); err != nil { 159 | t.Fatalf("expect Err=%v to be nil", err) 160 | } 161 | 162 | doneCh <- true 163 | }() 164 | 165 | return rctrl 166 | } 167 | -------------------------------------------------------------------------------- /cluster/server/serverfakes/fake_jsm_tick.go: -------------------------------------------------------------------------------- 1 | // Code generated by counterfeiter. DO NOT EDIT. 
2 | package serverfakes 3 | 4 | import ( 5 | "sync" 6 | 7 | coolbeans_api_v1 "github.com/1xyz/coolbeans/api/v1" 8 | "github.com/1xyz/coolbeans/cluster/server" 9 | ) 10 | 11 | type FakeJsmTick struct { 12 | TickStub func() (*coolbeans_api_v1.TickResponse, error) 13 | tickMutex sync.RWMutex 14 | tickArgsForCall []struct { 15 | } 16 | tickReturns struct { 17 | result1 *coolbeans_api_v1.TickResponse 18 | result2 error 19 | } 20 | tickReturnsOnCall map[int]struct { 21 | result1 *coolbeans_api_v1.TickResponse 22 | result2 error 23 | } 24 | invocations map[string][][]interface{} 25 | invocationsMutex sync.RWMutex 26 | } 27 | 28 | func (fake *FakeJsmTick) Tick() (*coolbeans_api_v1.TickResponse, error) { 29 | fake.tickMutex.Lock() 30 | ret, specificReturn := fake.tickReturnsOnCall[len(fake.tickArgsForCall)] 31 | fake.tickArgsForCall = append(fake.tickArgsForCall, struct { 32 | }{}) 33 | fake.recordInvocation("Tick", []interface{}{}) 34 | fake.tickMutex.Unlock() 35 | if fake.TickStub != nil { 36 | return fake.TickStub() 37 | } 38 | if specificReturn { 39 | return ret.result1, ret.result2 40 | } 41 | fakeReturns := fake.tickReturns 42 | return fakeReturns.result1, fakeReturns.result2 43 | } 44 | 45 | func (fake *FakeJsmTick) TickCallCount() int { 46 | fake.tickMutex.RLock() 47 | defer fake.tickMutex.RUnlock() 48 | return len(fake.tickArgsForCall) 49 | } 50 | 51 | func (fake *FakeJsmTick) TickCalls(stub func() (*coolbeans_api_v1.TickResponse, error)) { 52 | fake.tickMutex.Lock() 53 | defer fake.tickMutex.Unlock() 54 | fake.TickStub = stub 55 | } 56 | 57 | func (fake *FakeJsmTick) TickReturns(result1 *coolbeans_api_v1.TickResponse, result2 error) { 58 | fake.tickMutex.Lock() 59 | defer fake.tickMutex.Unlock() 60 | fake.TickStub = nil 61 | fake.tickReturns = struct { 62 | result1 *coolbeans_api_v1.TickResponse 63 | result2 error 64 | }{result1, result2} 65 | } 66 | 67 | func (fake *FakeJsmTick) TickReturnsOnCall(i int, result1 *coolbeans_api_v1.TickResponse, result2 error) { 68 
| fake.tickMutex.Lock() 69 | defer fake.tickMutex.Unlock() 70 | fake.TickStub = nil 71 | if fake.tickReturnsOnCall == nil { 72 | fake.tickReturnsOnCall = make(map[int]struct { 73 | result1 *coolbeans_api_v1.TickResponse 74 | result2 error 75 | }) 76 | } 77 | fake.tickReturnsOnCall[i] = struct { 78 | result1 *coolbeans_api_v1.TickResponse 79 | result2 error 80 | }{result1, result2} 81 | } 82 | 83 | func (fake *FakeJsmTick) Invocations() map[string][][]interface{} { 84 | fake.invocationsMutex.RLock() 85 | defer fake.invocationsMutex.RUnlock() 86 | fake.tickMutex.RLock() 87 | defer fake.tickMutex.RUnlock() 88 | copiedInvocations := map[string][][]interface{}{} 89 | for key, value := range fake.invocations { 90 | copiedInvocations[key] = value 91 | } 92 | return copiedInvocations 93 | } 94 | 95 | func (fake *FakeJsmTick) recordInvocation(key string, args []interface{}) { 96 | fake.invocationsMutex.Lock() 97 | defer fake.invocationsMutex.Unlock() 98 | if fake.invocations == nil { 99 | fake.invocations = map[string][][]interface{}{} 100 | } 101 | if fake.invocations[key] == nil { 102 | fake.invocations[key] = [][]interface{}{} 103 | } 104 | fake.invocations[key] = append(fake.invocations[key], args) 105 | } 106 | 107 | var _ server.JsmTick = new(FakeJsmTick) 108 | -------------------------------------------------------------------------------- /coverage_test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | touch ./coverage.tmp 4 | echo 'mode: atomic' > coverage.txt 5 | go list ./... | grep -v /cmd | grep -v /vendor | xargs -n1 -I{} sh -c 'go test -race -covermode=atomic -coverprofile=coverage.tmp -coverpkg $(go list ./... 
| grep -v /vendor | tr "\n" ",") {} && tail -n +2 coverage.tmp >> coverage.txt || exit 255' && rm coverage.tmp -------------------------------------------------------------------------------- /deploy/beanstalkd.procfile: -------------------------------------------------------------------------------- 1 | # running beanstalkd as a single server 2 | beanstalkd: ./bin/coolbeans --verbose beanstalkd --listen-addr 127.0.0.1 --listen-port 11300 -------------------------------------------------------------------------------- /deploy/dev-cluster.procfile: -------------------------------------------------------------------------------- 1 | # Setup of a three node cluster with beanstalkd-0 as a proxy connected 2 | cluster-node0: ./bin/coolbeans --quiet cluster-node --node-id bean0 --root-dir /tmp/bean0 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:21000 --node-listen-addr=127.0.0.1:11000 --raft-advertized-addr=127.0.0.1:21000 --prometheus-addr=127.0.0.1:2020 3 | 4 | cluster-node1: ./bin/coolbeans --quiet cluster-node --node-id bean1 --root-dir /tmp/bean1 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:22000 --node-listen-addr=127.0.0.1:12000 --raft-advertized-addr=127.0.0.1:22000 --prometheus-addr=127.0.0.1:2021 5 | 6 | cluster-node2: ./bin/coolbeans --quiet cluster-node --node-id bean2 --root-dir /tmp/bean2 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:23000 --node-listen-addr=127.0.0.1:13000 --raft-advertized-addr=127.0.0.1:23000 --prometheus-addr=127.0.0.1:2022 7 | 8 | beanstalkd-0: ./bin/coolbeans --quiet beanstalkd --listen-addr 127.0.0.1 --upstream-addrs 127.0.0.1:11000,127.0.0.1:12000,127.0.0.1:13000 --listen-port 11300 -------------------------------------------------------------------------------- /deploy/dev.procfile: -------------------------------------------------------------------------------- 1 | # Setup of a single node 
cluster with beanstalkd-0 as a proxy connected 2 | cluster-node0: ./bin/coolbeans --quiet cluster-node --node-id bean0 --root-dir /tmp/single-bean0 --bootstrap-node-id bean0 --raft-listen-addr=127.0.0.1:21000 --node-listen-addr=127.0.0.1:11000 --prometheus-addr=127.0.0.1:2020 3 | beanstalkd-0: ./bin/coolbeans --quiet beanstalkd --listen-addr 127.0.0.1 --upstream-addrs 127.0.0.1:11000 --listen-port 11300 -------------------------------------------------------------------------------- /deploy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | services: 4 | bean1: 5 | build: . 6 | ports: 7 | - "127.0.0.1:12000:12000/tcp" 8 | networks: 9 | - cb_network 10 | command: ["/root/coolbeans", "cluster-node", "--node-id", "bean1", 11 | "--root-dir", "/tmp/bean1", 12 | "--bootstrap-node-id", "bean0", 13 | "--node-peer-addrs", "bean0:11000,bean1:12000,bean2:13000", 14 | "--raft-listen-addr", "bean1:22000", 15 | "--node-listen-addr", "bean1:12000", 16 | "--raft-advertized-addr=bean3:22000"] 17 | 18 | bean0: 19 | build: . 20 | ports: 21 | - "127.0.0.1:11000:11000/tcp" 22 | networks: 23 | - cb_network 24 | command: ["/root/coolbeans", "cluster-node", "--node-id", "bean0", 25 | "--root-dir", "/tmp/bean", 26 | "--bootstrap-node-id", "bean0", 27 | "--node-peer-addrs", "bean0:11000,bean1:12000,bean2:13000", 28 | "--raft-listen-addr", "bean0:21000", 29 | "--node-listen-addr", "bean0:11000", 30 | "--raft-advertized-addr=bean0:21000"] 31 | 32 | bean2: 33 | build: . 34 | ports: 35 | - "127.0.0.1:13000:13000/tcp" 36 | networks: 37 | - cb_network 38 | command: ["/root/coolbeans", "cluster-node", 39 | "--node-id", "bean2", 40 | "--root-dir", "/tmp/bean", 41 | "--bootstrap-node-id", "bean0", 42 | "--node-peer-addrs", "bean0:11000,bean1:12000,bean2:13000", 43 | "--raft-listen-addr=bean2:23000", 44 | "--node-listen-addr=bean2:13000", 45 | "--raft-advertized-addr=bean2:23000"] 46 | 47 | beanstalkd-proxy0: 48 | build: . 
49 | ports: 50 | - "127.0.0.1:11300:11300/tcp" 51 | networks: 52 | - cb_network 53 | command: ["/root/coolbeans", 54 | "beanstalkd", 55 | "-p", "11300", 56 | "-j", "bean0:11000,bean1:12000,bean2:13000"] 57 | 58 | networks: 59 | cb_network: -------------------------------------------------------------------------------- /deploy/prometheus.yml: -------------------------------------------------------------------------------- 1 | # sample local Prometheus config 2 | global: 3 | scrape_interval: 30s # Default is every 1 minute. 4 | evaluation_interval: 30s # Default is every 1 minute. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | alerting: 8 | alertmanagers: 9 | - static_configs: 10 | - targets: 11 | # - alertmanager:9093 12 | 13 | rule_files: 14 | # - "first_rules.yml" 15 | # - "second_rules.yml" 16 | 17 | scrape_configs: 18 | - job_name: cluster-node 19 | scrape_interval: 30s 20 | static_configs: 21 | - targets: 22 | - localhost:2020 23 | - localhost:2021 24 | - localhost:2022 -------------------------------------------------------------------------------- /doc/Beanstalkd-Proxy.md: -------------------------------------------------------------------------------- 1 | Beanstalkd Proxy 2 | ================ 3 | 4 | High level flow 5 | --------------- 6 | 7 | - The cluster-node server exposes the job state machine as a GRPC service, allowing a client to make necessary transitions. 8 | - Beanstalkd-proxy server, which is a client of the cluster-node makes these transitions on behalf of its clients. 9 | - The GRPC requests should be made to the cluster-node which is the Raft leader. 10 | - The Raft leader cluster-node internally runs a ticker (executing every second), that updates reservations, timeouts etc. All updates from these are routed to the appropriate beanstalkd-proxy via a GRPC streaming method from the raft leader to the beanstalkd-proxy. 
11 | 12 | Connectivity 13 | ------------ 14 | 15 | ### Problem 16 | 17 | Example: Consider a setup of a three cluster-node setup, and a beanstalkd (proxy) server as a client to this cluster. We have to solve the following problem. 18 | - The beanstalkd-proxy is given addresses to these three cluster-nodes. It must identify the leader, so that all GRPC requests are made to the leader. 19 | - If the leadership is re-assigned, then beanstalkd-proxy must again identify the leader and re-connect to the leader. - 20 | - If there is no leader, beanstalkd-proxy must have a retry policy in place 21 | - During the period of disruption, beanstalkd-proxy's clients see minimal disruption. Especially, with streaming reservations 22 | 23 | ### Approach using GRPC health check 24 | 25 | - The proposed approach is to use [GRPC health check protocol](https://github.com/grpc/grpc/blob/master/doc/health-checking.md) for the cluster-node's job state machine GRPC service. 26 | 27 | - A GRPC service can report one of the health states (UNKNOWN, SERVING, NOT_SERVING) 28 | 29 | ``` 30 | enum ServingStatus { 31 | // Indicates that the service's serving status is unknown 32 | UNKNOWN = 0; 33 | 34 | // Indicates that the service is running and ready to serve requests 35 | SERVING = 1; 36 | 37 | // Indicates that the service is running and not ready to take traffic 38 | NOT_SERVING = 2; 39 | } 40 | ``` 41 | Here, only the leader reports a ServingStatus of SERVING, and the other cluster-nodes report NON_SERVING 42 | 43 | 44 | - A GRPC client, here like the beanstalkd-proxy registers a round-robin loadbalancer with the addresses of all the GRPC cluster-node, with health checking enabled. Here, the health probe should automatically ensure that the request is routed to the leader. 45 | 46 | - Drawback: A leader change can trigger some of the streaming reservations to be missed. One approach is to have the StreamingGRPC setup with all the cluster-node (independent of the health serving status). 
This minimizes missed streaming reservations.
Specifically, these participants agree on the transitions to be made on the replicated state machine. The paper [Paxos made moderately complex](http://www.cs.cornell.edu/courses/cs7412/2011sp/paxos.pdf) provides an in-depth discussion of the distributed consensus problem.
| 49 | 50 | 51 | 52 | The rationale for separating the proxy and the cluster-node include: 53 | 54 | - By having two processes, we can natively secure & authenticate the GRPC connection between the proxy-server and the cluster-node server. 55 | - The cluster node server can be scaled independently of the beanstalkd-proxy. 56 | 57 | Notes: 58 | 59 | - [RQlite](https://github.com/rqlite/rqlite/blob/master/DOC/DESIGN.md) is a good reference on how to design a replicated system using Raft. 60 | 61 | Setup options 62 | ------------- 63 | - It it recommended to have a three cluster-nodes in your RAFT cluster to be resilient to single node failures. 64 | 65 | - Options for setting up beanstalkd-proxy: 66 | - A single beanstalkd proxy server as a side-car for every machine, or pod, similar to [this](https://cloud.google.com/sql/docs/mysql/sql-proxy). 67 | - Have a fleet of one ore more beanstalkd-proxy servers with the cluster-nodes configured as upstreams. 68 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /doc/GettingStarted.md: -------------------------------------------------------------------------------- 1 | Getting Started 2 | =============== 3 | 4 | A quick way to setup coolbeans on your local machine is to [download the latest binary from the release page](https://github.com/1xyz/coolbeans/releases). 5 | 6 | Example (download and unzip for Darwin (macOS)): 7 | 8 | wget https://github.com/1xyz/coolbeans/releases/download/v0.1.10/coolbeans-darwin-amd64-v0.1.10.zip 9 | 10 | unzip coolbeans-darwin-amd64-v0.1.11.zip 11 | Archive: coolbeans-darwin-amd64-v0.1.11.zip 12 | inflating: README.md 13 | inflating: LICENSE.txt 14 | inflating: coolbeans 15 | 16 | 17 | Setup up a single node cluster 18 | ------------------------------ 19 | 20 | The simplest example is to setup a single node cluster. This setup has two processes: (i) coolbeans cluster-node server and (ii) beanstalkd proxy. 
21 | 22 | #### Start a new coolbeans cluster-node. 23 | 24 | ./coolbeans cluster-node --node-id bean0 --root /tmp/single-bean0 --bootstrap-node-id bean0 --raft-listen-addr :21000 --node-listen-addr :11000 25 | 26 | 27 | The above example starts a new cluster node: 28 | 29 | - with a unique node-id bean0. 30 | - the cluster-node starts two listeners, the raft service listens on `:21000` and the GRPC service listens on `:11000`. 31 | - all data logs and snapshots will be persisted under the directory: `/tmp/single-bean0`. 32 | - the bootstrap node, which is the default assigned leader during the first time cluster formation, is `bean0`. 33 | 34 | 35 | #### Start a new beanstalkd proxy-server 36 | 37 | ./coolbeans beanstalkd --upstream-addr 127.0.0.1:11000 --listen-port 11300 38 | 39 | The above example starts a new beanstalkd proxy: 40 | 41 | - with the proxy upstream pointing to the GRPC service: `127.0.0.1:11000`. 42 | - listening for beanstalkd client requests on port `11300`. 43 | 44 | 45 | Run a beanstalkd client 46 | ----------------------- 47 | 48 | Checkout [beanstalkd's community page](https://github.com/beanstalkd/beanstalkd/wiki/Tools) for some tools 49 | 50 | Example: We picked yabean since we are familiar with it. 
51 | 52 | ### Download & unzip yabean 53 | 54 | Download, and unzip the yabean CLI for the OS/arch https://github.com/1xyz/yabean/releases 55 | 56 | wget https://github.com/1xyz/yabean/releases/download/v0.1.4/yabean_0.1.4_darwin_amd64.tar.gz 57 | Saving to: ‘yabean_0.1.4_darwin_amd64.tar.gz’ 58 | 59 | tar xvf yabean_0.1.4_darwin_amd64.tar.gz 60 | x LICENSE 61 | x README.md 62 | x yabean 63 | 64 | ### Run a few sample commands 65 | 66 | Run some put(s) 67 | 68 | ./yabean put --tube "tube01" --body "hello world" 69 | c.Put() returned id = 1 70 | 71 | ./yabean put --tube "tube01" --body "你好" 72 | c.Put() returned id = 2 73 | 74 | ./yabean put --tube "tube01" --body "नमस्ते" 75 | c.Put() returned id = 3 76 | 77 | 78 | Reserve a job & delete the reserved job 79 | 80 | ./yabean reserve --tube "tube01" --string --del 81 | reserved job id=1 body=11 82 | body = hello world 83 | 84 | 85 | Reserve a job and allow a TTL to timeout, the job can be reserved again after ttr 86 | 87 | ./yabean reserve --tube "tube01" --string 88 | reserved job id=2 body=6 89 | body = 你好 90 | INFO[0000] job allowed to timeout without delete, bury or release actions 91 | 92 | Reserve a job & bury the deleted job 93 | 94 | ./yabean reserve --tube "tube01" --string --bury 95 | reserved job id=3 body=18 96 | body = नमते 97 | buried job 3, pri = 1024 98 | 99 | 100 | View the tube stats, check out current-jobs-reserved & current-jobs-buried are 1 101 | 102 | ./yabean stats-tube tube01 103 | StatsTube tube=tube01 104 | (cmd-delete => 0) 105 | (cmd-pause-tube => 0) 106 | (current-jobs-buried => 1) 107 | (current-jobs-delayed => 0) 108 | (current-jobs-ready => 1) 109 | (current-jobs-reserved => 1) 110 | (current-jobs-urgent => 0) 111 | (current-using => 0) 112 | (current-waiting => 0) 113 | (current-watching => 0) 114 | (name => tube01) 115 | (pause => 0) 116 | (pause-time-left => 0) 117 | (total-jobs => 0) 118 | 119 | 120 | Kick the buried job 121 | 122 | ./yabean kick 3 123 | job with id = 3 Kicked. 
124 | 125 | View the tube stats again check out current-jobs-ready is 2, job id 2 moved from reserved to ready (after ttr timeout) and job id 3 got kicked to ready again 126 | 127 | ./yabean stats-tube tube01 128 | StatsTube tube=tube01 129 | (cmd-delete => 0) 130 | (cmd-pause-tube => 0) 131 | (current-jobs-buried => 0) 132 | (current-jobs-delayed => 0) 133 | (current-jobs-ready => 2) 134 | (current-jobs-reserved => 0) 135 | (current-jobs-urgent => 0) 136 | (current-using => 0) 137 | (current-waiting => 0) 138 | (current-watching => 0) 139 | (name => tube01) 140 | (pause => 0) 141 | (pause-time-left => 0) 142 | (total-jobs => 0) 143 | 144 | Run a UX program 145 | ---------------- 146 | 147 | I also liked using the [Aurora UI](https://github.com/xuri/aurora) 148 | 149 | wget https://github.com/xuri/aurora/releases/download/2.2/aurora_darwin_amd64_v2.2.tar.gz 150 | aurora_darwin_amd64_v2.2.tar.gz 151 | 152 | tar xvf aurora_darwin_amd64_v2.2.tar.gz 153 | x aurora 154 | 155 | ./aurora 156 | 157 | - This opens the browser window to http://127.0.0.1:3000/ 158 | 159 | - Click on the Add Server and add a server at Host = localhost and Port = 11300 160 | 161 | - Visit http://127.0.0.1:3000/server?server=localhost:11300 162 | 163 | - Explore further -------------------------------------------------------------------------------- /doc/GettingStarted3.md: -------------------------------------------------------------------------------- 1 | Getting started - Three node cluster 2 | ==================================== 3 | 4 | In the [Getting started page](./GettingStarted.md), we setup a single node cluster. Here, we setup a three node node cluster, which is tolerant to a single node failure. We call this setup highly available (HA). 5 | 6 | Setup cluster 7 | ------------- 8 | 9 | Following is a step-by-step guide to getting this HA cluster up and running. This setup has three cluster node processes and one beanstalkd proxy process. 
You can see a more production-like setup for kubernetes [here](https://github.com/1xyz/coolbeans-k8s) 10 | 11 | 12 | #### Setup cluster-node bean0 13 | 14 | 15 | # create a directory for demonstration purposes. 16 | $ mkdir -p /tmp/ha 17 | 18 | $ ./coolbeans --quiet cluster-node --node-id bean0 --root-dir /tmp/ha/bean0 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:21000 --node-listen-addr=127.0.0.1:11000 --raft-advertized-addr=127.0.0.1:21000 --prometheus-addr=127.0.0.1:2020 19 | 20 | The above example starts a new cluster node 21 | 22 | - with a unique node-id `bean0`. 23 | - the cluster-node starts two listeners, the raft service listens on `127.0.0.1:21000` and the GRPC service listens on `:11000`. 24 | - the raft advertised address indicates the address the other raft peers will use to connect. 25 | - all data logs and snapshots will be persisted under the directory: `/tmp/ha/bean0`. 26 | - the bootstrap node, which the default assigned leader during the first time cluster formation, is `bean0`. 27 | - additionally, we define all the initial peers: `:11000`,`:12000`,`:13000`. 28 | 29 | 30 | #### Setup cluster-nodes bean1 & bean2 31 | 32 | ./coolbeans --quiet cluster-node --node-id bean1 --root-dir /tmp/ha/bean1 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:22000 --node-listen-addr=127.0.0.1:12000 --raft-advertized-addr=127.0.0.1:22000 --prometheus-addr=127.0.0.1:2021 33 | 34 | ./coolbeans --quiet cluster-node --node-id bean2 --root-dir /tmp/ha/bean2 --bootstrap-node-id bean0 --node-peer-addrs=:11000,:12000,:13000 --raft-listen-addr=127.0.0.1:23000 --node-listen-addr=127.0.0.1:13000 --raft-advertized-addr=127.0.0.1:23000 --prometheus-addr=127.0.0.1:2022 35 | 36 | The above example starts two cluster nodes 37 | 38 | - with unique ids `bean1` and `bean2` which join node `bean0` to form a three node cluster. 39 | - the bootstrap node in this case is still `bean0`. 
What that means, is if we bring up all the three nodes in parallel, only bean0 becomes the leader, and bean1 and bean2 become followers. 40 | 41 | 42 | #### Setup beanstalkd proxy 43 | 44 | ./coolbeans --quiet beanstalkd --listen-addr 127.0.0.1 --upstream-addrs 127.0.0.1:11000,127.0.0.1:12000,127.0.0.1:13000 --listen-port 11300 >> /tmp/coolbeans-sidecar.log 2>> /tmp/coolbeans-sidecar_error.log 45 | 46 | The above example starts a new beanstalkd proxy: 47 | 48 | - with the proxy upstream pointing to the GRPC services: `127.0.0.1:11000`, `127.0.0.1:12000` and `127.0.0.1:13000`. The proxy automatically detects which of three is the leader and forwards all the requests to the current leader. 49 | - listening for beanstalkd client requests on port `11300`. 50 | 51 | #### Query to find out which node is the elected leader 52 | 53 | ``` 54 | $ ./coolbeans cluster-client is_leader --node-addr 127.0.0.1:11000 55 | isNodeLeader: false 56 | 57 | $ ./coolbeans cluster-client is_leader --node-addr 127.0.0.1:12000 58 | isNodeLeader: false 59 | 60 | $ ./coolbeans cluster-client is_leader --node-addr 127.0.0.1:13000 61 | isNodeLeader: true 62 | ``` 63 | 64 | From the above, you can see that the node `bean2` was elected the leader even though the boostrapped leader initially was `bean0`. 
-------------------------------------------------------------------------------- /doc/arch.dot: -------------------------------------------------------------------------------- 1 | # High level arch 2 | # dot -Tpng arch.dot -o arch.png 3 | digraph { 4 | subgraph cluster_client { 5 | label="Client"; style=filled; bgcolor="#D0C0A0" fontsize=13; 6 | client [shape=record label="{Beanstalkd Client\nLib}" fontsize=13 style=filled ] 7 | } 8 | subgraph cluster_proxy { 9 | label="Beanstalkd Proxy\n Server"; style=filled; bgcolor="#D0C0A0" fontsize=13; 10 | proxy [shape=record label="{Beanstalkd server|Proxy|Cluster node\nGRPC Client}" fontsize=13 style=filled ]; 11 | }; 12 | subgraph cluster_node { 13 | label="Cluster Node server" style=filled; bgcolor="#D0C0A0" fontsize=13; 14 | server [shape=record label="{GRPC server|{Raft Leader|Raft RPC\nServer}|Beanstalkd state|RAM or Disk Storage}" fontsize=13] 15 | } 16 | 17 | client -> proxy 18 | proxy -> server 19 | } -------------------------------------------------------------------------------- /doc/arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/1xyz/coolbeans/997483e2504d9b1e6689f3e55a3115b898a226b8/doc/arch.png -------------------------------------------------------------------------------- /doc/bean_3185124.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/1xyz/coolbeans/997483e2504d9b1e6689f3e55a3115b898a226b8/doc/bean_3185124.png -------------------------------------------------------------------------------- /doc/bean_3185124.svg: -------------------------------------------------------------------------------- 1 | Created by Llisolefrom the Noun Project -------------------------------------------------------------------------------- /doc/beanstalkd_lifecycle.dot: -------------------------------------------------------------------------------- 1 | ## 2 | # Lifecycle of a 
beanstalkd job 3 | # Refer: https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt 4 | # for original documentation of the job lifecycle 5 | digraph job_lifecycle { 6 | rankdir=LR; 7 | size="8,5" 8 | node [margin=0 fontcolor=blue fontsize=10 width=0.6 shape=circle style=filled] 9 | Start -> Delayed[ label = "put with delay",fontsize=10 ]; 10 | Start -> Ready [ label = "put", fontsize=10 ]; 11 | Ready -> Reserved [ label = "reserve", fontsize=10 ]; 12 | Reserved -> Reserved [ label = "touch", fontsize=10 ]; 13 | Reserved -> Ready [label = "release/ttr timeout", fontsize=10 ]; 14 | Reserved -> Buried [ label = "bury", fontsize=10 ]; 15 | Reserved -> Delayed [ label = "release with delay", fontsize=10 ]; 16 | Reserved -> Done [ label = "delete", fontsize=10 ]; 17 | Buried -> Done [ label = "delete", fontsize=10 ]; 18 | Buried -> Ready [ label = "kick", fontsize=10 ]; 19 | Delayed -> Ready [ label = "delay timeout", fontsize=10 ]; 20 | 21 | subgraph foo { 22 | rank="same" 23 | Ready 24 | Delayed 25 | } 26 | subgraph bar { 27 | rank="same" 28 | Reserved 29 | Buried 30 | } 31 | } -------------------------------------------------------------------------------- /doc/beanstalkd_lifecycle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/1xyz/coolbeans/997483e2504d9b1e6689f3e55a3115b898a226b8/doc/beanstalkd_lifecycle.png -------------------------------------------------------------------------------- /generate_changelog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | current_tag=$(git describe --tags) 4 | previous_tag=$(git tag --sort=-creatordate | head -n 2 | tail -n 1) 5 | 6 | tag_date=$(git log -1 --pretty=format:'%ad' --date=short ${current_tag}) 7 | printf "## ${current_tag} (${tag_date})\n\n" 8 | git log ${current_tag}...${previous_tag} --pretty=format:'* %s [View](https://github.com/1xyz/coolbeans/commit/%H)' 
--reverse | grep -v Merge 9 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/1xyz/coolbeans 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 7 | github.com/beanstalkd/go-beanstalk v0.0.0-20200229072127-2b7b37f17578 8 | github.com/davecgh/go-spew v1.1.1 9 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 10 | github.com/golang/protobuf v1.5.2 11 | github.com/google/uuid v1.1.1 12 | github.com/hashicorp/raft v1.1.2 13 | github.com/hashicorp/raft-boltdb v0.0.0-20191021154308-4207f1bf0617 14 | github.com/mattn/goreman v0.3.12 // indirect 15 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3 // indirect 16 | github.com/ory/go-acc v0.2.1 17 | github.com/pkg/errors v0.8.1 18 | github.com/prometheus/client_golang v0.9.3 19 | github.com/sirupsen/logrus v1.5.0 20 | github.com/smartystreets/goconvey v1.6.4 21 | github.com/stretchr/testify v1.5.1 22 | golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e 23 | golang.org/x/tools v0.0.0-20200413161937-250b2131eb8b 24 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect 25 | google.golang.org/grpc v1.28.1 26 | google.golang.org/protobuf v1.28.0 27 | gopkg.in/yaml.v2 v2.2.8 28 | ) 29 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | cmd_beanstalkd "github.com/1xyz/coolbeans/beanstalkd/cmd" 5 | cmd_cluster "github.com/1xyz/coolbeans/cluster/cmd" 6 | "github.com/1xyz/coolbeans/tools" 7 | "github.com/docopt/docopt-go" 8 | log "github.com/sirupsen/logrus" 9 | "os" 10 | ) 11 | 12 | const version = "0.1.alpha" 13 | 14 | func init() { 15 | log.SetFormatter(&log.TextFormatter{}) 16 | log.SetOutput(os.Stdout) 17 | log.SetLevel(log.InfoLevel) 18 | } 19 
| 20 | func main() { 21 | usage := `usage: coolbeans [--version] [(--verbose|--quiet)] [--help] 22 | [...] 23 | options: 24 | -h, --help 25 | --verbose Change the logging level verbosity 26 | The commands are: 27 | cluster-node Run a cluster node server 28 | beanstalkd Run a beanstalkd proxy server 29 | cluster-client Run the cluster's CLI client 30 | See 'coolbeans --help' for more information on a specific command. 31 | ` 32 | parser := &docopt.Parser{OptionsFirst: true} 33 | args, err := parser.ParseArgs(usage, nil, version) 34 | if err != nil { 35 | log.Errorf("error = %v", err) 36 | os.Exit(1) 37 | } 38 | 39 | cmd := args[""].(string) 40 | cmdArgs := args[""].([]string) 41 | 42 | log.Debugf("global arguments: %v", args) 43 | log.Debugf("command arguments: %v %v", cmd, cmdArgs) 44 | 45 | verbose := tools.OptsBool(args, "--verbose") 46 | quiet := tools.OptsBool(args, "--quiet") 47 | if verbose == true { 48 | log.SetLevel(log.DebugLevel) 49 | } else if quiet == true { 50 | log.SetLevel(log.WarnLevel) 51 | } 52 | 53 | RunCommand(cmd, cmdArgs, version) 54 | log.Infof("done") 55 | } 56 | 57 | // RunCommand runs a specific command and the provided arguments 58 | func RunCommand(c string, args []string, version string) { 59 | argv := append([]string{c}, args...) 60 | switch c { 61 | case "cluster-node": 62 | cmd_cluster.CmdClusterNode(argv, version) 63 | case "beanstalkd": 64 | cmd_beanstalkd.CmdBeanstalkd(argv, version) 65 | case "cluster-client": 66 | cmd_cluster.CmdClusterClient(argv, version) 67 | default: 68 | log.Fatalf("RunCommand: %s is not a supported command. 
See 'coolbeans help'", c) 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /state/client_resv.go: -------------------------------------------------------------------------------- 1 | package state 2 | 3 | import ( 4 | "container/heap" 5 | "container/list" 6 | "fmt" 7 | "github.com/google/uuid" 8 | log "github.com/sirupsen/logrus" 9 | "math" 10 | ) 11 | 12 | // Represents a client reservation as requested by the client 13 | type ClientResvEntry struct { 14 | // ID of the client making reservations 15 | CliID ClientID 16 | 17 | // Tubes are the tube names watched by this client 18 | // for this reservation 19 | WatchedTubes []TubeName 20 | 21 | // reservation Deadline timestamp 22 | ResvDeadlineAt int64 23 | 24 | // Indicates if this entry is waiting for a reservation 25 | IsWaitingForResv bool 26 | 27 | // clock at which the client needs some processing 28 | // If client IsWaitingForResv is set to true 29 | // - if there is a job already reserved at the lowest job's 30 | // deadline is within a second of now, then send a DEADLINE_SOON 31 | // and un-reserve the client 32 | // - If there are no jobs reserved at the current time is past 33 | // the client's reservation ResvDeadlineAt, then un-reserve the client 34 | // Check to see if any reservations need to be cleaned 35 | TickAt int64 36 | 37 | // the request ID of the reservation 38 | ReqID string 39 | 40 | // Index of the client in the client Heap 41 | HeapIndex int 42 | } 43 | 44 | func newClientResvEntry() *ClientResvEntry { 45 | return &ClientResvEntry{ 46 | CliID: ClientID(uuid.New().URN()), 47 | ReqID: uuid.New().URN(), 48 | TickAt: 0, 49 | WatchedTubes: make([]TubeName, 0), 50 | ResvDeadlineAt: math.MaxInt64, 51 | HeapIndex: -1, 52 | } 53 | } 54 | 55 | // A heap of client reservations indexed prioritized by 56 | // the clientResvEntry TickAt. A lower TickAt gets a higher 57 | // priority. 
58 | type clientResvHeap []*ClientResvEntry 59 | 60 | // create and initialize a client resv heap 61 | func newClientResvHeap() *clientResvHeap { 62 | h := make(clientResvHeap, 0) 63 | heap.Init(&h) 64 | return &h 65 | } 66 | 67 | // create an un-initialized heap (typically needed for snapshot restore) 68 | func newClientResvHeapWithSize(size int) *clientResvHeap { 69 | h := make(clientResvHeap, size) 70 | return &h 71 | } 72 | 73 | // initialize a populated heap 74 | func initializeHeap(h heap.Interface) { 75 | heap.Init(h) 76 | } 77 | 78 | func (h clientResvHeap) Len() int { 79 | return len(h) 80 | } 81 | 82 | func (h clientResvHeap) Less(i, j int) bool { 83 | // we want to pop the entry with the lowest TickAt first 84 | // in order to get total ordering if the two TickAt's 85 | // are the same then break the tie on CliID 86 | if h[i].TickAt == h[j].TickAt { 87 | return h[i].CliID < h[j].CliID 88 | } 89 | return h[i].TickAt < h[j].TickAt 90 | } 91 | 92 | func (h clientResvHeap) Swap(i, j int) { 93 | h[i], h[j] = h[j], h[i] 94 | h[i].HeapIndex = i 95 | h[j].HeapIndex = j 96 | } 97 | 98 | func (h *clientResvHeap) Enqueue(cli *ClientResvEntry) { 99 | heap.Push(h, cli) 100 | } 101 | 102 | func (h *clientResvHeap) Dequeue() *ClientResvEntry { 103 | cli, ok := heap.Pop(h).(*ClientResvEntry) 104 | if !ok { 105 | log.Panicf("cast-error, interface %T cannot be cast to *clientResvEntry", cli) 106 | } 107 | 108 | return cli 109 | } 110 | 111 | func (h *clientResvHeap) Remove(cli *ClientResvEntry) error { 112 | if cli.HeapIndex >= h.Len() { 113 | return ErrInvalidIndex 114 | } 115 | 116 | if cli.CliID != (*h)[cli.HeapIndex].CliID { 117 | return ErrMismatchJobEntry 118 | } 119 | 120 | _, ok := heap.Remove(h, cli.HeapIndex).(*ClientResvEntry) 121 | if !ok { 122 | log.Panicf("cast-error, interface cannot be cast to *clientResvEntry") 123 | } 124 | 125 | log.WithField("client.id", cli.CliID).Debugf("clientHeap.remove success") 126 | return nil 127 | } 128 | 129 | func (h 
*clientResvHeap) Peek() *ClientResvEntry { 130 | if h.Len() == 0 { 131 | return nil 132 | } else { 133 | return (*h)[0] 134 | } 135 | } 136 | 137 | // update modifies the priority and value of an Item in the queue. 138 | func (h *clientResvHeap) UpdateTickAt(cli *ClientResvEntry, newTickAt int64) { 139 | cli.TickAt = newTickAt 140 | heap.Fix(h, cli.HeapIndex) 141 | } 142 | 143 | func (h *clientResvHeap) Push(x interface{}) { 144 | n := h.Len() 145 | item, ok := x.(*ClientResvEntry) 146 | if !ok { 147 | log.Panicf("cast-error, interface %T cannot be cast to *client", x) 148 | } 149 | 150 | item.HeapIndex = n 151 | *h = append(*h, item) 152 | } 153 | 154 | func (h *clientResvHeap) Pop() interface{} { 155 | old := *h 156 | n := len(old) 157 | item := old[n-1] 158 | old[n-1] = nil // avoid memory leak 159 | item.HeapIndex = -1 // for safety 160 | *h = old[0 : n-1] 161 | return item 162 | } 163 | 164 | // ClientResvQueue is container that provides two functions. 165 | // 1. queue of client reservations that supports Enqueue & Dequeue operations 166 | // 2. 
An index (map) of client reservations indexed by CliID, that provides 167 | // O(1) operations to Find, Contains & Remove 168 | type clientResvQueue struct { 169 | m map[ClientID]*list.Element 170 | 171 | l *list.List 172 | } 173 | 174 | func newClientResvQueue() *clientResvQueue { 175 | return &clientResvQueue{ 176 | m: map[ClientID]*list.Element{}, 177 | l: list.New(), 178 | } 179 | } 180 | 181 | func (c *clientResvQueue) Enqueue(entry *ClientResvEntry) error { 182 | if c.Contains(entry) { 183 | return fmt.Errorf("entry with clientID=%s exists %w", entry.CliID, ErrEntryExists) 184 | } 185 | 186 | elem := c.l.PushBack(entry) 187 | c.m[entry.CliID] = elem 188 | return nil 189 | } 190 | 191 | func (c *clientResvQueue) Find(clientID ClientID) (*ClientResvEntry, error) { 192 | elem, ok := c.m[clientID] 193 | if !ok { 194 | return nil, ErrEntryMissing 195 | } 196 | 197 | res, ok := elem.Value.(*ClientResvEntry) 198 | if !ok { 199 | log.Panicf("cast-error expected value %T to be of type *clientResvEntry", elem.Value) 200 | } 201 | 202 | return res, nil 203 | } 204 | 205 | func (c *clientResvQueue) Contains(entry *ClientResvEntry) bool { 206 | _, ok := c.m[entry.CliID] 207 | return ok 208 | } 209 | 210 | func (c *clientResvQueue) Remove(entry *ClientResvEntry) error { 211 | elem, ok := c.m[entry.CliID] 212 | if !ok { 213 | return ErrEntryMissing 214 | } 215 | 216 | _, ok = elem.Value.(*ClientResvEntry) 217 | if !ok { 218 | log.Panicf("cast-error expected value %T to be of type *clientResvEntry", elem.Value) 219 | } 220 | 221 | c.l.Remove(elem) 222 | elem = nil 223 | 224 | delete(c.m, entry.CliID) 225 | return nil 226 | } 227 | 228 | func (c *clientResvQueue) Len() int { 229 | return len(c.m) 230 | } 231 | 232 | func (c *clientResvQueue) Dequeue() (*ClientResvEntry, error) { 233 | if c.l.Len() == 0 { 234 | return nil, ErrContainerEmpty 235 | } 236 | 237 | firstElem := c.l.Front() 238 | 239 | res, ok := firstElem.Value.(*ClientResvEntry) 240 | if !ok { 241 | 
log.Panicf("cast-error expected value %T to be of type *clientResvEntry", firstElem.Value) 242 | } 243 | 244 | return res, nil 245 | } 246 | 247 | // Return a read-only channel of *clientResvEntry 248 | func (c *clientResvQueue) Entries() <-chan *ClientResvEntry { 249 | entriesCh := make(chan *ClientResvEntry) 250 | go func(ch chan<- *ClientResvEntry) { 251 | defer close(ch) 252 | for e := c.l.Front(); e != nil; e = e.Next() { 253 | cli, ok := e.Value.(*ClientResvEntry) 254 | if !ok { 255 | log.WithField("method", "clientResvQueue.Entries"). 256 | Panicf("class-cast error") 257 | } 258 | 259 | if cli.HeapIndex < 0 { 260 | log.WithField("method", "clientResvQueue.Entries"). 261 | Warnf("cli %v with heapIndex = %v is skipped", cli.CliID, cli.HeapIndex) 262 | } else { 263 | ch <- cli 264 | } 265 | } 266 | }(entriesCh) 267 | return entriesCh 268 | } 269 | 270 | // clientResvMap is a map from ClientID to a *clientResvEntry 271 | type clientResvMap map[ClientID]*ClientResvEntry 272 | 273 | func (c clientResvMap) Contains(id ClientID) bool { 274 | _, ok := c[id] 275 | return ok 276 | } 277 | 278 | func (c clientResvMap) Put(entry *ClientResvEntry) error { 279 | if c.Contains(entry.CliID) { 280 | return fmt.Errorf("entry with clientID:%v exists. 
%w", entry.CliID, ErrEntryExists) 281 | } 282 | 283 | c[entry.CliID] = entry 284 | return nil 285 | } 286 | 287 | func (c clientResvMap) Get(id ClientID) (*ClientResvEntry, error) { 288 | entry, ok := c[id] 289 | if !ok { 290 | return nil, ErrEntryMissing 291 | } 292 | 293 | return entry, nil 294 | } 295 | 296 | func (c clientResvMap) Remove(id ClientID) error { 297 | if !c.Contains(id) { 298 | return ErrEntryMissing 299 | } 300 | 301 | delete(c, id) 302 | return nil 303 | } 304 | 305 | func (c clientResvMap) AddOrUpdate(id ClientID, watchedTubes []TubeName, reqID string, resvDeadlineAt int64) (*ClientResvEntry, error) { 306 | if entry, ok := c[id]; ok { 307 | entry.ReqID = reqID 308 | entry.ResvDeadlineAt = resvDeadlineAt 309 | 310 | return entry, nil 311 | } 312 | 313 | entry := &ClientResvEntry{ 314 | CliID: id, 315 | WatchedTubes: watchedTubes, 316 | ResvDeadlineAt: resvDeadlineAt, 317 | IsWaitingForResv: true, 318 | TickAt: 0, 319 | ReqID: reqID, 320 | HeapIndex: -1, 321 | } 322 | 323 | if err := c.Put(entry); err != nil { 324 | return nil, err 325 | } 326 | 327 | return entry, nil 328 | } 329 | -------------------------------------------------------------------------------- /state/client_resv_test.go: -------------------------------------------------------------------------------- 1 | package state 2 | 3 | import ( 4 | "errors" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestNewClientResvHeap(t *testing.T) { 10 | ch := newClientResvHeap() 11 | assert.Equalf(t, 0, ch.Len(), "initial length is zero") 12 | } 13 | 14 | func TestClientResvHeap_Enqueue(t *testing.T) { 15 | ch := newClientResvHeap() 16 | c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20) 17 | 18 | ch.Enqueue(c1) 19 | ch.Enqueue(c2) 20 | 21 | assert.Equalf(t, 0, c2.HeapIndex, 22 | "client with lower nextTickAt is at the head of queue") 23 | assert.Equalf(t, 1, c1.HeapIndex, 24 | "client with lower nextTickAt is not at head of queue") 25 | } 26 | 27 | func 
// Verify Dequeue pops entries in ascending TickAt order.
func TestClientHeap_Dequeue(t *testing.T) {
	ch := newClientResvHeap()
	c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20)

	ch.Enqueue(c1)
	ch.Enqueue(c2)

	out1 := ch.Dequeue()
	out2 := ch.Dequeue()
	assert.Equalf(t, c2.CliID, out1.CliID, "expect c2 to be dequeued first")
	assert.Equalf(t, c1.CliID, out2.CliID, "expect c1 to be dequeued next")
}

// Verify Peek returns the lowest-TickAt entry without removing it.
func TestClientHeap_Peek(t *testing.T) {
	ch := newClientResvHeap()
	c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20)

	ch.Enqueue(c1)
	ch.Enqueue(c2)

	assert.Equalf(t, c2.CliID, ch.Peek().CliID, "expect c2 to be peeked")
}

// Verify Peek on an empty heap returns nil rather than panicking.
func TestClientHeap_Peek_Nil(t *testing.T) {
	ch := newClientResvHeap()
	assert.Nilf(t, ch.Peek(), "expect peek of empty heap to return nil")
}

// Verify UpdateTickAt re-orders the heap when an entry's priority changes.
func TestClientHeap_UpdateTickAt(t *testing.T) {
	ch := newClientResvHeap()

	c0, c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20), newTestClientResvEntry(10)
	ch.Enqueue(c0)
	ch.Enqueue(c1)
	ch.Enqueue(c2)

	// Push c2 from the head (TickAt=10) to the tail (TickAt=100).
	ch.UpdateTickAt(c2, 100)

	assert.Equalf(t, 0, c1.HeapIndex, "expect c1 to be at head of heap")
	assert.Equalf(t, 1, c0.HeapIndex, "expect c0 to be at middle of heap")
	assert.Equalf(t, 2, c2.HeapIndex, "expect c2 to be at end of heap")
}

// Verify Remove unlinks an entry and resets its HeapIndex.
func TestClientHeap_Remove(t *testing.T) {
	ch := newClientResvHeap()
	c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20)

	ch.Enqueue(c1)
	ch.Enqueue(c2)

	err := ch.Remove(c2)
	assert.Nilf(t, err, "expect err to be nil")
	assert.Equalf(t, -1, c2.HeapIndex, "expect heap index to be reset to -1")
	assert.Equalf(t, ch.Len(), 1, "expect client heap's length to be 1")
}

// Verify Remove rejects an entry whose CliID does not match the entry
// stored at its HeapIndex.
func TestClientHeap_Remove_InvalidJobEntry(t *testing.T) {
	ch := newClientResvHeap()
	c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20)

	ch.Enqueue(c1)
	ch.Enqueue(c2)

	// c3 impersonates c2's heap slot but has a different CliID.
	c3 := newTestClientResvEntry(20)
	c3.HeapIndex = c2.HeapIndex
	err := ch.Remove(c3)
	assert.Equalf(t, err, ErrMismatchJobEntry, "expect err to be ErrMismatchJobEntry")
}

// Verify Remove rejects an out-of-range HeapIndex.
func TestClientHeap_Remove_ErrInvalidIndex(t *testing.T) {
	ch := newClientResvHeap()
	c1, c2 := newTestClientResvEntry(30), newTestClientResvEntry(20)

	ch.Enqueue(c1)
	ch.Enqueue(c2)

	c2.HeapIndex = 100
	err := ch.Remove(c2)
	assert.Equalf(t, err, ErrInvalidIndex, "expect err to be ErrInvalidIndex")
}

// Verify Enqueue rejects duplicate CliIDs but accepts distinct clients.
func TestClientResvQueue_Enqueue(t *testing.T) {
	cs := newClientResvQueue()

	cli := newTestClientResvEntry(1)
	err := cs.Enqueue(cli)
	assert.Nilf(t, err, "expect err to be nil")

	err = cs.Enqueue(cli)
	assert.Truef(t, errors.Is(err, ErrEntryExists), "expect err to contain ErrEntryExists")

	err = cs.Enqueue(newTestClientResvEntry(1))
	assert.Nilf(t, err, "expect err to be nil")
}

// Verify Remove succeeds once and returns ErrEntryMissing afterwards.
func TestClientResvQueue_Remove(t *testing.T) {
	cs := newClientResvQueue()

	cli := newTestClientResvEntry(1)
	if err := cs.Enqueue(cli); err != nil {
		t.Fatalf("test error %v", err)
	}

	err := cs.Remove(cli)
	assert.Nilf(t, err, "expect err to be nil")

	err = cs.Remove(cli)
	assert.Equalf(t, ErrEntryMissing, err, "expect err to be ErrEntryMissing")

	assert.Equalf(t, 0, cs.Len(), "expect len to be zero")
}

// Verify Contains tracks membership through Enqueue and Remove.
func TestClientResvQueue_Contains(t *testing.T) {
	cs := newClientResvQueue()

	cli := newTestClientResvEntry(1)
	if err := cs.Enqueue(cli); err != nil {
		t.Fatalf("test error %v", err)
	}

	b := cs.Contains(cli)
	assert.Truef(t, b, "expect result to be true")

	if err := cs.Remove(cli); err != nil {
		t.Fatalf("test error %v", err)
	}
	b = cs.Contains(cli)
	assert.Falsef(t, b, "expect result to be false")
}

// Verify Find returns ErrEntryMissing for unknown IDs and the entry
// for a queued client.
func TestClientResvQueue_Find(t *testing.T) {
	cs := newClientResvQueue()

	cli, err := cs.Find("abracadabra")
	assert.Equalf(t, ErrEntryMissing, err, "expectErrEntryMissing")

	cli = newTestClientResvEntry(1)
	if err := cs.Enqueue(cli); err != nil {
		t.Fatalf("test error %v", err)
	}

	resultCli, err := cs.Find(cli.CliID)
	assert.Nilf(t, err, "expect err to be nil")
	assert.Equalf(t, cli.CliID, resultCli.CliID, "expect client to be found")
}

// Verify Dequeue yields entries in FIFO (arrival) order.
func TestClientResvQueue_Dequeue(t *testing.T) {
	cs := newClientResvQueue()

	cli1 := newTestClientResvEntry(1)
	cli2 := newTestClientResvEntry(1)
	cs.Enqueue(cli2)
	cs.Enqueue(cli1)

	resultCli, err := cs.Dequeue()
	assert.Nilf(t, err, "expect err to be nil")
	assert.Equalf(t, cli2.CliID, resultCli.CliID, "expect cli2 to be first result")
}

// Verify the next Dequeue after an explicit Remove returns the next
// queued client. (Dequeue itself does not unlink — Remove is required.)
func TestClientResvQueue_Dequeue_PostRemove(t *testing.T) {
	cs := newClientResvQueue()

	cli1 := newTestClientResvEntry(1)
	cli2 := newTestClientResvEntry(1)
	cs.Enqueue(cli2)
	cs.Enqueue(cli1)

	resultCli, err := cs.Dequeue()
	cs.Remove(resultCli)

	resultCli, err = cs.Dequeue()
	assert.Nilf(t, err, "expect err to be nil")
	assert.Equalf(t, cli1.CliID, resultCli.CliID, "expect cli2 to be first result")
}

// Verify Dequeue on a drained queue returns ErrContainerEmpty.
func TestClientResvQueue_Dequeue_Err(t *testing.T) {
	cs := newClientResvQueue()

	cli1 := newTestClientResvEntry(1)
	cli2 := newTestClientResvEntry(1)
	cs.Enqueue(cli2)
	cs.Enqueue(cli1)

	resultCli, err := cs.Dequeue()
	cs.Remove(resultCli)
	resultCli, err = cs.Dequeue()
	cs.Remove(resultCli)

	resultCli, err = cs.Dequeue()
	assert.Equalf(t, ErrContainerEmpty, err, "expect err to be ErrContainerEmpty")
	assert.Nilf(t, resultCli, "expect resultCli to be nil")
}

// Verify Put inserts a new entry.
func TestClientResvMap_Put(t *testing.T) {
	c := make(clientResvMap)

	cli1 := newTestClientResvEntry(1)
	err := c.Put(cli1)
	assert.Nilf(t, err, "expect err to be nil")
}

// Verify Put rejects a duplicate CliID.
func TestClientResvMap_Put_ReturnsErr(t *testing.T) {
	c := make(clientResvMap)

	cli1 := newTestClientResvEntry(1)
	err := c.Put(cli1)
	err = c.Put(cli1)
	assert.Truef(t, errors.Is(err, ErrEntryExists), "expect err to contain ErrEntryExists")
}

// Verify Get returns the entry previously Put.
func TestClientResvMap_Get(t *testing.T) {
	c := make(clientResvMap)

	cli1 := newTestClientResvEntry(1)
	err := c.Put(cli1)

	cli2, err := c.Get(cli1.CliID)
	assert.Nilf(t, err, "expect err to be nil")
	assert.Equalf(t, cli1, cli2, "expect two client resv entry to match")
}

// Verify Get returns ErrEntryMissing for an unknown ID.
func TestClientResvMap_Get_ReturnsErr(t *testing.T) {
	c := make(clientResvMap)
	_, err := c.Get(ClientID("abrcadabra"))
	assert.Equalf(t, ErrEntryMissing, err, "expect err to be ErrEntryMissing")
}

// Verify Remove succeeds for a present entry.
func TestClientResvMap_Remove(t *testing.T) {
	c := make(clientResvMap)

	cli1 := newTestClientResvEntry(1)
	c.Put(cli1)
	err := c.Remove(cli1.CliID)
	assert.Nilf(t, err, "expect err to be nil")
}

// Verify Remove returns ErrEntryMissing when absent or already removed.
func TestClientResvMap_Remove_ReturnsErr(t *testing.T) {
	c := make(clientResvMap)
	err := c.Remove(ClientID("abrcadabra"))
	assert.Equalf(t, ErrEntryMissing, err, "expect err to be ErrEntryMissing")

	cli1 := newTestClientResvEntry(1)
	c.Put(cli1)
	c.Remove(cli1.CliID)
	err = c.Remove(cli1.CliID)
	assert.Equalf(t, ErrEntryMissing, err, "expect err to be ErrEntryMissing")
}

// newTestClientResvEntry builds a fresh entry with the given TickAt,
// delegating ID generation to newClientResvEntry.
func newTestClientResvEntry(tickAt int64) *ClientResvEntry {
	cli := newClientResvEntry()
	cli.TickAt = tickAt
	return cli
}
var (
	// ErrInvalidIndex no job entry found at the index
	ErrInvalidIndex = errors.New("provided index is out of range of the heap")

	// ErrMismatchJobEntry - returned when job entry at index does not match the heap's value
	ErrMismatchJobEntry = errors.New("job entry at index does not match provided entry")

	// ErrEntryExists - returned when an entry exists in the existing map/set, to prevent overriding
	ErrEntryExists = errors.New("entry exists in container")

	// ErrEntryMissing - returned when an entry is not found in the container
	ErrEntryMissing = errors.New("entry not found in container")

	// ErrContainerEmpty - returned when the container such as a list/map/slice etc is empty
	ErrContainerEmpty = errors.New("the container is empty")

	// ErrInvalidJobTransition - the current state of the job prevents this transition
	ErrInvalidJobTransition = errors.New("invalid transition")

	// ErrInvalidOperation - The state indicates that this op cannot be done
	ErrInvalidOperation = errors.New("invalid operation due to the current state")

	// ErrUnauthorizedOperation - This state requires a matching client to perform this transition
	ErrUnauthorizedOperation = errors.New("client is not authorized to perform this operation")

	// ErrCancel - indicates the request is cancelled
	ErrCancel = errors.New("cancelled")

	// ErrNoReservationFound - indicates that a request for a reservation could not be completed
	ErrNoReservationFound = errors.New("no reservation could be found")

	// ErrInvalidResvTimeout - indicates the provided reservation timeout is invalid
	ErrInvalidResvTimeout = errors.New("the provided reservation timeout is invalid")

	// ErrClientIsWaitingForReservation - Indicates the client is waiting for a reservation
	ErrClientIsWaitingForReservation = errors.New("the request client cannot request for another reservation")
)

// ResultError is an error that carries the originating request's ID and
// an error code alongside the underlying error.
type ResultError struct {
	// RequestID is the ID of the request
	RequestID string

	// ErrorCode is an identifier for the error code
	ErrorCode int32

	// Err is the underlying error
	Err error
}

// Error implements the error interface, rendering the request ID,
// error code and wrapped error.
func (r *ResultError) Error() string {
	return fmt.Sprintf("ReqID: %s | ErrorCode: %d | Err: %v",
		r.RequestID, r.ErrorCode, r.Err)
}

// Unwrap returns the wrapped error, enabling errors.Is / errors.As.
func (r *ResultError) Unwrap() error {
	return r.Err
}
// IndexEntry is an entry within the job Index
type IndexEntry struct {
	// job is the indexed job.
	job Job

	// entry points to the job's current heap entry (nil until placed).
	entry *JobEntry
}

// JobIndex is an index of indexEntry indexed by a job ID
type JobIndex struct {
	// m maps JobID to its index entry.
	m map[JobID]*IndexEntry
	// maxJobID is the highest JobID handed out so far.
	maxJobID JobID
}

// NewJobIndex returns a pointer to a new JobIndex.
func NewJobIndex() *JobIndex {
	return &JobIndex{
		m:        map[JobID]*IndexEntry{},
		maxJobID: JobID(0),
	}
}

// NextJobID increments the maximum assigned JobID and returns it.
func (idx *JobIndex) NextJobID() JobID {
	idx.maxJobID = idx.maxJobID + 1
	return idx.maxJobID
}

// Len returns the number of jobs in this index
func (idx *JobIndex) Len() int {
	return len(idx.m)
}

// Add a job to the index.
// Returns an error wrapping ErrEntryExists if the job's ID is present.
func (idx *JobIndex) Add(job Job) (*IndexEntry, error) {
	if _, ok := idx.m[job.ID()]; ok {
		return nil, fmt.Errorf("job with id=%v exists %w", job.ID(), ErrEntryExists)
	}

	e := IndexEntry{
		job:   job,
		entry: nil,
	}
	idx.m[job.ID()] = &e
	return &e, nil
}

// Get returns the job specified by the jobID
func (idx *JobIndex) Get(jobID JobID) (*IndexEntry, error) {
	entry, ok := idx.m[jobID]
	if !ok {
		return nil, ErrEntryMissing
	}
	return entry, nil
}

// Remove deletes the job from the index
func (idx *JobIndex) Remove(jobID JobID) (*IndexEntry, error) {
	entry, ok := idx.m[jobID]
	if !ok {
		return nil, ErrEntryMissing
	}

	delete(idx.m, jobID)
	return entry, nil
}

// Jobs returns a read-only channel of jobs.
// NOTE(review): the producing goroutine blocks on the unbuffered
// channel; callers must drain it fully or the goroutine leaks.
func (idx *JobIndex) Jobs() <-chan Job {
	entriesCh := make(chan Job)
	go func(ch chan<- Job) {
		defer close(ch)
		for _, v := range idx.m {
			ch <- v.job
		}
	}(entriesCh)
	return entriesCh
}

// An map of tube state indexed by the tubeName
type tubeIndex map[TubeName]*tubeState

// tubeState groups the per-tube job heaps and the waiting clients.
type tubeState struct {
	// Min heap of jobs (ordered by the ReadyAt)
	delayedJobs DelayedJobs

	// Min heap of jobs (ordered by job pri)
	// if two jobs are of same pri, older (by id) one is ahead
	readyJobs PriorityJobs

	// Set of clientHeap that are waiting for reservations
	waiting *clientResvQueue

	// Min heap of jobs (ordered by buriedAt)
	buriedJobs BuriedJobs
}

// getByName looks up the tube's state; when create is true a missing
// tube is created on demand, otherwise ErrEntryMissing is returned.
func (ti tubeIndex) getByName(tubeName TubeName, create bool) (*tubeState, error) {
	res, ok := ti[tubeName]
	if !ok {
		if !create {
			return nil, ErrEntryMissing
		}

		log.Debugf("tubeStates.getTube created tube=%v", tubeName)
		ti[tubeName] = &tubeState{
			delayedJobs: NewDelayedJobs(),
			readyJobs:   NewPriorityJobs(),
			waiting:     newClientResvQueue(),
			buriedJobs:  NewBuriedJobs(),
		}
		return ti[tubeName], nil
	} else {
		return res, nil
	}
}

// cleanup deletes the tube's state when it holds no jobs and no waiting
// clients.
// NOTE(review): t.waiting is not nil-ed out (unlike the job heaps), and
// only RemoveDelayedJob calls cleanup — confirm whether the ready/buried
// removal paths are meant to skip it.
func (ti tubeIndex) cleanup(tubeName TubeName) {
	if t, ok := ti[tubeName]; !ok {
		return
	} else if t.readyJobs.Len() == 0 && t.delayedJobs.Len() == 0 && t.waiting.Len() == 0 && t.buriedJobs.Len() == 0 {
		delete(ti, tubeName)
		t.readyJobs = nil
		t.delayedJobs = nil
		t.buriedJobs = nil
	}
}

// EnqueueReadyJob adds job to its tube's ready heap, creating the tube
// if needed.
func (ti tubeIndex) EnqueueReadyJob(job Job) (*JobEntry, error) {
	t, err := ti.getByName(job.TubeName(), true)
	if err != nil {
		return nil, err
	}
	return t.readyJobs.Enqueue(job), err
}

// EnqueueDelayedJob adds job to its tube's delayed heap, creating the
// tube if needed.
func (ti tubeIndex) EnqueueDelayedJob(job Job) (*JobEntry, error) {
	t, err := ti.getByName(job.TubeName(), true)
	if err != nil {
		return nil, err
	}
	return t.delayedJobs.Enqueue(job), nil
}

// EnqueueBuriedJob adds job to its tube's buried heap, creating the
// tube if needed.
func (ti tubeIndex) EnqueueBuriedJob(job Job) (*JobEntry, error) {
	t, err := ti.getByName(job.TubeName(), true)
	if err != nil {
		return nil, err
	}
	return t.buriedJobs.Enqueue(job), nil
}

// RemoveDelayedJob removes the entry from its tube's delayed heap and
// prunes the tube if it became empty.
func (ti tubeIndex) RemoveDelayedJob(jobEntry *JobEntry) (*JobEntry, error) {
	tubeName := jobEntry.TubeName()
	t, err := ti.getByName(tubeName, false)
	if err != nil {
		return nil, err
	}

	res, err := t.delayedJobs.RemoveAt(jobEntry)
	ti.cleanup(tubeName)
	return res, err
}

// RemoveReadyJob removes the entry from its tube's ready heap.
func (ti tubeIndex) RemoveReadyJob(jobEntry *JobEntry) (*JobEntry, error) {
	t, err := ti.getByName(jobEntry.TubeName(), false)
	if err != nil {
		return nil, err
	}

	return t.readyJobs.RemoveAt(jobEntry)
}

// RemoveBuriedJob removes the entry from its tube's buried heap.
func (ti tubeIndex) RemoveBuriedJob(jobEntry *JobEntry) (*JobEntry, error) {
	t, err := ti.getByName(jobEntry.TubeName(), false)
	if err != nil {
		return nil, err
	}

	return t.buriedJobs.RemoveAt(jobEntry)
}

// NextDelayedJob peeks at the earliest-ReadyAt delayed job of the tube;
// ErrEntryMissing when the tube or heap is empty.
func (ti tubeIndex) NextDelayedJob(tubeName TubeName) (*JobEntry, error) {
	t, err := ti.getByName(tubeName, false)
	if err != nil {
		return nil, ErrEntryMissing
	}

	if t.delayedJobs.Len() == 0 {
		return nil, ErrEntryMissing
	}

	return t.delayedJobs.Peek(), nil
}

// NextReadyJob peeks at the highest-priority ready job of the tube;
// ErrEntryMissing when the tube or heap is empty.
func (ti tubeIndex) NextReadyJob(tubeName TubeName) (*JobEntry, error) {
	t, err := ti.getByName(tubeName, false)
	if err != nil {
		return nil, ErrEntryMissing
	}
	if t.readyJobs.Len() == 0 {
		return nil, ErrEntryMissing
	}
	return t.readyJobs.Peek(), nil
}

// NextBuriedJob peeks at the earliest-buried job of the tube;
// ErrEntryMissing when the tube or heap is empty.
func (ti tubeIndex) NextBuriedJob(tubeName TubeName) (*JobEntry, error) {
	t, err := ti.getByName(tubeName, false)
	if err != nil {
		return nil, ErrEntryMissing
	}
	if t.buriedJobs.Len() == 0 {
		return nil, ErrEntryMissing
	}
	return t.buriedJobs.Peek(), nil
}

// EnqueueToWaitQ appends cli to the tube's waiting-clients queue,
// creating the tube if needed.
func (ti tubeIndex) EnqueueToWaitQ(tubeName TubeName, cli *ClientResvEntry) error {
	ts, err := ti.getByName(tubeName, true)
	if err != nil {
		return err
	}

	return ts.waiting.Enqueue(cli)
}

// WaitQLen returns the number of clients waiting on the tube (-1 with
// an error when the tube does not exist).
func (ti tubeIndex) WaitQLen(tubeName TubeName) (int, error) {
	if t, err := ti.getByName(tubeName, false); err != nil {
		return -1, err
	} else {
		return t.waiting.Len(), nil
	}
}

// DequeueFromWaitQ returns the front waiting client of the tube.
func (ti tubeIndex) DequeueFromWaitQ(tubeName TubeName) (*ClientResvEntry, error) {
	if t, err := ti.getByName(tubeName, false); err != nil {
		return nil, err
	} else {
		return t.waiting.Dequeue()
	}
}

// RemoveFromWaitQ removes cli from the tube's waiting-clients queue.
func (ti tubeIndex) RemoveFromWaitQ(tubeName TubeName, cli *ClientResvEntry) error {
	if t, err := ti.getByName(tubeName, false); err != nil {
		return err
	} else {
		return t.waiting.Remove(cli)
	}
}

// GetTubeNames returns the names of all currently-known tubes
// (map iteration order, i.e. unordered).
func (ti tubeIndex) GetTubeNames() []TubeName {
	result := make([]TubeName, 0)
	for n := range ti {
		result = append(result, n)
	}
	return result
}

// GetStatistics returns a stats map for the tube. Several counters are
// hard-coded to zero (not tracked at this level).
func (ti tubeIndex) GetStatistics(tubeName TubeName) (map[string]interface{}, error) {
	t, err := ti.getByName(tubeName, false /*create*/)
	if err != nil {
		return nil, err
	}
	return map[string]interface{}{
		"name":                  string(tubeName),
		"current-jobs-urgent":   0,
		"current-jobs-ready":    t.readyJobs.Len(),
		"current-jobs-reserved": 0,
		"current-jobs-delayed":  t.delayedJobs.Len(),
		"current-jobs-buried":   t.buriedJobs.Len(),
		"total-jobs":            0,
		"current-using":         0,
		"current-waiting":       t.waiting.Len(),
		"current-watching":      0,
		"pause":                 0,
		"cmd-delete":            0,
		"cmd-pause-tube":        0,
		"pause-time-left":       0,
	}, nil
}

// TotalJobCounts sums the ready/delayed/buried job counts across all
// tubes (urgent and reserved are not tracked here and stay zero).
func (ti tubeIndex) TotalJobCounts() map[string]uint64 {
	s := map[string]uint64{
		"current-jobs-urgent":   0,
		"current-jobs-ready":    0,
		"current-jobs-reserved": 0,
		"current-jobs-delayed":  0,
		"current-jobs-buried":   0,
	}
	for _, t := range ti {
		s["current-jobs-ready"] += uint64(t.readyJobs.Len())
		s["current-jobs-delayed"] += uint64(t.delayedJobs.Len())
		s["current-jobs-buried"] += uint64(t.buriedJobs.Len())
	}
	return s
}

// An map of jobs reserved where the key is the CliID
type reservedJobsIndex map[ClientID]ReservedJobs

// Enqueue adds job to the client's reserved-jobs heap, creating the
// heap on first use.
func (r reservedJobsIndex) Enqueue(clientID ClientID, job Job) (*JobEntry, error) {
	_, ok := r[clientID]
	if !ok {
		r[clientID] = NewReservedJobs()
	}
	return r[clientID].Enqueue(job), nil
}

// Remove takes jobEntry out of its reserving client's heap, dropping
// the client's heap entirely once it is empty (via the defer).
func (r reservedJobsIndex) Remove(jobEntry *JobEntry) (Job, error) {
	clientID := jobEntry.ReservedBy()
	_, ok := r[clientID]
	if !ok {
		// NOTE(review): the log field says "RemoveReadyJob" although this
		// is reservedJobsIndex.Remove — looks like a copy-paste label.
		log.WithField("method", "RemoveReadyJob").
			Errorf("cannot find a reservation list for client=%v", clientID)
		return nil, ErrEntryMissing
	}

	// Prune the per-client heap after removal if it became empty.
	defer func() {
		if r[clientID].Len() == 0 {
			delete(r, clientID)
		}
	}()

	if jobEntry.index == 0 {
		// Entry is at the heap head; a plain Dequeue suffices.
		return r[clientID].Dequeue(), nil
	} else {
		return r[clientID].RemoveAt(jobEntry)
	}
}

// NextReservedJob peeks at the client's earliest-expiring reserved job;
// ErrEntryMissing when the client has no reservations.
func (r reservedJobsIndex) NextReservedJob(clientID ClientID) (*JobEntry, error) {
	resvJobs, ok := r[clientID]
	if !ok {
		log.WithField("method", "reservedJobsState.NextReservedJob").
			Debugf("no reservedJobList for CliID=%v", clientID)
		return nil, ErrEntryMissing
	}

	if resvJobs.Len() == 0 {
		log.WithField("method", "reservedJobsState.NextReservedJob").
			Debugf("reservedJobList for CliID=%v has zero entries", clientID)
		return nil, ErrEntryMissing
	}

	return r[clientID].Peek(), nil
}

// JobCount returns the number of reserved jobs belonging to tubeName
// across all clients.
func (r reservedJobsIndex) JobCount(tubeName TubeName) uint64 {
	var count uint64 = 0
	for _, rj := range r {
		count += rj.JobCountByTube(tubeName)
	}
	return count
}

// TotalJobs returns the total number of reserved jobs across all clients.
func (r reservedJobsIndex) TotalJobs() uint64 {
	var count uint64 = 0
	for _, rj := range r {
		count += uint64(rj.Len())
	}
	return count
}
345 | Debugf("reservedJobList for CliID=%v has zero entries", clientID) 346 | return nil, ErrEntryMissing 347 | } 348 | 349 | return r[clientID].Peek(), nil 350 | } 351 | 352 | func (r reservedJobsIndex) JobCount(tubeName TubeName) uint64 { 353 | var count uint64 = 0 354 | for _, rj := range r { 355 | count += rj.JobCountByTube(tubeName) 356 | } 357 | return count 358 | } 359 | 360 | func (r reservedJobsIndex) TotalJobs() uint64 { 361 | var count uint64 = 0 362 | for _, rj := range r { 363 | count += uint64(rj.Len()) 364 | } 365 | return count 366 | } 367 | -------------------------------------------------------------------------------- /state/job_heap.go: -------------------------------------------------------------------------------- 1 | package state 2 | 3 | import ( 4 | "container/heap" 5 | log "github.com/sirupsen/logrus" 6 | ) 7 | 8 | // JobHeap is a binary heap of jobs 9 | type JobHeap interface { 10 | // Enqueue appends an entry to the job heap in priority order 11 | Enqueue(job Job) *JobEntry 12 | 13 | // Dequeue returns a from the heap in priority order 14 | Dequeue() Job 15 | 16 | // RemoveAt removes a specific job entry. 
17 | RemoveAt(jobEntry *JobEntry) (*JobEntry, error) 18 | 19 | // Len returns the heap length 20 | Len() int 21 | 22 | // Peek returns the top element of the heap without dequeuing it 23 | Peek() *JobEntry 24 | 25 | // Return the number of jobs found in the specific tube 26 | JobCountByTube(tubename TubeName) uint64 27 | } 28 | 29 | // JobEntry is an entry in the JobHeap 30 | type JobEntry struct { 31 | Job 32 | 33 | // Represents the index of this entry in the Heap 34 | index int 35 | } 36 | 37 | type jobHeap struct { 38 | entries []*JobEntry 39 | lessFn func(i, j int) bool // Customizable Less function 40 | } 41 | 42 | func (jh *jobHeap) Peek() *JobEntry { 43 | if jh.Len() == 0 { 44 | return nil 45 | } 46 | return jh.entries[0] 47 | } 48 | 49 | func (jh *jobHeap) Enqueue(j Job) *JobEntry { 50 | e := &JobEntry{ 51 | Job: j, 52 | index: 0, 53 | } 54 | 55 | heap.Push(jh, e) 56 | return e 57 | } 58 | 59 | func (jh *jobHeap) Dequeue() Job { 60 | e, ok := heap.Pop(jh).(*JobEntry) 61 | if !ok { 62 | log.Panicf("cast-error, interface %T cannot be cast to *JobEntry", e) 63 | } 64 | 65 | return e.Job 66 | } 67 | 68 | func (jh *jobHeap) RemoveAt(entry *JobEntry) (*JobEntry, error) { 69 | if entry.index >= jh.Len() { 70 | return nil, ErrInvalidIndex 71 | } 72 | 73 | if entry.ID() != jh.entries[entry.index].ID() { 74 | return nil, ErrMismatchJobEntry 75 | } 76 | 77 | r, ok := heap.Remove(jh, entry.index).(*JobEntry) 78 | if !ok { 79 | log.Panicf("cast-error, interface cannot be cast to *JobEntry") 80 | } 81 | 82 | return r, nil 83 | } 84 | 85 | func (jh *jobHeap) Len() int { 86 | return len(jh.entries) 87 | } 88 | 89 | func (jh jobHeap) Less(i, j int) bool { 90 | return jh.lessFn(i, j) 91 | } 92 | 93 | func (jh jobHeap) Swap(i, j int) { 94 | jh.entries[i], jh.entries[j] = jh.entries[j], jh.entries[i] 95 | jh.entries[i].index = i 96 | jh.entries[j].index = j 97 | } 98 | 99 | func (jh *jobHeap) Push(x interface{}) { 100 | n := jh.Len() 101 | item, ok := x.(*JobEntry) 102 | if 
!ok { 103 | log.Panicf("cast-error, interface %T cannot be cast to *JobEntry", x) 104 | } 105 | 106 | item.index = n 107 | jh.entries = append(jh.entries, item) 108 | } 109 | 110 | func (jh *jobHeap) Pop() interface{} { 111 | old := jh.entries 112 | n := len(old) 113 | item := old[n-1] 114 | old[n-1] = nil // avoid memory leak 115 | item.index = -1 // for safety 116 | jh.entries = old[0 : n-1] 117 | return item 118 | } 119 | 120 | func (jh *jobHeap) JobCountByTube(tubeName TubeName) uint64 { 121 | var count uint64 = 0 122 | for _, e := range jh.entries { 123 | if e.TubeName() == tubeName { 124 | count++ 125 | } 126 | } 127 | return count 128 | } 129 | 130 | // PriorityJobs is a JobHeap, with jobs ordered by its 131 | // Priority. Lower priority values takes a higher precedence. 132 | type PriorityJobs JobHeap 133 | 134 | // NewPriorityJobs returns a new instance of PriorityJobs 135 | func NewPriorityJobs() PriorityJobs { 136 | pq := jobHeap{ 137 | entries: make([]*JobEntry, 0), 138 | } 139 | pq.lessFn = func(i, j int) bool { 140 | if pq.entries[i].Priority() == pq.entries[j].Priority() { 141 | return pq.entries[i].ID() < pq.entries[j].ID() 142 | } else { 143 | return pq.entries[i].Priority() < pq.entries[j].Priority() 144 | } 145 | } 146 | heap.Init(&pq) 147 | return &pq 148 | } 149 | 150 | // DelayedJobs is a JobHeap, with jobs ordered by its ReadyAt field. 151 | // Lower (earlier) ReadyAt takes a higher precedence. 
152 | type DelayedJobs JobHeap 153 | 154 | // NewDelayedJobs returns a new instance of DelayedJobs 155 | func NewDelayedJobs() DelayedJobs { 156 | pq := jobHeap{ 157 | entries: make([]*JobEntry, 0), 158 | } 159 | pq.lessFn = func(i, j int) bool { 160 | if pq.entries[i].ReadyAt() == pq.entries[j].ReadyAt() { 161 | return pq.entries[i].ID() < pq.entries[j].ID() 162 | } 163 | return pq.entries[i].ReadyAt() < pq.entries[j].ReadyAt() 164 | } 165 | heap.Init(&pq) 166 | return &pq 167 | } 168 | 169 | // ReservedJobs is a JobHeap, with jobs ordered by its ExpiresAt 170 | // field. Lower (earlier) ExpiresAt take a higher precedence 171 | type ReservedJobs JobHeap 172 | 173 | // NewReservedJobs returns a new instance of ReservedJobs 174 | func NewReservedJobs() ReservedJobs { 175 | pq := jobHeap{ 176 | entries: make([]*JobEntry, 0), 177 | } 178 | pq.lessFn = func(i, j int) bool { 179 | if pq.entries[i].ExpiresAt() == pq.entries[j].ExpiresAt() { 180 | return pq.entries[i].ID() < pq.entries[j].ID() 181 | } 182 | return pq.entries[i].ExpiresAt() < pq.entries[j].ExpiresAt() 183 | } 184 | heap.Init(&pq) 185 | return &pq 186 | } 187 | 188 | // BuriedJobs is a JobHeap, with jobs ordered by its BuriedAt 189 | // field. Lower (earlier) BuriedAt take a higher precedence 190 | // If two jobs have the same BuriedAt value, the lower job id gets precedence 191 | type BuriedJobs JobHeap 192 | 193 | // NewBuriedJobs returns a new instance of BuriedJobs. 
194 | func NewBuriedJobs() ReservedJobs { 195 | pq := jobHeap{ 196 | entries: make([]*JobEntry, 0), 197 | } 198 | pq.lessFn = func(i, j int) bool { 199 | if pq.entries[i].BuriedAt() == pq.entries[j].BuriedAt() { 200 | return pq.entries[i].ID() < pq.entries[j].ID() 201 | } 202 | return pq.entries[i].BuriedAt() < pq.entries[j].BuriedAt() 203 | } 204 | heap.Init(&pq) 205 | return &pq 206 | } 207 | -------------------------------------------------------------------------------- /state/job_heap_test.go: -------------------------------------------------------------------------------- 1 | package state 2 | 3 | import ( 4 | "github.com/stretchr/testify/assert" 5 | "testing" 6 | "time" 7 | ) 8 | 9 | func TestNewPriorityJobs(t *testing.T) { 10 | jobs := NewPriorityJobs() 11 | assert.Equalf(t, jobs.Len(), 0, "Initial jobs is empty") 12 | } 13 | 14 | func TestPriorityJobs_Enqueue_Order(t *testing.T) { 15 | jobs := NewPriorityJobs() 16 | 17 | e1, e2 := newTestJobWithPri(10), newTestJobWithPri(3) 18 | 19 | a1 := jobs.Enqueue(e1) 20 | a2 := jobs.Enqueue(e2) 21 | 22 | assert.Equalf(t, 0, a2.index, 23 | "job entry with lower pri is at the head of queue") 24 | assert.Equalf(t, 1, a1.index, 25 | "job entry with higher pri is at not at head of queue") 26 | } 27 | 28 | // TestPriorityJobs_Dequeue 29 | // Verify the dequeue operation is ordered by pri; lower pri jobEntry is de-queued first 30 | func TestPriorityJobs_Dequeue(t *testing.T) { 31 | jobs := NewPriorityJobs() 32 | e1, e2 := newTestJobWithPri(10), newTestJobWithPri(3) 33 | 34 | a1 := jobs.Enqueue(e1) 35 | a2 := jobs.Enqueue(e2) 36 | 37 | assert.Equalf(t, a2.ID(), jobs.Dequeue().ID(), 38 | "Expect jobEntry2 to be de-queued") 39 | assert.Equalf(t, a1.ID(), jobs.Dequeue().ID(), 40 | "Expect jobEntry1 to be de-queued") 41 | } 42 | 43 | func TestPriorityJobs_RemoveAt(t *testing.T) { 44 | jobs := NewPriorityJobs() 45 | jobs.Enqueue(newTestJobWithPri(10)) 46 | e := jobs.Enqueue(newTestJobWithPri(3)) 47 | 48 | a, err := 
jobs.RemoveAt(e) 49 | 50 | assert.Nil(t, err, "Expect err to be nil") 51 | assert.Equalf(t, e.ID(), a.ID(), 52 | "Expect jobEntry2 to be removedAt") 53 | } 54 | 55 | var jobID uint64 = 0 56 | 57 | func newTestJob(pri uint32, delay int64, ttr int, bodySize int, 58 | body []byte, tubeName TubeName) Job { 59 | 60 | now := time.Now().UTC().Unix() 61 | jobID++ 62 | return &localJob{ 63 | id: jobID, 64 | priority: pri, 65 | delay: delay, 66 | ttr: ttr, 67 | bodySize: bodySize, 68 | body: body, 69 | tubeName: tubeName, 70 | createdAt: now, 71 | readyAt: now + delay, 72 | state: Initial, 73 | expiresAt: 0, 74 | buriedAt: 0, 75 | } 76 | } 77 | 78 | func newTestJobWithPri(pri uint32) Job { 79 | return newTestJob(pri, 0, 0, 0, nil, TubeName("alpha")) 80 | } 81 | 82 | func TestNewDelayedJobs(t *testing.T) { 83 | jobs := NewDelayedJobs() 84 | assert.Equalf(t, jobs.Len(), 0, "Initial jobs is empty") 85 | } 86 | 87 | func TestDelayedJobs_Enqueue_Order(t *testing.T) { 88 | jobs := NewDelayedJobs() 89 | 90 | e1, e2 := newTestJobWithDelay(0, 1, 1000), 91 | newTestJobWithDelay(1, 1, 1) 92 | 93 | a1 := jobs.Enqueue(e1) 94 | a2 := jobs.Enqueue(e2) 95 | 96 | assert.Equalf(t, 0, a2.index, 97 | "job entry with lower ReadyAt is at the head of queue") 98 | assert.Equalf(t, 1, a1.index, 99 | "job entry with higher ReadyAt is at not at head of queue") 100 | } 101 | 102 | // TestDelayedJobs_Dequeue 103 | // Verify the dequeue operation is ordered by readyAt; lower readyAt jobEntry is de-queued first 104 | func TestDelayedJobs_Dequeue(t *testing.T) { 105 | jobs := NewDelayedJobs() 106 | e1, e2 := newTestJobWithDelay(0, 1, 1000), 107 | newTestJobWithDelay(1, 1, 1) 108 | 109 | a1 := jobs.Enqueue(e1) 110 | a2 := jobs.Enqueue(e2) 111 | 112 | assert.Equalf(t, a2.ID(), jobs.Dequeue().ID(), 113 | "Expect jobEntry2 to be de-queued") 114 | assert.Equalf(t, a1.ID(), jobs.Dequeue().ID(), 115 | "Expect jobEntry1 to be de-queued") 116 | } 117 | 118 | // TestDelayedJobs_RemoveAt 119 | // Verify RemoveAt 
operation; i.e remove a specific jobEntry 120 | func TestDelayedJobs_RemoveAt(t *testing.T) { 121 | jobs := NewDelayedJobs() 122 | jobs.Enqueue(newTestJobWithDelay(0, 1, 1000)) 123 | e := jobs.Enqueue(newTestJobWithDelay(1, 1, 10000)) 124 | 125 | a, err := jobs.RemoveAt(e) 126 | 127 | assert.Nil(t, err, "Expect err to be nil") 128 | assert.Equalf(t, e.ID(), a.ID(), 129 | "Expect jobEntry2 to be removedAt") 130 | } 131 | 132 | func newTestJobWithDelay(id uint64, createdAt int64, delay int64) Job { 133 | return &localJob{ 134 | id: id, 135 | priority: 0, 136 | createdAt: createdAt, 137 | delay: delay, 138 | readyAt: createdAt + delay, 139 | } 140 | } 141 | 142 | func TestNewReservedJobs(t *testing.T) { 143 | rJobs := NewReservedJobs() 144 | assert.Equalf(t, rJobs.Len(), 0, "Initial job list is empty") 145 | } 146 | 147 | func TestReservedJobs_Enqueue(t *testing.T) { 148 | rJobs := NewReservedJobs() 149 | e := newTestJobWithPri(0) 150 | updateResv(t, e) 151 | 152 | a := rJobs.Enqueue(e) 153 | 154 | assert.Equalf(t, e.ID(), a.ID(), 155 | "Result job entry matched inserted job") 156 | assert.Equalf(t, 0, a.index, 157 | "Result job entry is at the head of queue") 158 | } 159 | 160 | // TestReservedJobs_Enqueue_Order 161 | // Verify Enqueue is priority ordered by expiresAt; lower expiresAt has higher precedence 162 | func TestReservedJobs_Enqueue_Order(t *testing.T) { 163 | rJobs := NewReservedJobs() 164 | e1, e2 := newTestJobWithTTR(t, 1000), newTestJobWithTTR(t, 1) 165 | 166 | a1 := rJobs.Enqueue(e1) 167 | a2 := rJobs.Enqueue(e2) 168 | 169 | assert.Equalf(t, 0, a2.index, 170 | "job entry with lower expiresAt is at the head of queue") 171 | assert.Equalf(t, 1, a1.index, 172 | "job entry with higher expiresAt is at not at head of queue") 173 | } 174 | 175 | // TestReservedJobList_Dequeue 176 | // Verify the dequeue operation is ordered by expiresAt; lower expiresAt jobEntry is de-queued first 177 | func TestReservedJobs_Dequeue(t *testing.T) { 178 | rJobs := 
NewReservedJobs() 179 | e1, e2 := newTestJobWithTTR(t, 1), newTestJobWithTTR(t, 100) 180 | 181 | a1 := rJobs.Enqueue(e1) 182 | a2 := rJobs.Enqueue(e2) 183 | 184 | assert.Equalf(t, a1.ID(), rJobs.Dequeue().ID(), 185 | "Expect jobEntry1 to be de-queued") 186 | assert.Equalf(t, a2.ID(), rJobs.Dequeue().ID(), 187 | "Expect jobEntry2 to be de-queued") 188 | } 189 | 190 | // TestReservedJobs_RemoveAt 191 | // Verify RemoveAt operation; i.e remove a specific jobEntry 192 | func TestReservedJobs_RemoveAt(t *testing.T) { 193 | rJobs := NewReservedJobs() 194 | rJobs.Enqueue(newTestJobWithTTR(t, 1)) 195 | e := rJobs.Enqueue(newTestJobWithTTR(t, 100)) 196 | 197 | a, err := rJobs.RemoveAt(e) 198 | 199 | assert.Nil(t, err, "Expect err to be nil") 200 | assert.Equalf(t, e.ID(), a.ID(), 201 | "Expect jobEntry2 to be removedAt") 202 | } 203 | 204 | func newTestJobWithTTR(t *testing.T, ttr int) Job { 205 | j := newTestJob(0, 0, ttr, 0, nil, TubeName("alpha")) 206 | updateResv(t, j) 207 | return j 208 | } 209 | func updateResv(t *testing.T, j Job) { 210 | now := time.Now().UTC().Unix() 211 | _, err := j.UpdateReservation(now) 212 | if err != nil { 213 | t.Fatalf("un-expected err %v", err) 214 | } 215 | } 216 | 217 | func TestNewBuriedJobs(t *testing.T) { 218 | bJobs := NewBuriedJobs() 219 | assert.Equalf(t, bJobs.Len(), 0, "Initial job list is empty") 220 | } 221 | 222 | // TestBuriedJobs_Enqueue_Order 223 | // Verify Enqueue is priority ordered by buriedAt; lower buriedAt has higher precedence 224 | func TestBuriedJobs_Enqueue_Order(t *testing.T) { 225 | bJobs := NewBuriedJobs() 226 | j0, j1, j2 := newTestJobWithBuriedAt(t, 10), 227 | newTestJobWithBuriedAt(t, 5), 228 | newTestJobWithBuriedAt(t, 20) 229 | 230 | je0 := bJobs.Enqueue(j0) 231 | je1 := bJobs.Enqueue(j1) 232 | je2 := bJobs.Enqueue(j2) 233 | 234 | assert.Equalf(t, 1, je0.index, 235 | "Result job entry is at the middle of queue") 236 | assert.Equalf(t, 0, je1.index, 237 | "Result job entry is at the head of queue") 238 | 
assert.Equalf(t, 2, je2.index, 239 | "Result job entry is at the end of queue") 240 | } 241 | 242 | // TestBuriedJobs_Enqueue_Order_WithEqualBuriedAt 243 | // Verify Enqueue is priority ordered by job id when buriedAt are equal; 244 | // lower job id has higher precedence 245 | func TestBuriedJobs_Enqueue_Order_WithEqualBuriedAt(t *testing.T) { 246 | bJobs := NewBuriedJobs() 247 | j0, j1 := newTestJobWithBuriedAt(t, 10), 248 | newTestJobWithBuriedAt(t, 10) 249 | 250 | je1 := bJobs.Enqueue(j1) 251 | je0 := bJobs.Enqueue(j0) 252 | 253 | assert.Equalf(t, 0, je0.index, 254 | "Result job entry is at the head of queue since its id is lower") 255 | assert.Equalf(t, 1, je1.index, 256 | "Result job entry is at the head of queue since its id is lower") 257 | } 258 | 259 | // TestBuriedJobs_Dequeue 260 | // Verify the dequeue operation is ordered by buriedAt; lower buriedAt jobEntry is de-queued first 261 | func TestBuriedJobs_Dequeue(t *testing.T) { 262 | bJobs := NewBuriedJobs() 263 | j0, j1, j2 := newTestJobWithBuriedAt(t, 10), 264 | newTestJobWithBuriedAt(t, 5), 265 | newTestJobWithBuriedAt(t, 20) 266 | 267 | bJobs.Enqueue(j0) 268 | bJobs.Enqueue(j1) 269 | bJobs.Enqueue(j2) 270 | 271 | assert.Equalf(t, j1.ID(), bJobs.Dequeue().ID(), 272 | "Expect jobEntry1 to be de-queued") 273 | assert.Equalf(t, j0.ID(), bJobs.Dequeue().ID(), 274 | "Expect jobEntry0 to be de-queued") 275 | assert.Equalf(t, j2.ID(), bJobs.Dequeue().ID(), 276 | "Expect jobEntry2 to be de-queued") 277 | } 278 | 279 | // TestBuriedJobs_RemoveAt 280 | // Verify RemoveAt operation; i.e remove a specific jobEntry 281 | func TestBuriedJobs_RemoveAt(t *testing.T) { 282 | bJobs := NewBuriedJobs() 283 | j0, j1, j2 := newTestJobWithBuriedAt(t, 10), 284 | newTestJobWithBuriedAt(t, 5), 285 | newTestJobWithBuriedAt(t, 20) 286 | 287 | je0 := bJobs.Enqueue(j0) 288 | bJobs.Enqueue(j1) 289 | bJobs.Enqueue(j2) 290 | 291 | a, err := bJobs.RemoveAt(je0) 292 | 293 | assert.Nil(t, err, "Expect err to be nil") 294 | 
assert.Equalf(t, je0.ID(), a.ID(), 295 | "Expect jobEntry0 to be removedAt") 296 | assert.Equalf(t, 2, bJobs.Len(), "expect length to be 2") 297 | } 298 | 299 | func newTestJobWithBuriedAt(t *testing.T, nowSeconds int64) Job { 300 | j := newTestJob(0, 0, 10, 0, nil, TubeName("alpha")) 301 | j.UpdateBuriedAt(nowSeconds) 302 | return j 303 | } 304 | -------------------------------------------------------------------------------- /state/state_string.go: -------------------------------------------------------------------------------- 1 | // Code generated by "stringer -type=JobState --output state_string.go"; DO NOT EDIT. 2 | 3 | package state 4 | 5 | import "strconv" 6 | 7 | func _() { 8 | // An "invalid array index" compiler error signifies that the constant values have changed. 9 | // Re-run the stringer command to generate them again. 10 | var x [1]struct{} 11 | _ = x[Initial-0] 12 | _ = x[Ready-1] 13 | _ = x[Reserved-2] 14 | _ = x[Buried-3] 15 | _ = x[Delayed-4] 16 | _ = x[Deleted-5] 17 | } 18 | 19 | const _JobState_name = "InitialReadyReservedBuriedDelayedDeleted" 20 | 21 | var _JobState_index = [...]uint8{0, 7, 12, 20, 26, 33, 40} 22 | 23 | func (i JobState) String() string { 24 | if i < 0 || i >= JobState(len(_JobState_index)-1) { 25 | return "JobState(" + strconv.FormatInt(int64(i), 10) + ")" 26 | } 27 | return _JobState_name[_JobState_index[i]:_JobState_index[i+1]] 28 | } 29 | -------------------------------------------------------------------------------- /store/client_uri.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "errors" 5 | "github.com/1xyz/coolbeans/state" 6 | log "github.com/sirupsen/logrus" 7 | "net/url" 8 | "strings" 9 | ) 10 | 11 | const scheme = "client" 12 | 13 | var ( 14 | ErrInvalidURI = errors.New("invalid uri") 15 | ErrValidationFailed = errors.New("uri validation failed") 16 | ErrInvalidSchema = errors.New("uri schema did not match or not present") 17 | ) 18 | 19 
| type ClientURI struct { 20 | proxyID string 21 | clientID string 22 | } 23 | 24 | func NewClientURI(proxyID string, clientID string) *ClientURI { 25 | return &ClientURI{ 26 | proxyID: proxyID, 27 | clientID: clientID, 28 | } 29 | } 30 | 31 | func ParseClientURI(clientID state.ClientID) (*ClientURI, error) { 32 | u, err := url.Parse(string(clientID)) 33 | if err != nil { 34 | log.Errorf("invalid uri. err %v", err) 35 | return nil, ErrInvalidURI 36 | } 37 | 38 | if strings.ToLower(u.Scheme) != scheme { 39 | log.Errorf("invalid schema = %v", u.Scheme) 40 | return nil, ErrInvalidSchema 41 | } 42 | 43 | q := u.Query() 44 | c := NewClientURI(q.Get("proxy"), q.Get("client")) 45 | if err := c.Validate(); err != nil { 46 | return nil, err 47 | } 48 | 49 | return c, nil 50 | } 51 | 52 | func (c *ClientURI) Validate() error { 53 | if len(c.clientID) == 0 || len(c.proxyID) == 0 { 54 | log.Errorf("invalid clientid=%v or proxyid=%v", c.clientID, c.proxyID) 55 | return ErrValidationFailed 56 | } 57 | 58 | return nil 59 | } 60 | 61 | func (c *ClientURI) ToClientID() state.ClientID { 62 | baseUrl, err := url.Parse("client://id") 63 | if err != nil { 64 | log.Panicf("malformed url %v", err) 65 | } 66 | 67 | params := url.Values{} 68 | params.Add("proxy", c.proxyID) 69 | params.Add("client", c.clientID) 70 | baseUrl.RawQuery = params.Encode() 71 | return state.ClientID(baseUrl.String()) 72 | } 73 | -------------------------------------------------------------------------------- /store/client_uri_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "github.com/1xyz/coolbeans/state" 5 | "github.com/stretchr/testify/assert" 6 | "testing" 7 | ) 8 | 9 | func TestParseClientID(t *testing.T) { 10 | var entries = []struct { 11 | inputCliID state.ClientID 12 | outClientURI *ClientURI 13 | outErr error 14 | }{ 15 | {state.ClientID("client://id?proxy=alpha&client=beta"), 16 | &ClientURI{proxyID: "alpha", clientID: 
"beta"}, nil}, 17 | {state.ClientID("client://id?proxy=alpha&client=beta&theta=omega"), 18 | &ClientURI{proxyID: "alpha", clientID: "beta"}, nil}, 19 | {state.ClientID("client://id?proxy=urn:uuid:043a76c3-903b-45ac-bd02-b03d6298b52e&client=urn:uuid:9d9eee85-bec7-4434-9559-0b8c83380033"), 20 | &ClientURI{ 21 | proxyID: "urn:uuid:043a76c3-903b-45ac-bd02-b03d6298b52e", 22 | clientID: "urn:uuid:9d9eee85-bec7-4434-9559-0b8c83380033"}, nil}, 23 | {state.ClientID("client://id?proxy=alpha"), nil, ErrValidationFailed}, 24 | {state.ClientID("client://id"), nil, ErrValidationFailed}, 25 | {state.ClientID("client:"), nil, ErrValidationFailed}, 26 | {state.ClientID("client"), nil, ErrInvalidSchema}, 27 | {state.ClientID(":::"), nil, ErrInvalidURI}, 28 | } 29 | 30 | for _, e := range entries { 31 | c, err := ParseClientURI(e.inputCliID) 32 | assert.Equalf(t, e.outClientURI, c, "expect clientURI to match") 33 | assert.Equalf(t, e.outErr, err, "expect err to match") 34 | } 35 | } 36 | 37 | func TestClientURI_ClientID(t *testing.T) { 38 | c := NewClientURI("foo", "bar").ToClientID() 39 | assert.Equalf(t, state.ClientID("client://id?client=bar&proxy=foo"), c, "expect ClientIDs to match") 40 | } 41 | -------------------------------------------------------------------------------- /store/snapshot.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | v1 "github.com/1xyz/coolbeans/api/v1" 5 | "github.com/1xyz/coolbeans/state" 6 | "github.com/golang/protobuf/proto" 7 | "github.com/hashicorp/raft" 8 | log "github.com/sirupsen/logrus" 9 | "golang.org/x/net/context" 10 | "io" 11 | "io/ioutil" 12 | "os" 13 | "time" 14 | ) 15 | 16 | type snapshot struct { 17 | snap *v1.SnapshotProto 18 | } 19 | 20 | func NewSnapshotFrom(jsm state.JSM) (*snapshot, error) { 21 | jsmSnap, err := jsm.Snapshot() 22 | if err != nil { 23 | return nil, err 24 | } 25 | 26 | ss := newSnapshot() 27 | if err := ss.createFrom(jsmSnap); err != nil { 28 | 
return nil, err 29 | } 30 | 31 | return ss, nil 32 | } 33 | 34 | func RestoreSnapshotTo(rdr io.Reader, jsm state.JSM, timeout time.Duration) error { 35 | logc := log.WithField("method", "RestoreSnapshotTo") 36 | 37 | jsmSnap, err := jsm.Snapshot() 38 | if err != nil { 39 | return err 40 | } 41 | 42 | ss := newSnapshot() 43 | if err := ss.readFull(rdr); err != nil { 44 | logc.Errorf("ss.readFull. err=%v", err) 45 | return err 46 | } 47 | 48 | return ss.restoreTo(jsmSnap, timeout) 49 | } 50 | 51 | func (s *snapshot) Persist(sink raft.SnapshotSink) error { 52 | logc := log.WithField("method", "snapshot.Persist") 53 | snkID := sink.ID() 54 | err := func(sinkID string) error { 55 | bytes, err := proto.Marshal(s.snap) 56 | if err != nil { 57 | return err 58 | } 59 | 60 | // Write data to sink. 61 | if n, err := sink.Write(bytes); err != nil { 62 | return err 63 | } else { 64 | logc.Debugf("sinkID=%v, wrote %v bytes to sink", sinkID, n) 65 | } 66 | 67 | // Close the sink. 68 | return sink.Close() 69 | }(snkID) 70 | 71 | if err != nil { 72 | logc.Errorf("marshal. err=%v", err) 73 | if err := sink.Cancel(); err != nil { 74 | logc.Errorf("sink.cancel. 
err=%v", err) 75 | } 76 | } 77 | 78 | return err 79 | } 80 | 81 | func (s *snapshot) Release() { 82 | log.Info("Release called") 83 | } 84 | 85 | func newSnapshot() *snapshot { 86 | return &snapshot{ 87 | snap: &v1.SnapshotProto{ 88 | Jobs: make([]*v1.JobProto, 0), 89 | Reservations: make([]*v1.ClientResvEntryProto, 0), 90 | }} 91 | } 92 | 93 | func (s *snapshot) readFull(r io.Reader) error { 94 | bytes, err := ioutil.ReadAll(r) 95 | if err != nil { 96 | return err 97 | } 98 | 99 | return proto.Unmarshal(bytes, s.snap) 100 | } 101 | 102 | func (s *snapshot) appendClientRsvEntry(cli *state.ClientResvEntry) { 103 | cliProto := &v1.ClientResvEntryProto{ 104 | ClientId: string(cli.CliID), 105 | ResvDeadlineAt: cli.ResvDeadlineAt, 106 | IsWaitingForResv: cli.IsWaitingForResv, 107 | TickAt: cli.TickAt, 108 | ReqId: cli.ReqID, 109 | HeapIndex: int32(cli.HeapIndex), 110 | WatchedTube: make([]string, 0), 111 | } 112 | 113 | for _, t := range cli.WatchedTubes { 114 | cliProto.WatchedTube = append(cliProto.WatchedTube, string(t)) 115 | } 116 | 117 | s.snap.Reservations = append(s.snap.Reservations, cliProto) 118 | } 119 | 120 | func (s *snapshot) appendJob(j state.Job) { 121 | s.snap.Jobs = append(s.snap.Jobs, JobToJobProto(j)) 122 | } 123 | 124 | func JobToJobProto(j state.Job) *v1.JobProto { 125 | return &v1.JobProto{ 126 | Id: int64(j.ID()), 127 | Priority: j.Priority(), 128 | Delay: j.Delay(), 129 | Ttr: int32(j.TTR()), 130 | TubeName: string(j.TubeName()), 131 | CreatedAt: j.CreatedAt(), 132 | ReadyAt: j.ReadyAt(), 133 | ExpiresAt: j.ExpiresAt(), 134 | State: v1.JobStateProto(j.State()), 135 | ReservedBy: string(j.ReservedBy()), 136 | BodySize: int32(j.BodySize()), 137 | Body: j.Body(), 138 | BuriedAt: j.BuriedAt(), 139 | 140 | ReserveCount: j.ReserveCount(), 141 | TimeoutCount: j.TimeoutCount(), 142 | ReleaseCount: j.ReleaseCount(), 143 | BuryCount: j.BuryCount(), 144 | KickCount: j.KickCount(), 145 | } 146 | } 147 | 148 | func (s *snapshot) ReadFromFile(filename 
string) error { 149 | f, err := os.Open(filename) 150 | if err != nil { 151 | return err 152 | } 153 | defer f.Close() 154 | return s.readFull(f) 155 | } 156 | 157 | func (s *snapshot) createFrom(jsmSnap state.JSMSnapshot) error { 158 | logc := log.WithField("method", "snapshot.createFrom") 159 | 160 | cliEntries, err := jsmSnap.SnapshotClients() 161 | if err != nil { 162 | logc.Errorf("jsmSnap.SnapshotClients. err=%v", err) 163 | return err 164 | } 165 | 166 | for cli := range cliEntries { 167 | s.appendClientRsvEntry(cli) 168 | } 169 | 170 | jobs, err := jsmSnap.SnapshotJobs() 171 | if err != nil { 172 | logc.Errorf("jsmSnap.SnapshotJobs. err=%v", err) 173 | return err 174 | } 175 | 176 | for job := range jobs { 177 | s.appendJob(job) 178 | } 179 | 180 | return nil 181 | } 182 | 183 | func (s *snapshot) restoreTo(jsmSnap state.JSMSnapshot, timeout time.Duration) error { 184 | logc := log.WithField("method", "snapshot.restoreTo") 185 | 186 | if err := s.restoreCliRevEntries(jsmSnap, timeout); err != nil { 187 | logc.Errorf("s.restoreCliRevEntries. err=%v", err) 188 | return err 189 | } 190 | 191 | if err := s.restoreJobs(jsmSnap, timeout); err != nil { 192 | logc.Errorf("s.restoreJobs. 
err=%v", err) 193 | return err 194 | } 195 | 196 | jsmSnap.FinalizeRestore() 197 | return nil 198 | } 199 | 200 | func (s *snapshot) restoreCliRevEntries(jsmSnap state.JSMSnapshot, timeout time.Duration) error { 201 | logc := log.WithField("method", "snapshot.restoreCliRevEntries") 202 | cliCh := make(chan *state.ClientResvEntry) 203 | errCliCh := make(chan error) 204 | go func(restoreCh <-chan *state.ClientResvEntry, errCh chan<- error) { 205 | defer close(errCh) 206 | ctx, cancel := context.WithTimeout(context.Background(), timeout) 207 | defer cancel() 208 | n := len(s.snap.Reservations) 209 | logc.Debugf("restore n=%v clients to job state machine", n) 210 | errCh <- jsmSnap.RestoreClients(ctx, n, cliCh) 211 | }(cliCh, errCliCh) 212 | 213 | for _, r := range s.snap.Reservations { 214 | cliCh <- &state.ClientResvEntry{ 215 | CliID: state.ClientID(r.ClientId), 216 | WatchedTubes: []state.TubeName{}, 217 | ResvDeadlineAt: r.ResvDeadlineAt, 218 | IsWaitingForResv: r.IsWaitingForResv, 219 | TickAt: r.TickAt, 220 | ReqID: r.ReqId, 221 | HeapIndex: int(r.HeapIndex), 222 | } 223 | } 224 | 225 | close(cliCh) 226 | err := <-errCliCh 227 | return err 228 | } 229 | 230 | func (s *snapshot) restoreJobs(jsmSnap state.JSMSnapshot, timeout time.Duration) error { 231 | logc := log.WithField("method", "snapshot.restoreJobs") 232 | jobCh := make(chan state.Job) 233 | errCh := make(chan error) 234 | 235 | go func(restoreCh <-chan state.Job, errCh chan<- error) { 236 | defer close(errCh) 237 | ctx, cancel := context.WithTimeout(context.Background(), timeout) 238 | defer cancel() 239 | logc.Debugf("restore jobs to job state machine") 240 | errCh <- jsmSnap.RestoreJobs(ctx, restoreCh) 241 | }(jobCh, errCh) 242 | 243 | for _, job := range s.snap.Jobs { 244 | jobCh <- NewJobFromJobProto(job) 245 | } 246 | 247 | close(jobCh) 248 | err := <-errCh 249 | return err 250 | } 251 | 252 | type wrapJob struct { 253 | jp *v1.JobProto 254 | } 255 | 256 | func NewJobFromJobProto(jp *v1.JobProto) 
state.Job { 257 | return &wrapJob{jp: jp} 258 | } 259 | 260 | func (j *wrapJob) ID() state.JobID { 261 | return state.JobID(j.jp.Id) 262 | } 263 | func (j *wrapJob) Priority() uint32 { 264 | return j.jp.Priority 265 | } 266 | func (j *wrapJob) UpdatePriority(newPriority uint32) uint32 { 267 | j.jp.Priority = newPriority 268 | return j.jp.Priority 269 | } 270 | func (j *wrapJob) Delay() int64 { 271 | return j.jp.Delay 272 | } 273 | func (j *wrapJob) UpdateDelay(newDelay int64) int64 { 274 | j.jp.Delay = newDelay 275 | return j.jp.Delay 276 | } 277 | func (j *wrapJob) TTR() int { 278 | return int(j.jp.Ttr) 279 | } 280 | func (j *wrapJob) BodySize() int { 281 | return int(j.jp.BodySize) 282 | } 283 | func (j *wrapJob) Body() []byte { 284 | return j.jp.Body 285 | } 286 | func (j *wrapJob) TubeName() state.TubeName { 287 | return state.TubeName(j.jp.TubeName) 288 | } 289 | func (j *wrapJob) CreatedAt() int64 { 290 | return j.jp.CreatedAt 291 | } 292 | func (j *wrapJob) ReadyAt() int64 { 293 | return j.jp.ReadyAt 294 | } 295 | func (j *wrapJob) UpdateReadyAt(nowSeconds int64) (int64, error) { 296 | j.jp.ReadyAt = nowSeconds + j.jp.Delay 297 | return j.jp.ReadyAt, nil 298 | } 299 | func (j *wrapJob) State() state.JobState { 300 | return state.JobState(j.jp.State) 301 | } 302 | func (j *wrapJob) UpdateState(newState state.JobState) { 303 | j.jp.State = v1.JobStateProto(newState) 304 | } 305 | func (j *wrapJob) UpdateReservedBy(clientID state.ClientID) { 306 | j.jp.ReservedBy = string(clientID) 307 | } 308 | func (j *wrapJob) ExpiresAt() int64 { 309 | return j.jp.ExpiresAt 310 | } 311 | func (j *wrapJob) ReservedBy() state.ClientID { 312 | return state.ClientID(j.jp.ReservedBy) 313 | } 314 | func (j *wrapJob) UpdateReservation(nowSeconds int64) (int64, error) { 315 | j.jp.ExpiresAt = nowSeconds + int64(j.jp.Ttr) 316 | return j.jp.ExpiresAt, nil 317 | } 318 | func (j *wrapJob) ResetBuriedAt() { 319 | j.jp.BuriedAt = 0 320 | } 321 | func (j *wrapJob) UpdateBuriedAt(nowSeconds 
int64) int64 { 322 | j.jp.BuriedAt = nowSeconds 323 | return j.jp.BuriedAt 324 | } 325 | func (j *wrapJob) BuriedAt() int64 { 326 | return j.jp.BuriedAt 327 | } 328 | func (j *wrapJob) ReserveCount() uint32 { 329 | return j.jp.GetReserveCount() 330 | } 331 | func (j *wrapJob) IncReserveCount() { 332 | j.jp.ReserveCount++ 333 | } 334 | func (j *wrapJob) TimeoutCount() uint32 { 335 | return j.jp.GetTimeoutCount() 336 | } 337 | func (j *wrapJob) IncTimeoutCount() { 338 | j.jp.TimeoutCount++ 339 | } 340 | func (j *wrapJob) ReleaseCount() uint32 { 341 | return j.jp.GetReleaseCount() 342 | } 343 | func (j *wrapJob) IncReleaseCount() { 344 | j.jp.ReleaseCount++ 345 | } 346 | func (j *wrapJob) BuryCount() uint32 { 347 | return j.jp.GetBuryCount() 348 | } 349 | func (j *wrapJob) IncBuryCount() { 350 | j.jp.BuryCount++ 351 | } 352 | func (j *wrapJob) KickCount() uint32 { 353 | return j.jp.GetKickCount() 354 | } 355 | func (j *wrapJob) IncKickCount() { 356 | j.jp.KickCount++ 357 | } 358 | -------------------------------------------------------------------------------- /store/snapshot_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "github.com/1xyz/coolbeans/state" 7 | "github.com/stretchr/testify/assert" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | func TestNewSnapshotFrom_EmptyJsm(t *testing.T) { 13 | jsm, err := state.NewJSM() 14 | if err != nil { 15 | t.Fatalf("err state.NewJSM %v", err) 16 | } 17 | 18 | ss, err := NewSnapshotFrom(jsm) 19 | assert.Nilf(t, err, "expect err to be nil") 20 | assert.NotNilf(t, ss, "expect snapshot to not be nil") 21 | assert.Equalf(t, 0, len(ss.snap.Jobs), "expect zero jobs to be snapshot") 22 | assert.Equalf(t, 0, len(ss.snap.Reservations), "expect zero reservations to be snapshot") 23 | } 24 | 25 | func TestNewSnapshotFrom(t *testing.T) { 26 | jsm, _ := state.NewJSM() 27 | resv := putNClientResvEntries(t, jsm, 5) 28 | jobs := putNJobs(t, jsm, 
7) 29 | 30 | ss, err := NewSnapshotFrom(jsm) 31 | assert.Nilf(t, err, "expect err to be nil") 32 | assert.NotNilf(t, ss, "expect snapshot to not be nil") 33 | assert.Equalf(t, len(jobs), len(ss.snap.Jobs), "expect zero jobs to be snapshot") 34 | assert.Equalf(t, len(resv), len(ss.snap.Reservations), "expect zero reservations to be snapshot") 35 | } 36 | 37 | func TestPersist(t *testing.T) { 38 | jsm, _ := state.NewJSM() 39 | putNClientResvEntries(t, jsm, 5) 40 | putNJobs(t, jsm, 7) 41 | 42 | ss, _ := NewSnapshotFrom(jsm) 43 | sink := newTestSink() 44 | err := ss.Persist(sink) 45 | assert.Nilf(t, err, "expect err to be nil") 46 | assert.Greaterf(t, len(sink.Bytes()), 0, "expect byte len > 0") 47 | } 48 | 49 | func TestRestoreSnapshotTo(t *testing.T) { 50 | jsm, _ := state.NewJSM() 51 | resv := putNClientResvEntries(t, jsm, 5) 52 | jobIDs := putNJobs(t, jsm, 7) 53 | 54 | ss, _ := NewSnapshotFrom(jsm) 55 | sink := newTestSink() 56 | ss.Persist(sink) 57 | 58 | // restore the snapshot to another state machine 59 | jsm2, _ := state.NewJSM() 60 | err := RestoreSnapshotTo(sink, jsm2, time.Second*30) 61 | assert.Nilf(t, err, "expect err to be nil") 62 | 63 | jsmSnap, _ := jsm2.Snapshot() 64 | // Verify if the jobIDs match 65 | if jobCh, err := jsmSnap.SnapshotJobs(); err != nil { 66 | t.Fatalf("jsmSnap.SnapshotJobs err=%v", err) 67 | } else { 68 | count := 0 69 | for range jobCh { 70 | count++ 71 | } 72 | assert.Equalf(t, len(jobIDs), count, "expect job count to be %v", len(jobIDs)) 73 | } 74 | 75 | // Verif if the clientIDs match 76 | if cliCh, err := jsmSnap.SnapshotClients(); err != nil { 77 | t.Fatalf("jsmSnap.SnapshotClients err=%v", err) 78 | } else { 79 | count := 0 80 | for range cliCh { 81 | count++ 82 | } 83 | assert.Equalf(t, len(resv), count, "expect resv count to be %v", len(resv)) 84 | } 85 | 86 | } 87 | 88 | func putNClientResvEntries(t *testing.T, jsm state.JSM, n int) []*state.Reservation { 89 | reqID := "bar" 90 | testResv := make([]*state.Reservation, 0) 
91 | now := testNowSecs() 92 | deadlineAt := now + 100 93 | for i := 0; i < n; i++ { 94 | cliID := state.ClientID(fmt.Sprintf("foo-%d", i)) 95 | if r, err := jsm.AppendReservation(cliID, reqID, []state.TubeName{"foo"}, now, deadlineAt); err != nil { 96 | t.Fatalf("error in jsm.AppendReservation. err=%v", err) 97 | } else { 98 | testResv = append(testResv, r) 99 | } 100 | } 101 | 102 | return testResv 103 | } 104 | 105 | func putNJobs(t *testing.T, jsm state.JSM, n int) []state.JobID { 106 | now := testNowSecs() 107 | body := "hello world" 108 | testJobs := make([]state.JobID, 0) 109 | for i := 0; i < n; i++ { 110 | jobID, err := jsm.Put(now, uint32(i), 0, 10, len(body), []byte(body), state.TubeName("foo")) 111 | if err != nil { 112 | t.Fatalf("error in jsm.Put. err=%v", err) 113 | } else { 114 | testJobs = append(testJobs, jobID) 115 | } 116 | } 117 | 118 | return testJobs 119 | } 120 | 121 | func testNowSecs() int64 { 122 | return time.Now().UTC().Unix() 123 | } 124 | 125 | type testSink struct { 126 | bytes.Buffer 127 | } 128 | 129 | func newTestSink() *testSink { 130 | return &testSink{ 131 | bytes.Buffer{}, 132 | } 133 | } 134 | 135 | func (t *testSink) Close() error { 136 | return nil 137 | } 138 | 139 | func (t *testSink) ID() string { 140 | return "id" 141 | } 142 | 143 | func (t *testSink) Cancel() error { 144 | return nil 145 | } 146 | -------------------------------------------------------------------------------- /tools/opts.go: -------------------------------------------------------------------------------- 1 | package tools 2 | 3 | import ( 4 | "github.com/docopt/docopt-go" 5 | log "github.com/sirupsen/logrus" 6 | "time" 7 | ) 8 | 9 | func OptsBool(opts docopt.Opts, key string) bool { 10 | v, err := opts.Bool(key) 11 | if err != nil { 12 | log.Fatalf("OptsBool: %v parse err = %v", key, err) 13 | } 14 | return v 15 | } 16 | 17 | func OptsStr(opts docopt.Opts, key string) string { 18 | v, err := opts.String(key) 19 | if err != nil { 20 | 
log.Fatalf("OptsStr: %v parse err = %v", key, err) 21 | } 22 | return v 23 | } 24 | 25 | func OptsInt(opts docopt.Opts, key string) int { 26 | v, err := opts.Int(key) 27 | if err != nil { 28 | log.Fatalf("OptsInt: %v parse err = %v", key, err) 29 | } 30 | return v 31 | } 32 | 33 | func OptsSeconds(opts docopt.Opts, key string) time.Duration { 34 | v := OptsInt(opts, key) 35 | return time.Duration(v) * time.Second 36 | } 37 | -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/ory/go-acc" 7 | _ "golang.org/x/tools/cmd/stringer" 8 | ) 9 | --------------------------------------------------------------------------------