├── .devcontainer
│   └── devcontainer.json
├── .dockerignore
├── .gitattributes
├── .github
│   ├── CODEOWNERS
│   ├── FUNDING.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   ├── instance-provider-request.md
│   │   └── reverse-proxy-integration-request.md
│   ├── dependabot.yml
│   ├── labeler.yml
│   └── workflows
│       ├── build.yml
│       ├── docker.yaml
│       ├── dockerhub-description.yml
│       ├── label.yml
│       ├── plugins.yml
│       └── release.yml
├── .gitignore
├── .golangci.yml
├── .testcontainers.properties
├── .traefik.yml
├── CODE_OF_CONDUCT.md
├── Dockerfile
├── LICENSE
├── Makefile
├── README.md
├── cmd
│   ├── healthcheck
│   │   └── healthcheck.go
│   ├── sablier
│   │   ├── cmd.go
│   │   ├── cmd_test.go
│   │   ├── logger.go
│   │   ├── provider.go
│   │   ├── sablier.go
│   │   ├── testdata
│   │   │   ├── config.env
│   │   │   ├── config.yml
│   │   │   ├── config_cli_wanted.json
│   │   │   ├── config_default.json
│   │   │   ├── config_env_wanted.json
│   │   │   └── config_yaml_wanted.json
│   │   └── theme.go
│   └── version
│       └── version.go
├── docker
│   └── sablier.yaml
├── docs
│   ├── .nojekyll
│   ├── CNAME
│   ├── README.md
│   ├── _sidebar.md
│   ├── assets
│   │   └── img
│   │       ├── apacheapisix.png
│   │       ├── banner.png
│   │       ├── caddy.png
│   │       ├── demo.gif
│   │       ├── docker.svg
│   │       ├── docker_swarm.png
│   │       ├── envoy.png
│   │       ├── favicon.ico
│   │       ├── ghost.png
│   │       ├── github.svg
│   │       ├── hacker-terminal.png
│   │       ├── icon.png
│   │       ├── integration.png
│   │       ├── istio.png
│   │       ├── kubernetes.png
│   │       ├── matrix.png
│   │       ├── nginx.svg
│   │       ├── reverse-proxy-integration.png
│   │       ├── shuffle.png
│   │       └── traefik.png
│   ├── configuration.md
│   ├── getting-started.md
│   ├── guides
│   │   ├── code-server-traefik-kubernetes.md
│   │   └── overview.md
│   ├── health.md
│   ├── index.html
│   ├── installation.md
│   ├── plugins
│   │   ├── apacheapisix.md
│   │   ├── caddy.md
│   │   ├── envoy.md
│   │   ├── istio.md
│   │   ├── nginx.md
│   │   ├── nginx_proxywasm.md
│   │   ├── overview.md
│   │   └── traefik.md
│   ├── providers
│   │   ├── docker.md
│   │   ├── docker_swarm.md
│   │   ├── kubernetes.md
│   │   └── overview.md
│   ├── strategies.md
│   ├── themes.md
│   └── versioning.md
├── e2e
│   └── e2e_test.go
├── go.mod
├── go.sum
├── go.work
├── go.work.sum
├── internal
│   ├── api
│   │   ├── abort.go
│   │   ├── api.go
│   │   ├── api_response_headers.go
│   │   ├── api_test.go
│   │   ├── apitest
│   │   │   └── mocks_sablier.go
│   │   ├── health.go
│   │   ├── problemdetail.go
│   │   ├── start_blocking.go
│   │   ├── start_blocking_test.go
│   │   ├── start_dynamic.go
│   │   ├── start_dynamic_test.go
│   │   └── theme_list.go
│   └── server
│       ├── logging.go
│       ├── routes.go
│       └── server.go
├── pkg
│   ├── config
│   │   ├── configuration.go
│   │   ├── logging.go
│   │   ├── provider.go
│   │   ├── server.go
│   │   ├── sessions.go
│   │   ├── storage.go
│   │   └── strategy.go
│   ├── durations
│   │   ├── duration.go
│   │   └── humanize.go
│   ├── provider
│   │   ├── docker
│   │   │   ├── container_inspect.go
│   │   │   ├── container_inspect_test.go
│   │   │   ├── container_list.go
│   │   │   ├── container_list_test.go
│   │   │   ├── container_start.go
│   │   │   ├── container_start_test.go
│   │   │   ├── container_stop.go
│   │   │   ├── container_stop_test.go
│   │   │   ├── docker.go
│   │   │   ├── events.go
│   │   │   ├── events_test.go
│   │   │   └── testcontainers_test.go
│   │   ├── dockerswarm
│   │   │   ├── docker_swarm.go
│   │   │   ├── events.go
│   │   │   ├── events_test.go
│   │   │   ├── service_inspect.go
│   │   │   ├── service_inspect_test.go
│   │   │   ├── service_list.go
│   │   │   ├── service_list_test.go
│   │   │   ├── service_start.go
│   │   │   ├── service_start_test.go
│   │   │   ├── service_stop.go
│   │   │   ├── service_stop_test.go
│   │   │   └── testcontainers_test.go
│   │   ├── kubernetes
│   │   │   ├── deployment_events.go
│   │   │   ├── deployment_inspect.go
│   │   │   ├── deployment_inspect_test.go
│   │   │   ├── deployment_list.go
│   │   │   ├── instance_events.go
│   │   │   ├── instance_events_test.go
│   │   │   ├── instance_inspect.go
│   │   │   ├── instance_inspect_test.go
│   │   │   ├── instance_list.go
│   │   │   ├── instance_list_test.go
│   │   │   ├── instance_start.go
│   │   │   ├── instance_start_test.go
│   │   │   ├── instance_stop.go
│   │   │   ├── instance_stop_test.go
│   │   │   ├── kubernetes.go
│   │   │   ├── parse_name.go
│   │   │   ├── parse_name_test.go
│   │   │   ├── statefulset_events.go
│   │   │   ├── statefulset_inspect.go
│   │   │   ├── statefulset_inspect_test.go
│   │   │   ├── statefulset_list.go
│   │   │   ├── testcontainers_test.go
│   │   │   └── workload_scale.go
│   │   ├── providertest
│   │   │   └── mock_provider.go
│   │   └── types.go
│   ├── sablier
│   │   ├── autostop.go
│   │   ├── autostop_test.go
│   │   ├── errors.go
│   │   ├── group_watch.go
│   │   ├── instance.go
│   │   ├── instance_expired.go
│   │   ├── instance_request.go
│   │   ├── provider.go
│   │   ├── sablier.go
│   │   ├── sablier_test.go
│   │   ├── session.go
│   │   ├── session_request.go
│   │   ├── session_request_test.go
│   │   └── store.go
│   ├── store
│   │   ├── inmemory
│   │   │   ├── inmemory.go
│   │   │   └── inmemory_test.go
│   │   ├── store.go
│   │   ├── storetest
│   │   │   └── mocks_store.go
│   │   └── valkey
│   │       ├── valkey.go
│   │       └── valkey_test.go
│   ├── theme
│   │   ├── embedded
│   │   │   ├── ghost.html
│   │   │   ├── hacker-terminal.html
│   │   │   ├── matrix.html
│   │   │   └── shuffle.html
│   │   ├── errors.go
│   │   ├── list.go
│   │   ├── list_test.go
│   │   ├── parse.go
│   │   ├── render.go
│   │   ├── render_test.go
│   │   ├── theme.go
│   │   └── types.go
│   ├── tinykv
│   │   ├── heap.go
│   │   ├── retry.go
│   │   ├── timeout_heap.go
│   │   ├── tinykv.go
│   │   └── tinykv_test.go
│   └── version
│       └── info.go
├── plugins
│   ├── caddy
│   │   ├── .gitignore
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── config.go
│   │   ├── config_test.go
│   │   ├── e2e
│   │   │   ├── docker
│   │   │   │   ├── Caddyfile
│   │   │   │   ├── docker-compose.yml
│   │   │   │   └── run.sh
│   │   │   ├── docker_swarm
│   │   │   │   ├── Caddyfile
│   │   │   │   ├── docker-stack.yml
│   │   │   │   └── run.sh
│   │   │   └── kubernetes
│   │   │       ├── Caddyfile
│   │   │       ├── docker-kubernetes.yml
│   │   │       ├── run.sh
│   │   │       └── values.yaml
│   │   ├── go.mod
│   │   ├── go.sum
│   │   ├── main.go
│   │   ├── main_test.go
│   │   └── remote.Dockerfile
│   ├── nginx
│   │   ├── README.md
│   │   ├── e2e
│   │   │   ├── docker
│   │   │   │   ├── docker-compose.yml
│   │   │   │   ├── nginx.conf
│   │   │   │   └── run.sh
│   │   │   ├── docker_swarm
│   │   │   │   ├── docker-stack.yml
│   │   │   │   ├── nginx.conf
│   │   │   │   └── run.sh
│   │   │   ├── kubernetes
│   │   │   │   ├── docker-kubernetes.yml
│   │   │   │   ├── manifests
│   │   │   │   │   ├── deployment.yml
│   │   │   │   │   └── sablier.yml
│   │   │   │   └── run.sh
│   │   │   └── nginx.conf
│   │   └── njs
│   │       └── sablier.js
│   ├── proxywasm
│   │   ├── BlockingConfiguration_json.go
│   │   ├── Config_json.go
│   │   ├── Dockerfile
│   │   ├── DynamicConfiguration_json.go
│   │   ├── Makefile
│   │   ├── README.md
│   │   ├── e2e
│   │   │   ├── apacheapisix
│   │   │   │   ├── README.md
│   │   │   │   └── docker
│   │   │   │       ├── apisix.yaml
│   │   │   │       ├── compose.yaml
│   │   │   │       ├── config.yaml
│   │   │   │       └── run.sh
│   │   │   ├── envoy
│   │   │   │   └── docker
│   │   │   │       ├── compose.yaml
│   │   │   │       ├── envoy.yaml
│   │   │   │       └── run.sh
│   │   │   ├── istio
│   │   │   │   └── kubernetes
│   │   │   │       ├── README.md
│   │   │   │       ├── compose.yaml
│   │   │   │       ├── istio-gateway-values.yaml
│   │   │   │       ├── manifests
│   │   │   │       │   ├── ingressgateway.yaml
│   │   │   │       │   ├── nginx.yml
│   │   │   │       │   ├── sablier.yml
│   │   │   │       │   ├── wasmplugin.yaml
│   │   │   │       │   └── whoami.yml
│   │   │   │       └── run.sh
│   │   │   └── nginx
│   │   │       ├── Dockerfile
│   │   │       └── docker
│   │   │           ├── compose.yaml
│   │   │           ├── nginx.conf
│   │   │           └── run.sh
│   │   ├── go.mod
│   │   ├── go.sum
│   │   ├── main.go
│   │   └── main_test.go
│   └── traefik
│       ├── .gitignore
│       ├── README.md
│       ├── config.go
│       ├── config_test.go
│       ├── e2e
│       │   ├── docker
│       │   │   ├── docker-compose.yml
│       │   │   ├── dynamic-config.yml
│       │   │   └── run.sh
│       │   ├── docker_swarm
│       │   │   ├── docker-stack.yml
│       │   │   └── run.sh
│       │   └── kubernetes
│       │       ├── docker-kubernetes.yml
│       │       ├── manifests
│       │       │   ├── deployment.yml
│       │       │   └── sablier.yml
│       │       ├── run.sh
│       │       └── values.yaml
│       ├── go.mod
│       ├── go.sum
│       ├── main.go
│       └── main_test.go
├── release.config.js
└── sablier.sample.yaml
/.devcontainer/devcontainer.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Go",
3 | "image": "mcr.microsoft.com/devcontainers/go:1.24",
4 | "features": {
5 | "ghcr.io/devcontainers/features/node:1": {
6 | "version": "lts"
7 | },
8 | "ghcr.io/devcontainers/features/docker-in-docker:2": {},
9 | "ghcr.io/devcontainers/features/git:1": {},
10 | "ghcr.io/devcontainers/features/go:1": {}
11 | },
12 |
13 | // Configure tool-specific properties.
14 | "customizations": {
15 | // Configure properties specific to VS Code.
16 | "vscode": {
17 | // Set *default* container specific settings.json values on container create.
18 | "settings": {
19 | "go.toolsManagement.checkForUpdates": "local",
20 | "go.useLanguageServer": true,
21 | "go.gopath": "/go"
22 | }
23 | }
24 | }
25 |
26 | // Use 'forwardPorts' to make a list of ports inside the container available locally.
27 | // "forwardPorts": [],
28 |
29 | // Use 'postCreateCommand' to run commands after the container is created.
30 | // "postCreateCommand": "go version",
31 |
32 | // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
33 | // "remoteUser": "root"
34 | }
35 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | bin/*
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | * text=auto eol=lf
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @acouvreur
--------------------------------------------------------------------------------
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: acouvreur
4 | patreon: # Replace with a single Patreon username
5 | open_collective: # Replace with a single Open Collective username
6 | ko_fi: # Replace with a single Ko-fi username
7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
9 | liberapay: # Replace with a single Liberapay username
10 | issuehunt: # Replace with a single IssueHunt username
11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
12 | polar: # Replace with a single Polar username
13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username
14 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
15 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **Context**
14 | - Sablier version: [e.g. 1.6.0]
15 | - Provider: [e.g. docker v20.10.21]
16 | - Reverse proxy: [e.g. traefik v2.8.5]
17 | - Sablier running inside a container?
18 |
19 | **Expected behavior**
20 | A clear and concise description of what you expected to happen.
21 |
22 | **Additional context**
23 | Add any other context about the problem here.
24 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/instance-provider-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Instance provider request
3 | about: Suggest support for an instance provider
4 | title: Add `[PROVIDER]` provider
5 | labels: enhancement, provider
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the provider**
11 | A clear and concise description of the provider.
12 |
13 | **Does the provider provide the following APIs**
14 | *An instance is a generic term used to describe containers, pods, deployments...*
15 |
16 | | API                        | Yes | No | I don't know |
17 | | -------------------------- | --- | -- | ------------ |
18 | | Start an instance          |     |    |              |
19 | | Stop an instance           |     |    |              |
20 | | Get info about an instance |     |    |              |
21 | | Listen for external events |     |    |              |
22 |
23 | **Does the provider have a Go API SDK?**
24 |
25 | If yes, link it here.
26 |
27 | **Can the provider be started locally?**
28 | Cloud providers may offer local API tooling.
29 | Docker can be started locally, and Kubernetes has tools such as k3s to facilitate the integration.
30 |
31 | **Additional context**
32 | Add any other context about the feature request here.
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/reverse-proxy-integration-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Reverse proxy integration request
3 | about: Suggest a reverse proxy integration for this project
4 | title: Add `[REVERSE PROXY]` reverse proxy integration
5 | labels: enhancement, reverse-proxy
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the reverse proxy you'd like**
11 | A clear and concise description of the reverse proxy.
12 |
13 | **Does the reverse proxy provide middleware/module/extension support**
14 | Links to documentation and examples that show the extensibility of the reverse proxy.
15 | For Traefik it's HTTP middleware, for Nginx it's modules, etc.
16 |
17 | **Additional context**
18 | Add any other context about the feature request here.
19 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "gomod"
4 | directories:
5 | - "/"
6 | - "/plugins/caddy"
7 | - "/plugins/proxywasm"
8 | - "/plugins/traefik"
9 | schedule:
10 | interval: "daily"
11 | groups:
12 | testcontainers-go:
13 | patterns:
14 | - "github.com/testcontainers/testcontainers-go"
15 | - "github.com/testcontainers/testcontainers-go/modules/*"
16 | k8s.io:
17 | patterns:
18 | - "k8s.io/api"
19 | - "k8s.io/apiextensions-apiserver"
20 | - "k8s.io/apimachinery"
21 | - "k8s.io/apiserver"
22 | - "k8s.io/cli-runtime"
23 | - "k8s.io/client-go"
24 | - "k8s.io/kubectl"
25 | assignees:
26 | - "acouvreur"
27 |
28 | - package-ecosystem: "github-actions"
29 | directory: "/"
30 | schedule:
31 | interval: "daily"
32 | assignees:
33 | - "acouvreur"
34 |
35 | # Enable version updates for Docker
36 | - package-ecosystem: "docker"
38 | # Look for Dockerfiles in any directory
38 | directories:
39 | - "**/*"
40 | # Check for updates once a week
41 | schedule:
42 | interval: "weekly"
43 | assignees:
44 | - "acouvreur"
45 |
--------------------------------------------------------------------------------
/.github/labeler.yml:
--------------------------------------------------------------------------------
1 | documentation:
2 | - changed-files:
3 | - any-glob-to-any-file: 'docs/**'
4 |
5 | reverse-proxy:
6 | - changed-files:
7 | - any-glob-to-any-file: 'plugins/**'
8 |
9 | provider:
10 | - changed-files:
11 | - any-glob-to-any-file: 'pkg/provider/**'
12 |
13 | ci:
14 | - changed-files:
15 | - any-glob-to-any-file: '.github/**'
--------------------------------------------------------------------------------
/.github/workflows/dockerhub-description.yml:
--------------------------------------------------------------------------------
1 | name: Update Docker Hub Description
2 | on: release
3 |
4 | jobs:
5 | dockerHubDescription:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - uses: actions/checkout@v4
9 |
10 | - name: Docker Hub Description
11 | uses: peter-evans/dockerhub-description@v4
12 | with:
13 | username: ${{ secrets.DOCKER_USERNAME }}
14 | password: ${{ secrets.DOCKER_PASSWORD }}
15 | short-description: ${{ github.event.repository.description }}
--------------------------------------------------------------------------------
/.github/workflows/label.yml:
--------------------------------------------------------------------------------
1 | # This workflow will triage pull requests and apply a label based on the
2 | # paths that are modified in the pull request.
3 | #
4 | # To use this workflow, you will need to set up a .github/labeler.yml
5 | # file with configuration. For more information, see:
6 | # https://github.com/actions/labeler
7 |
8 | name: Labeler
9 | on: [pull_request]
10 |
11 | jobs:
12 | label:
13 |
14 | runs-on: ubuntu-latest
15 | permissions:
16 | contents: read
17 | pull-requests: write
18 |
19 | steps:
20 | - uses: actions/labeler@v5
21 | with:
22 | repo-token: "${{ secrets.GITHUB_TOKEN }}"
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 | on:
3 | push:
4 | branches:
5 | - main
6 | - beta
7 |
8 | jobs:
9 | release:
10 | name: Release
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 | with:
16 | fetch-depth: 0
17 | persist-credentials: false
18 |
19 | - name: Set up Go 1.24
20 | uses: actions/setup-go@v5
21 | with:
22 | go-version: ^1.24
23 |
24 | - name: Setup Node.js
25 | uses: actions/setup-node@v4
26 | with:
27 | node-version: '20'
28 |
29 | - name: Release
30 | env:
31 | GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}
32 | run: |
33 | npm i -g semantic-release@~23.0.0 @semantic-release/exec@~6.0.3 @semantic-release/git@~10.0.1
34 | npx semantic-release
35 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | sablier.yaml
2 | /plugins/traefik/e2e/kubeconfig.yaml
3 | node_modules
4 | .DS_Store
5 | *.wasm
6 | kubeconfig.yaml
7 | .idea
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | linters:
2 | enable:
3 | - dupword # Checks for duplicate words in the source code.
4 | - goimports
5 | - gosec
6 | - gosimple
7 | - govet
8 | - importas
9 | - ineffassign
10 | - misspell
11 | - revive
12 | - staticcheck
13 | - typecheck
14 | - unconvert
15 | - unused
--------------------------------------------------------------------------------
/.testcontainers.properties:
--------------------------------------------------------------------------------
1 | ryuk.disabled=true
--------------------------------------------------------------------------------
/.traefik.yml:
--------------------------------------------------------------------------------
1 | displayName: Sablier
2 | type: middleware
3 | iconPath: ./docs/assets/img/icon.png
4 | bannerPath: ./docs/assets/img/banner.png
5 |
6 | import: github.com/sablierapp/sablier/plugins/traefik
7 |
8 | summary: "Start your containers on demand, shut them down automatically when there's no activity. Docker, Docker Swarm Mode and Kubernetes compatible."
9 |
10 | testData:
11 | sablierUrl: http://sablier:10000 # The Sablier service URL; must be reachable from the Traefik instance
12 | names: whoami,nginx # Comma separated names of containers/services/deployments etc.
13 | group: default # Group name to use to filter by label, ignored if names is set
14 | sessionDuration: 1m # The session duration after which containers/services/deployments instances are shutdown
15 | # You can only use one strategy at a time
16 | # To do so, only declare `dynamic` or `blocking`
17 |
18 | # Dynamic strategy, provides the waiting webui
19 | dynamic:
20 | displayName: My Title # (Optional) Defaults to the middleware name
21 | showDetails: true # (Optional) Set to true or false to show details specifically for this middleware; unset to use the Sablier server defaults
22 | theme: hacker-terminal # (Optional) The theme to use
23 | refreshFrequency: 5s # (Optional) The loading page refresh frequency
24 |
25 | # Blocking strategy, waits until services are up and running
26 | # but will not wait more than `timeout`
27 | # blocking:
28 | # timeout: 1m
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.24 AS build
2 |
3 | RUN mkdir -p /etc/sablier/themes
4 | WORKDIR /src
5 | RUN go env -w GOMODCACHE=/root/.cache/go-build
6 |
7 | # See https://docs.docker.com/build/guide/mounts/#add-bind-mounts for cached builds
8 | RUN --mount=type=cache,target=/root/.cache/go-build \
9 | --mount=type=bind,source=go.sum,target=go.sum \
10 | --mount=type=bind,source=go.mod,target=go.mod \
11 | go mod download
12 |
13 | COPY . /src
14 | ARG BUILDTIME
15 | ARG VERSION
16 | ARG REVISION
17 | ARG TARGETOS
18 | ARG TARGETARCH
19 | RUN --mount=type=cache,target=/root/.cache/go-build \
20 | make BUILDTIME=${BUILDTIME} VERSION=${VERSION} GIT_REVISION=${REVISION} ${TARGETOS}/${TARGETARCH}
21 |
22 | FROM scratch
23 |
24 | EXPOSE 10000
25 | COPY --from=build /etc/sablier/themes /etc/sablier/themes
26 | COPY --from=build /src/sablier* /bin/sablier
27 | COPY docker/sablier.yaml /etc/sablier/sablier.yaml
28 |
29 | ENTRYPOINT [ "sablier" ]
30 | CMD [ "--configFile=/etc/sablier/sablier.yaml", "start" ]
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Sablier
2 |
3 | 
4 |
5 | Free and open-source software that starts workloads on demand and stops them after a period of inactivity.
6 |
7 | Think of it a bit like a serverless platform, but for your own servers.
8 |
9 | 
10 |
11 | Whether you don't want to overload your Raspberry Pi, or your QA environment is used only once a week and wastes resources by keeping workloads up and running, Sablier is a project that might interest you.
12 |
13 | ## 🎯 Features
14 |
15 | - [Supports the following providers](https://sablierapp.dev/#/providers/overview)
16 | - Docker
17 | - Docker Swarm
18 | - Kubernetes
19 | - [Supports multiple reverse proxies](https://sablierapp.dev/#/plugins/overview)
20 | - Apache APISIX
21 | - Caddy
22 | - Envoy
23 | - Istio
24 | - Nginx (NJS Module)
25 | - Nginx (WASM Module)
26 | - Traefik
27 | - Scale up your workload automatically upon the first request
28 | - [with a themable waiting page](https://sablierapp.dev/#/themes)
29 | - [with a hanging request (hang until service is up)](https://sablierapp.dev/#/strategies?id=blocking-strategy)
30 | - Scale your workload to zero automatically after a period of inactivity
31 |
32 | ## 📝 Documentation
33 |
34 | [See the documentation here](https://sablierapp.dev)
35 |
--------------------------------------------------------------------------------
/cmd/healthcheck/healthcheck.go:
--------------------------------------------------------------------------------
1 | package healthcheck
2 |
3 | import (
4 | "fmt"
5 | "github.com/spf13/cobra"
6 | "io"
7 | "net/http"
8 | "os"
9 | )
10 |
11 | const (
12 | healthy = true
13 | unhealthy = false
14 | )
15 |
16 | func Health(url string) (string, bool) {
17 | 	resp, err := http.Get(url)
18 | 	if err != nil {
19 | 		return err.Error(), unhealthy
20 | 	}
21 | 	// Release the response body once the check is done.
22 | 	defer resp.Body.Close()
23 |
24 | 	body, err := io.ReadAll(resp.Body)
25 | 	if err != nil {
26 | 		return err.Error(), unhealthy
27 | 	}
28 |
29 | 	if resp.StatusCode >= 400 {
30 | 		return string(body), unhealthy
31 | 	}
32 |
33 | 	return string(body), healthy
34 | }
35 |
36 | func NewCmd() *cobra.Command {
37 | 	cmd := &cobra.Command{
38 | 		Use:   "health",
39 | 		Short: "Calls the health endpoint of a Sablier instance",
40 | 		Run: func(cmd *cobra.Command, args []string) {
41 | 			details, healthy := Health(cmd.Flag("url").Value.String())
42 | 			if healthy {
43 | 				fmt.Fprintf(os.Stderr, "healthy: %v\n", details)
44 | 				os.Exit(0)
45 | 			} else {
46 | 				fmt.Fprintf(os.Stderr, "unhealthy: %v\n", details)
47 | 				os.Exit(1)
48 | 			}
49 | 		},
50 | 	}
51 | 	// Register --url so cmd.Flag("url") above cannot return nil; the default
52 | 	// matches the one documented in docs/health.md.
53 | 	cmd.Flags().String("url", "http://localhost:10000/health", "URL of the Sablier health endpoint")
54 | 	return cmd
55 | }
56 |
--------------------------------------------------------------------------------
/cmd/sablier/logger.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/lmittmann/tint"
5 | "github.com/sablierapp/sablier/pkg/config"
6 | "log/slog"
7 | "os"
8 | "strings"
9 | "time"
10 | )
11 |
12 | func setupLogger(config config.Logging) *slog.Logger {
13 | w := os.Stderr
14 | level := parseLogLevel(config.Level)
15 | // create a new logger
16 | logger := slog.New(tint.NewHandler(w, &tint.Options{
17 | Level: level,
18 | TimeFormat: time.Kitchen,
19 | AddSource: true,
20 | }))
21 |
22 | return logger
23 | }
24 |
25 | func parseLogLevel(level string) slog.Level {
26 | switch strings.ToUpper(level) {
27 | case slog.LevelDebug.String():
28 | return slog.LevelDebug
29 | case slog.LevelInfo.String():
30 | return slog.LevelInfo
31 | case slog.LevelWarn.String():
32 | return slog.LevelWarn
33 | case slog.LevelError.String():
34 | return slog.LevelError
35 | default:
36 | slog.Warn("invalid log level, defaulting to info", slog.String("level", level))
37 | return slog.LevelInfo
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/cmd/sablier/provider.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/docker/docker/client"
7 | "github.com/sablierapp/sablier/pkg/config"
8 | "github.com/sablierapp/sablier/pkg/provider/docker"
9 | "github.com/sablierapp/sablier/pkg/provider/dockerswarm"
10 | "github.com/sablierapp/sablier/pkg/provider/kubernetes"
11 | "github.com/sablierapp/sablier/pkg/sablier"
12 | k8s "k8s.io/client-go/kubernetes"
13 | "k8s.io/client-go/rest"
14 | "log/slog"
15 | )
16 |
17 | func setupProvider(ctx context.Context, logger *slog.Logger, config config.Provider) (sablier.Provider, error) {
18 | if err := config.IsValid(); err != nil {
19 | return nil, err
20 | }
21 |
22 | switch config.Name {
23 | case "swarm", "docker_swarm":
24 | cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
25 | if err != nil {
26 | return nil, fmt.Errorf("cannot create docker swarm client: %v", err)
27 | }
28 | return dockerswarm.New(ctx, cli, logger)
29 | case "docker":
30 | cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
31 | if err != nil {
32 | return nil, fmt.Errorf("cannot create docker client: %v", err)
33 | }
34 | return docker.New(ctx, cli, logger)
35 | case "kubernetes":
36 | kubeclientConfig, err := rest.InClusterConfig()
37 | if err != nil {
38 | return nil, err
39 | }
40 | kubeclientConfig.QPS = config.Kubernetes.QPS
41 | kubeclientConfig.Burst = config.Kubernetes.Burst
42 |
43 | cli, err := k8s.NewForConfig(kubeclientConfig)
44 | if err != nil {
45 | return nil, err
46 | }
47 | return kubernetes.New(ctx, cli, logger, config.Kubernetes)
48 | }
49 | return nil, fmt.Errorf("unimplemented provider %s", config.Name)
50 | }
51 |
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config.env:
--------------------------------------------------------------------------------
1 | PROVIDER_NAME=envvar
2 | PROVIDER_AUTOSTOPONSTARTUP=false
3 | PROVIDER_KUBERNETES_QPS=16
4 | PROVIDER_KUBERNETES_BURST=32
5 | PROVIDER_KUBERNETES_DELIMITER=/
6 | SERVER_PORT=2222
7 | SERVER_BASE_PATH=/envvar/
8 | STORAGE_FILE=/tmp/envvar.json
9 | SESSIONS_DEFAULT_DURATION=2h
10 | SESSIONS_EXPIRATION_INTERVAL=2h
11 | LOGGING_LEVEL=debug
12 | STRATEGY_DYNAMIC_CUSTOM_THEMES_PATH=/tmp/envvar/themes
13 | STRATEGY_SHOW_DETAILS_BY_DEFAULT=false
14 | STRATEGY_DYNAMIC_DEFAULT_THEME=envvar
15 | STRATEGY_DYNAMIC_DEFAULT_REFRESH_FREQUENCY=2h
16 | STRATEGY_BLOCKING_DEFAULT_TIMEOUT=2h
17 | STRATEGY_BLOCKING_DEFAULT_REFRESH_FREQUENCY=2h
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config.yml:
--------------------------------------------------------------------------------
1 | provider:
2 | name: configfile
3 | auto-stop-on-startup: false
4 | kubernetes:
5 | qps: 64
6 | burst: 128
7 | delimiter: .
8 | server:
9 | port: 1111
10 | base-path: /configfile/
11 | storage:
12 | file: /tmp/configfile.json
13 | sessions:
14 | default-duration: 1h
15 | expiration-interval: 1h
16 | logging:
17 | level: trace
18 | strategy:
19 | dynamic:
20 | custom-themes-path: /tmp/configfile/themes
21 | show-details-by-default: false
22 | default-theme: configfile
23 | default-refresh-frequency: 1h
24 | blocking:
25 | default-timeout: 1h
26 | default-refresh-frequency: 1h
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config_cli_wanted.json:
--------------------------------------------------------------------------------
1 | {
2 | "Server": {
3 | "Port": 3333,
4 | "BasePath": "/cli/"
5 | },
6 | "Storage": {
7 | "File": "/tmp/cli.json"
8 | },
9 | "Provider": {
10 | "Name": "cli",
11 | "AutoStopOnStartup": false,
12 | "Kubernetes": {
13 | "QPS": 256,
14 | "Burst": 512,
15 | "Delimiter": "_"
16 | }
17 | },
18 | "Sessions": {
19 | "DefaultDuration": 10800000000000,
20 | "ExpirationInterval": 10800000000000
21 | },
22 | "Logging": {
23 | "Level": "info"
24 | },
25 | "Strategy": {
26 | "Dynamic": {
27 | "CustomThemesPath": "/tmp/cli/themes",
28 | "ShowDetailsByDefault": false,
29 | "DefaultTheme": "cli",
30 | "DefaultRefreshFrequency": 10800000000000
31 | },
32 | "Blocking": {
33 | "DefaultTimeout": 10800000000000,
34 | "DefaultRefreshFrequency": 10800000000000
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config_default.json:
--------------------------------------------------------------------------------
1 | {
2 | "Server": {
3 | "Port": 10000,
4 | "BasePath": "/"
5 | },
6 | "Storage": {
7 | "File": ""
8 | },
9 | "Provider": {
10 | "Name": "docker",
11 | "AutoStopOnStartup": true,
12 | "Kubernetes": {
13 | "QPS": 5,
14 | "Burst": 10,
15 | "Delimiter": "_"
16 | }
17 | },
18 | "Sessions": {
19 | "DefaultDuration": 300000000000,
20 | "ExpirationInterval": 20000000000
21 | },
22 | "Logging": {
23 | "Level": "info"
24 | },
25 | "Strategy": {
26 | "Dynamic": {
27 | "CustomThemesPath": "",
28 | "ShowDetailsByDefault": true,
29 | "DefaultTheme": "hacker-terminal",
30 | "DefaultRefreshFrequency": 5000000000
31 | },
32 | "Blocking": {
33 | "DefaultTimeout": 60000000000,
34 | "DefaultRefreshFrequency": 5000000000
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config_env_wanted.json:
--------------------------------------------------------------------------------
1 | {
2 | "Server": {
3 | "Port": 2222,
4 | "BasePath": "/envvar/"
5 | },
6 | "Storage": {
7 | "File": "/tmp/envvar.json"
8 | },
9 | "Provider": {
10 | "Name": "envvar",
11 | "AutoStopOnStartup": false,
12 | "Kubernetes": {
13 | "QPS": 16,
14 | "Burst": 32,
15 | "Delimiter": "/"
16 | }
17 | },
18 | "Sessions": {
19 | "DefaultDuration": 7200000000000,
20 | "ExpirationInterval": 7200000000000
21 | },
22 | "Logging": {
23 | "Level": "debug"
24 | },
25 | "Strategy": {
26 | "Dynamic": {
27 | "CustomThemesPath": "/tmp/envvar/themes",
28 | "ShowDetailsByDefault": false,
29 | "DefaultTheme": "envvar",
30 | "DefaultRefreshFrequency": 7200000000000
31 | },
32 | "Blocking": {
33 | "DefaultTimeout": 7200000000000,
34 | "DefaultRefreshFrequency": 7200000000000
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/cmd/sablier/testdata/config_yaml_wanted.json:
--------------------------------------------------------------------------------
1 | {
2 | "Server": {
3 | "Port": 1111,
4 | "BasePath": "/configfile/"
5 | },
6 | "Storage": {
7 | "File": "/tmp/configfile.json"
8 | },
9 | "Provider": {
10 | "Name": "configfile",
11 | "AutoStopOnStartup": false,
12 | "Kubernetes": {
13 | "QPS": 64,
14 | "Burst": 128,
15 | "Delimiter": "."
16 | }
17 | },
18 | "Sessions": {
19 | "DefaultDuration": 3600000000000,
20 | "ExpirationInterval": 3600000000000
21 | },
22 | "Logging": {
23 | "Level": "trace"
24 | },
25 | "Strategy": {
26 | "Dynamic": {
27 | "CustomThemesPath": "/tmp/configfile/themes",
28 | "ShowDetailsByDefault": false,
29 | "DefaultTheme": "configfile",
30 | "DefaultRefreshFrequency": 3600000000000
31 | },
32 | "Blocking": {
33 | "DefaultTimeout": 3600000000000,
34 | "DefaultRefreshFrequency": 3600000000000
35 | }
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
/cmd/sablier/theme.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/config"
6 | "github.com/sablierapp/sablier/pkg/theme"
7 | "log/slog"
8 | "os"
9 | )
10 |
11 | func setupTheme(ctx context.Context, conf config.Config, logger *slog.Logger) (*theme.Themes, error) {
12 | if conf.Strategy.Dynamic.CustomThemesPath != "" {
13 | logger.DebugContext(ctx, "loading themes from custom theme path", slog.String("path", conf.Strategy.Dynamic.CustomThemesPath))
14 | custom := os.DirFS(conf.Strategy.Dynamic.CustomThemesPath)
15 | t, err := theme.NewWithCustomThemes(custom, logger)
16 | if err != nil {
17 | return nil, err
18 | }
19 | return t, nil
20 | }
21 | logger.DebugContext(ctx, "loading themes without custom theme path", slog.String("reason", "--strategy.dynamic.custom-themes-path is empty"))
22 | t, err := theme.New(logger)
23 | 	if err != nil {
24 | 		return nil, err
25 | 	}
26 | 	return t, nil
27 | }
28 |
--------------------------------------------------------------------------------
/cmd/version/version.go:
--------------------------------------------------------------------------------
1 | package version
2 |
3 | import (
4 | "fmt"
5 | "github.com/sablierapp/sablier/pkg/version"
6 |
7 | "github.com/spf13/cobra"
8 | )
9 |
10 | func NewCmd() *cobra.Command {
11 | return &cobra.Command{
12 | Use: "version",
13 | Short: "Print the Sablier version",
14 | Run: func(cmd *cobra.Command, args []string) {
15 | fmt.Println(version.Info())
16 | },
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/docker/sablier.yaml:
--------------------------------------------------------------------------------
1 | server:
2 | port: 10000
3 | base-path: /
4 | storage:
5 | file: /etc/sablier/state.json
6 | provider:
7 | name: docker
8 | logging:
9 | level: info
10 | strategy:
11 | dynamic:
12 | custom-themes-path: /etc/sablier/themes
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | sablierapp.dev
--------------------------------------------------------------------------------
/docs/README.md:
--------------------------------------------------------------------------------
1 | # Sablier - Scale to Zero
2 |
3 | Sablier is **free** and **open-source** software that can scale your workloads on demand.
4 |
5 | 
6 |
7 | Your workloads can be a Docker container, a Kubernetes deployment, and more (see [providers](providers/overview) for the full list).
8 |
9 |
10 | Sablier is an API that starts containers for a given duration.
11 |
12 | It provides integrations with multiple reverse proxies and different loading strategies,
13 | allowing you to start your containers on demand and shut them down automatically
14 | as soon as there's no activity.
15 |
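To make this concrete, here is a minimal sketch of starting an instance through the HTTP API. The route and query parameters used here (`/api/strategies/dynamic`, `names`, `session_duration`) are assumptions based on the strategies documentation and the plugin configurations later in these docs; check `internal/api` for the authoritative routes.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Ask Sablier to start the "whoami" instance and keep it up for a
	// 5-minute session. Route and parameter names are assumptions, not a
	// verified contract.
	q := url.Values{}
	q.Set("names", "whoami")
	q.Set("session_duration", "5m")

	resp, err := http.Get("http://localhost:10000/api/strategies/dynamic?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```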
16 | ## Glossary
17 |
18 | I'll use these terms in order to be provider-agnostic.
19 |
20 | - **Session**: A Session is a set of **instances**
21 | - **Instance**: An instance is either a docker container, docker swarm service, kubernetes deployment or kubernetes statefulset
22 |
23 | ## Credits
24 |
25 | - [Hourglass icons created by Vectors Market - Flaticon](https://www.flaticon.com/free-icons/hourglass)
26 | - [tarampampam/error-pages](https://github.com/tarampampam/error-pages/) for the themes
--------------------------------------------------------------------------------
/docs/_sidebar.md:
--------------------------------------------------------------------------------
1 | - [Introduction](/)
2 | - [Getting started](/getting-started)
3 | - [Installation](/installation)
4 | - [Configuration](/configuration)
5 | - [Strategies](/strategies)
6 | - [Themes](/themes)
7 | - [Versioning](/versioning)
8 | - **Providers**
9 | - [Overview](/providers/overview)
10 | - [Docker](/providers/docker)
11 | - [Docker Swarm](/providers/docker_swarm)
12 | - [Kubernetes](/providers/kubernetes)
13 | - **Reverse Proxy Plugins**
14 | - [Overview](/plugins/overview)
15 | - [Apache APISIX](/plugins/apacheapisix)
16 | - [Caddy](/plugins/caddy)
17 | - [Envoy](/plugins/envoy)
18 | - [Istio](/plugins/istio)
19 | - [Nginx (NJS)](/plugins/nginx)
20 | - [Nginx (ProxyWasm)](/plugins/nginx_proxywasm)
21 | - [Traefik](/plugins/traefik)
22 | - **Guides**
23 | - [Overview](/guides/overview)
24 | - [VSCode Server with Traefik and Kubernetes](/guides/code-server-traefik-kubernetes.md)
25 | - **Links**
26 | - [Github](https://github.com/sablierapp/sablier)
27 | - [Docker Hub](https://hub.docker.com/r/sablierapp/sablier)
--------------------------------------------------------------------------------
/docs/assets/img/apacheapisix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/apacheapisix.png
--------------------------------------------------------------------------------
/docs/assets/img/banner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/banner.png
--------------------------------------------------------------------------------
/docs/assets/img/caddy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/caddy.png
--------------------------------------------------------------------------------
/docs/assets/img/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/demo.gif
--------------------------------------------------------------------------------
/docs/assets/img/docker.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/assets/img/docker_swarm.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/docker_swarm.png
--------------------------------------------------------------------------------
/docs/assets/img/envoy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/envoy.png
--------------------------------------------------------------------------------
/docs/assets/img/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/favicon.ico
--------------------------------------------------------------------------------
/docs/assets/img/ghost.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/ghost.png
--------------------------------------------------------------------------------
/docs/assets/img/github.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/assets/img/hacker-terminal.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/hacker-terminal.png
--------------------------------------------------------------------------------
/docs/assets/img/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/icon.png
--------------------------------------------------------------------------------
/docs/assets/img/integration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/integration.png
--------------------------------------------------------------------------------
/docs/assets/img/istio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/istio.png
--------------------------------------------------------------------------------
/docs/assets/img/kubernetes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/kubernetes.png
--------------------------------------------------------------------------------
/docs/assets/img/matrix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/matrix.png
--------------------------------------------------------------------------------
/docs/assets/img/nginx.svg:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/docs/assets/img/reverse-proxy-integration.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/reverse-proxy-integration.png
--------------------------------------------------------------------------------
/docs/assets/img/shuffle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/shuffle.png
--------------------------------------------------------------------------------
/docs/assets/img/traefik.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/docs/assets/img/traefik.png
--------------------------------------------------------------------------------
/docs/guides/overview.md:
--------------------------------------------------------------------------------
1 | # Guides
2 |
3 | Guides help you understand how to use this API in more complex use cases.
4 |
5 | Because Sablier combines multiple reverse proxies with multiple providers, the complexity multiplies.
--------------------------------------------------------------------------------
/docs/health.md:
--------------------------------------------------------------------------------
1 | ## Sablier Healthcheck
2 |
3 | ### Using the `/health` route
4 |
5 | You can use the route `/health` to check for healthiness; a minimal probe is sketched below.
6 |
7 | - Returns 200 `OK` when ready
8 | - Returns 503 `Service Unavailable` when terminating
9 |
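A minimal Go probe matching the status codes above; it mirrors what `cmd/healthcheck/healthcheck.go` does, using the documented default URL.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
)

func main() {
	// 200 OK means ready; 503 Service Unavailable means terminating.
	resp, err := http.Get("http://localhost:10000/health")
	if err != nil {
		fmt.Fprintln(os.Stderr, "unhealthy:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		fmt.Fprintln(os.Stderr, "unhealthy:", resp.Status)
		os.Exit(1)
	}
	fmt.Println("healthy:", resp.Status)
}
```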
10 | ### Using the `sablier health` command
11 |
12 | You can use the command `sablier health` to check for healthiness.
13 |
14 | `sablier health` takes one argument, `--url`, which defaults to `http://localhost:10000/health`.
15 |
16 | ```yml
17 | services:
18 | sablier:
19 | image: sablierapp/sablier:1.9.0
20 | healthcheck:
21 | test: ["CMD", "sablier", "health"]
22 | interval: 1m30s
23 | ```
--------------------------------------------------------------------------------
/docs/installation.md:
--------------------------------------------------------------------------------
1 | # Install Sablier on its own
2 |
3 | You can install Sablier with the following flavors:
4 |
5 | - Use the Docker image
6 | - Use the binary distribution
7 | - Compile your binary from the sources
8 |
9 | ## Use the Docker image
10 |
11 | - **Docker Hub**: [sablierapp/sablier](https://hub.docker.com/r/sablierapp/sablier)
12 | - **GitHub Container Registry**: [ghcr.io/sablierapp/sablier](https://github.com/sablierapp/sablier/pkgs/container/sablier)
13 |
14 | Choose one of the Docker images and run it with the sample configuration file:
15 |
16 | - [sablier.yaml](https://raw.githubusercontent.com/sablierapp/sablier/main/sablier.sample.yaml)
17 |
18 | ```bash
19 | docker run -d -p 10000:10000 \
20 | -v $PWD/sablier.yaml:/etc/sablier/sablier.yaml sablierapp/sablier:1.9.0
21 | ```
22 |
23 | ## Use the binary distribution
24 |
25 | Grab the latest binary from the [releases](https://github.com/sablierapp/sablier/releases) page.
26 |
27 | And run it:
28 |
29 | ```bash
30 | ./sablier --help
31 | ```
32 |
33 | ## Compile your binary from the sources
34 |
35 | ```bash
36 | git clone git@github.com:sablierapp/sablier.git
37 | cd sablier
38 | make
39 | # Output name will change depending on your OS and architecture
40 | ./sablier_draft_linux-amd64
41 | ```
42 |
--------------------------------------------------------------------------------
/docs/plugins/apacheapisix.md:
--------------------------------------------------------------------------------
1 | # Apache APISIX Plugin
2 |
3 | The Apache APISIX Plugin is a WASM Plugin written with the Proxy Wasm SDK.
4 |
5 | ## Provider compatibility grid
6 |
7 | | Provider | Dynamic | Blocking |
8 | |----------------------------------------|:-------:|:--------:|
9 | | [Docker](../providers/docker) | ✅ | ✅ |
10 | | [Docker Swarm](../providers/docker_swarm) | ❓ | ❓ |
11 | | [Kubernetes](../providers/kubernetes) | ❓ | ❓ |
12 |
13 | ## Install the plugin to Apache APISIX
14 |
15 | ```yaml
16 | wasm:
17 | plugins:
18 | - name: proxywasm_sablier_plugin
19 | priority: 7997
20 | file: /wasm/sablierproxywasm.wasm # Downloaded WASM Filter path
21 | ```
22 |
23 | ## Configuration
24 |
25 | You can have the following configuration:
26 |
27 | ```yaml
28 | routes:
29 | - uri: "/"
30 | plugins:
31 | proxywasm_sablier_plugin:
32 | conf: '{ "sablier_url": "sablier:10000", "group": ["my-group"], "session_duration": "1m", "dynamic": { "display_name": "Dynamic Whoami" } }'
33 | ```
--------------------------------------------------------------------------------
/docs/plugins/caddy.md:
--------------------------------------------------------------------------------
1 | # Caddy Sablier Plugin
2 |
3 | The Caddy Sablier Plugin integrates Sablier with the Caddy reverse proxy.
4 |
5 | ## Provider compatibility grid
6 |
7 | | Provider | Dynamic | Blocking |
8 | | --------------------------------------- | :-----: | :------: |
9 | | [Docker](../providers/docker) | ✅ | ✅ |
10 | | [Docker Swarm](../providers/docker_swarm) | ✅ | ✅ |
11 | | [Kubernetes](../providers/kubernetes) | ❌ | ❌ |
12 |
13 | ## Install the plugin to Caddy
14 |
15 | Because Caddy does not load plugins at runtime, you need to build a Caddy image that includes the plugin source code.
16 |
17 | In order to use the custom plugin for Caddy, you need to bundle it with Caddy.
18 | Here I'll show you two options with Docker.
19 |
20 |
21 |
22 | #### **Using the provided Dockerfile**
23 |
24 | ```bash
25 | docker build https://github.com/sablierapp/sablier.git#v1.8.1:plugins/caddy \
26 |   --build-arg=CADDY_VERSION=2.8.4 \
27 |   -t caddy:2.8.4-with-sablier
28 | ```
29 |
30 | #### **Updating your Caddy Dockerfile**
31 |
32 | ```docker
33 | ARG CADDY_VERSION=2.8.4
34 | FROM caddy:${CADDY_VERSION}-builder AS builder
35 |
36 | RUN xcaddy build \
37 | --with github.com/sablierapp/sablier/plugins/caddy
38 |
39 | FROM caddy:${CADDY_VERSION}
40 |
41 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy
42 | ```
43 |
44 |
45 |
46 | ## Configuration
47 |
48 | You can have the following configuration:
49 |
50 | ```Caddyfile
51 | :80 {
52 | route /my/route {
53 | sablier [=http://sablier:10000] {
54 | [names container1,container2,...]
55 | [group mygroup]
56 | [session_duration 30m]
57 | dynamic {
58 | [display_name This is my display name]
59 | [show_details yes|true|on]
60 | [theme hacker-terminal]
61 | [refresh_frequency 2s]
62 | }
63 | blocking {
64 | [timeout 1m]
65 | }
66 | }
67 | reverse_proxy myservice:port
68 | }
69 | }
70 | ```
71 |
72 | ### Example with a minimal configuration
73 |
74 | Almost all options are optional, and you can set up very simple rules to use the server default values.
75 |
76 | ```Caddyfile
77 | :80 {
78 | route /my/route {
79 | sablier {
80 | group mygroup
81 | dynamic
82 | }
83 | reverse_proxy myservice:port
84 | }
85 | }
86 | ```
87 |
--------------------------------------------------------------------------------
/docs/plugins/envoy.md:
--------------------------------------------------------------------------------
1 | # Envoy Plugin
2 |
3 | The Envoy Plugin is a WASM Plugin written with the Proxy Wasm SDK.
4 |
5 | ## Provider compatibility grid
6 |
7 | | Provider | Dynamic | Blocking |
8 | |-----------------------------------------|:-------:|:--------:|
9 | | [Docker](../providers/docker) | ✅ | ✅ |
10 | | [Docker Swarm](../providers/docker_swarm) | ❓ | ❓ |
11 | | [Kubernetes](../providers/kubernetes) | ❓ | ❓ |
12 |
13 | ## Configuration
14 |
15 | You can have the following configuration:
16 |
17 | ```yaml
18 | http_filters:
19 | - name: sablier-wasm-whoami-dynamic
20 | disabled: true
21 | typed_config:
22 | "@type": type.googleapis.com/udpa.type.v1.TypedStruct
23 | type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm
24 | value:
25 | config:
26 | name: "sablier-wasm-whoami-dynamic"
27 | root_id: "sablier-wasm-whoami-dynamic"
28 | configuration:
29 | "@type": "type.googleapis.com/google.protobuf.StringValue"
30 | value: |
31 | {
32 | "sablier_url": "sablier:10000",
33 | "cluster": "sablier",
34 | "names": ["docker_classic_e2e-whoami-1"],
35 | "session_duration": "1m",
36 | "dynamic": {
37 | "display_name": "Dynamic Whoami",
38 | "theme": "hacker-terminal"
39 | }
40 | }
41 | vm_config:
42 | runtime: "envoy.wasm.runtime.v8"
43 | vm_id: "vm.sablier.sablier-wasm-whoami-dynamic"
44 | code:
45 | local:
46 | filename: "/etc/sablierproxywasm.wasm"
47 | configuration: { }
48 | ```
--------------------------------------------------------------------------------
/docs/plugins/istio.md:
--------------------------------------------------------------------------------
1 | # Istio Plugin
2 |
3 | The Istio Plugin is a WASM Plugin written with the Proxy Wasm SDK.
4 |
5 | ## Provider compatibility grid
6 |
7 | | Provider | Dynamic | Blocking |
8 | |-----------------------------------------|:-------:|:--------:|
9 | | [Docker](../providers/docker) | ❌ | ❌ |
10 | | [Docker Swarm](../providers/docker_swarm) | ❌ | ❌ |
11 | | [Kubernetes](../providers/kubernetes) | ✅ | ✅ |
12 |
13 | ## Configuration
14 |
15 | You can have the following configuration:
16 |
17 | !> This only works for ingress gateways.
18 | !> Attaching this filter to a sidecar would not work, because the sidecar itself gets shut down when scaling to zero.
19 |
20 | ```yaml
21 | apiVersion: extensions.istio.io/v1alpha1
22 | kind: WasmPlugin
23 | metadata:
24 | name: sablier-wasm-whoami-dynamic
25 | namespace: istio-system
26 | spec:
27 | selector:
28 | matchLabels:
29 | istio: ingressgateway
30 | url: file:///opt/filters/sablierproxywasm.wasm/..data/sablierproxywasm.wasm
31 | # Use https://istio.io/latest/docs/reference/config/proxy_extensions/wasm-plugin/#WasmPlugin-TrafficSelector
32 | # To specify which service to apply this filter only
33 | phase: UNSPECIFIED_PHASE
34 | pluginConfig:
35 | {
36 | "sablier_url": "sablier.sablier-system.svc.cluster.local",
37 | "cluster": "outbound|10000||sablier.sablier-system.svc.cluster.local",
38 | "names": [ "deployment_default_whoami_1" ],
39 | "session_duration": "1m",
40 | "dynamic": {
41 | "display_name": "Dynamic Whoami",
42 | "theme": "hacker-terminal"
43 | }
44 | }
45 | ```
--------------------------------------------------------------------------------
/docs/providers/docker.md:
--------------------------------------------------------------------------------
1 | # Docker
2 |
3 | The Docker provider communicates with the `docker.sock` socket to start and stop containers on demand.
4 |
5 | ## Use the Docker provider
6 |
7 | In order to use the docker provider you can configure the [provider.name](../configuration) property.
8 |
9 |
10 |
11 | #### **File (YAML)**
12 |
13 | ```yaml
14 | provider:
15 | name: docker
16 | ```
17 |
18 | #### **CLI**
19 |
20 | ```bash
21 | sablier start --provider.name=docker
22 | ```
23 |
24 | #### **Environment Variable**
25 |
26 | ```bash
27 | PROVIDER_NAME=docker
28 | ```
29 |
30 |
31 |
32 | !> **Ensure that Sablier has access to the docker socket!**
33 |
34 | ```yaml
35 | services:
36 | sablier:
37 | image: sablierapp/sablier:1.9.0
38 | command:
39 | - start
40 | - --provider.name=docker
41 | volumes:
42 | - '/var/run/docker.sock:/var/run/docker.sock'
43 | ```
44 |
45 | ## Register containers
46 |
47 | For Sablier to work, it needs to know which docker container to start and stop.
48 |
49 | You have to register your containers by opting-in with labels.
50 |
51 | ```yaml
52 | services:
53 | whoami:
54 | image: acouvreur/whoami:v1.10.2
55 | labels:
56 | - sablier.enable=true
57 | - sablier.group=mygroup
58 | ```
59 |
60 | ## How does Sablier know when a container is ready?
61 |
62 | If the container defines a healthcheck, Sablier waits for it to report healthy before setting the `ready` status.
63 |
64 | If the container does not define a healthcheck, it is considered `ready` as soon as it has the status `started`.
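A sketch of that readiness rule using the Docker SDK (`github.com/docker/docker/client`). This is illustrative only; the actual logic lives in `pkg/provider/docker/container_inspect.go`.

```go
package example

import (
	"context"

	"github.com/docker/docker/client"
)

// isReady prefers the healthcheck verdict when one is defined, and falls
// back to the raw running state otherwise.
func isReady(ctx context.Context, cli *client.Client, name string) (bool, error) {
	c, err := cli.ContainerInspect(ctx, name)
	if err != nil {
		return false, err
	}
	if c.State.Health != nil {
		return c.State.Health.Status == "healthy", nil
	}
	return c.State.Running, nil
}
```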
--------------------------------------------------------------------------------
/docs/providers/docker_swarm.md:
--------------------------------------------------------------------------------
1 | # Docker Swarm
2 |
3 | The Docker Swarm provider communicates with the `docker.sock` socket to scale services on demand.
4 |
5 | ## Use the Docker Swarm provider
6 |
7 | In order to use the docker swarm provider you can configure the [provider.name](../configuration) property.
8 |
9 |
10 |
11 | #### **File (YAML)**
12 |
13 | ```yaml
14 | provider:
15 | name: docker_swarm # or swarm
16 | ```
17 |
18 | #### **CLI**
19 |
20 | ```bash
21 | sablier start --provider.name=docker_swarm # or swarm
22 | ```
23 |
24 | #### **Environment Variable**
25 |
26 | ```bash
27 | PROVIDER_NAME=docker_swarm # or swarm
28 | ```
29 |
30 |
31 |
32 |
33 | !> **Ensure that Sablier has access to the docker socket!**
34 |
35 | ```yaml
36 | services:
37 | sablier:
38 | image: sablierapp/sablier:1.9.0
39 | command:
40 | - start
41 | - --provider.name=docker_swarm # or swarm
42 | volumes:
43 | - '/var/run/docker.sock:/var/run/docker.sock'
44 | ```
45 |
46 | ## Register services
47 |
48 | For Sablier to work, it needs to know which docker services to scale up and down.
49 |
50 | You have to register your services by opting-in with labels.
51 |
52 | ```yaml
53 | services:
54 | whoami:
55 | image: acouvreur/whoami:v1.10.2
56 | deploy:
57 | labels:
58 | - sablier.enable=true
59 | - sablier.group=mygroup
60 | ```
61 |
62 | ## How does Sablier know when a service is ready?
63 |
64 | Sablier checks the service replicas. As soon as the current replica count matches the desired count, the service is considered `ready`.
65 |
66 | ?> Docker Swarm uses the container's healthcheck to decide whether a container is up and running, so the provider has native healthcheck support.
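A sketch of that replica comparison using the Docker SDK; `ServiceList` with `Status: true` asks the daemon to include running and desired task counts. Illustrative only; the actual logic lives in `pkg/provider/dockerswarm`.

```go
package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// serviceReady reports whether the named service has as many running tasks
// as desired tasks.
func serviceReady(ctx context.Context, cli *client.Client, name string) (bool, error) {
	services, err := cli.ServiceList(ctx, types.ServiceListOptions{Status: true})
	if err != nil {
		return false, err
	}
	for _, svc := range services {
		if svc.Spec.Name == name && svc.ServiceStatus != nil {
			return svc.ServiceStatus.RunningTasks == svc.ServiceStatus.DesiredTasks, nil
		}
	}
	return false, nil
}
```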
--------------------------------------------------------------------------------
/docs/providers/kubernetes.md:
--------------------------------------------------------------------------------
1 | # Kubernetes
2 |
3 | Sablier assumes that it is deployed inside the Kubernetes cluster and uses the in-cluster Kubernetes API.
4 |
5 | ## Use the Kubernetes provider
6 |
7 | In order to use the kubernetes provider you can configure the [provider.name](../configuration) property.
8 |
9 |
10 |
11 | #### **File (YAML)**
12 |
13 | ```yaml
14 | provider:
15 | name: kubernetes
16 | ```
17 |
18 | #### **CLI**
19 |
20 | ```bash
21 | sablier start --provider.name=kubernetes
22 | ```
23 |
24 | #### **Environment Variable**
25 |
26 | ```bash
27 | PROVIDER_NAME=kubernetes
28 | ```
29 |
30 |
31 |
32 | !> **Ensure that Sablier has the necessary roles!**
33 |
34 | ```yaml
35 | apiVersion: rbac.authorization.k8s.io/v1
36 | kind: ClusterRole
37 | metadata:
38 | name: sablier
39 | rules:
40 | - apiGroups:
41 | - apps
42 | - ""
43 | resources:
44 | - deployments
45 | - statefulsets
46 | verbs:
47 | - get # Retrieve info about specific dep
48 | - list # Events
49 | - watch # Events
50 | - apiGroups:
51 | - apps
52 | - ""
53 | resources:
54 | - deployments/scale
55 | - statefulsets/scale
56 | verbs:
57 | - patch # Scale up and down
58 | - update # Scale up and down
59 | - get # Retrieve info about specific dep
60 | - list # Events
61 | - watch # Events
62 | ```
63 |
64 | ## Register Deployments
65 |
66 | For Sablier to work, it needs to know which deployments to scale up and down.
67 |
68 | You have to register your deployments by opting-in with labels.
69 |
70 |
71 | ```yaml
72 | apiVersion: apps/v1
73 | kind: Deployment
74 | metadata:
75 | name: whoami
76 | labels:
77 | app: whoami
78 | sablier.enable: "true"
79 | sablier.group: mygroup
80 | spec:
81 | selector:
82 | matchLabels:
83 | app: whoami
84 | template:
85 | metadata:
86 | labels:
87 | app: whoami
88 | spec:
89 | containers:
90 | - name: whoami
91 | image: acouvreur/whoami:v1.10.2
92 | ```
93 |
94 | ## How does Sablier know when a deployment is ready?
95 |
96 | Sablier checks the deployment replicas. As soon as the number of ready replicas matches the desired replica count, the deployment is considered `ready`.
97 |
98 | ?> Kubernetes uses the Pod's readiness probes to check if the Pod is up and running. So the provider has native healthcheck support.
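99 |
100 | Because Sablier compares ready replicas, it is recommended to define a readiness probe on your Pods. A minimal sketch (the probe path, port and period are illustrative, not taken from this project):
101 |
102 | ```yaml
103 | apiVersion: apps/v1
104 | kind: Deployment
105 | metadata:
106 |   name: whoami
107 |   labels:
108 |     app: whoami
109 |     sablier.enable: "true"
110 | spec:
111 |   selector:
112 |     matchLabels:
113 |       app: whoami
114 |   template:
115 |     metadata:
116 |       labels:
117 |         app: whoami
118 |     spec:
119 |       containers:
120 |         - name: whoami
121 |           image: acouvreur/whoami:v1.10.2
122 |           readinessProbe:
123 |             httpGet:
124 |               path: /health
125 |               port: 80
126 |             periodSeconds: 5
127 | ```
128 |
129 | With the probe in place, a replica only counts as ready once its Pod passes the readiness check, and Sablier reports `ready` accordingly.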
--------------------------------------------------------------------------------
/docs/providers/overview.md:
--------------------------------------------------------------------------------
1 | # Providers
2 |
3 | ## What is a Provider?
4 |
5 | A Provider is how Sablier interacts with your instances.
6 |
7 | A Provider typically has the following capabilities:
8 | - Start an instance
9 | - Stop an instance
10 | - Get the current status of an instance
11 | - Listen for instance lifecycle events (started, stopped)
12 |
13 | ## Available providers
14 |
15 | | Provider | Name | Details |
16 | |------------------------------------------------------------|---------------------------|------------------------------------------------------------------|
17 | | [Docker](docker) | `docker` | Stop and start **containers** on demand |
18 | | [Docker Swarm](docker_swarm)                                | `docker_swarm` or `swarm` | Scale **services** down to zero and back up on demand             |
19 | | [Kubernetes](kubernetes)                                    | `kubernetes`              | Scale **deployments** and **statefulsets** down to zero and back up on demand |
20 | | [Podman](https://github.com/sablierapp/sablier/issues/70) | `podman` | [See #70](https://github.com/sablierapp/sablier/issues/70) |
21 | | [ECS](https://github.com/sablierapp/sablier/issues/116) | `ecs` | [See #116](https://github.com/sablierapp/sablier/issues/116) |
22 | | [Systemd](https://github.com/sablierapp/sablier/issues/148) | `systemd` | [See #148](https://github.com/sablierapp/sablier/issues/148) |
23 |
24 | *Your Provider is not on the list? [Open an issue to request the missing provider here!](https://github.com/sablierapp/sablier/issues/new?assignees=&labels=enhancement%2C+provider&projects=&template=instance-provider-request.md&title=Add+%60%5BPROVIDER%5D%60+provider)*
25 |
26 | [See the active issues about the providers](https://github.com/sablierapp/sablier/issues?q=is%3Aopen+is%3Aissue+label%3Aprovider)
--------------------------------------------------------------------------------
/docs/strategies.md:
--------------------------------------------------------------------------------
1 | # Strategies
2 |
3 | ## Dynamic Strategy
4 |
5 | The **Dynamic Strategy** provides a waiting page for your session.
6 |
7 | 
8 |
9 | ?> This strategy is well suited for users who access a frontend directly and expect to see a loading page.
10 |
11 | ```plantuml
12 | @startuml
13 |
14 | User -> Proxy: Website Request
15 | Proxy -> Sablier: Reverse Proxy Plugin Request Session Status
16 | Sablier -> Provider: Request Instance Status
17 | Sablier <-- Provider: Response Instance Status
18 | Proxy <-- Sablier: Returns the X-Sablier-Session-Status Header
19 |
20 | alt `X-Sablier-Session-Status` value is `not-ready`
21 |
22 | User <-- Proxy: Serve the waiting page
23 | loop until `X-Sablier-Session-Status` value is `ready`
24 | User -> Proxy: Self-Reload Waiting Page
25 | Proxy -> Sablier: Reverse Proxy Plugin Request Session Status
26 | Sablier -> Provider: Request Instance Status
27 | Sablier <-- Provider: Response Instance Status
28 | Proxy <-- Sablier: Returns the waiting page
29 | User <-- Proxy: Serve the waiting page
30 | end
31 |
32 | end
33 |
34 | User <-- Proxy: Content
35 |
36 | @enduml
37 | ```
38 | ## Blocking Strategy
39 |
40 | The **Blocking Strategy** holds the request open until your session is ready.
41 |
42 | ?> This strategy is well suited for API communication.
43 |
44 | ```plantuml
45 | @startuml
46 |
47 | User -> Proxy: Website Request
48 | Proxy -> Sablier: Reverse Proxy Plugin Request Session Status
49 | Sablier -> Provider: Request Instance Status
50 |
51 | alt `Instance` status is `not-ready`
52 | loop until `Instance` status is `ready` or the timeout is reached
53 | Sablier -> Provider: Request Instance Status
54 | Sablier <-- Provider: Response Instance Status
55 | end
56 | end
57 |
58 | Sablier <-- Provider: Response Instance Status
59 | Proxy <-- Sablier: Response
60 |
61 | User <-- Proxy: Content
62 |
63 | @enduml
64 | ```
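65 |
66 | Both strategies have defaults that can be tuned through the configuration. A minimal sketch, assuming the key layout mirrors the `Strategy` configuration struct, showing this project's default values:
67 |
68 | ```yaml
69 | strategy:
70 |   dynamic:
71 |     defaultTheme: hacker-terminal
72 |     defaultRefreshFrequency: 5s
73 |   blocking:
74 |     defaultTimeout: 1m
75 |     defaultRefreshFrequency: 5s
76 | ```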
--------------------------------------------------------------------------------
/docs/versioning.md:
--------------------------------------------------------------------------------
1 | # Versioning
2 |
3 | Sablier follows the [Semantic Versioning 2.0.0](https://semver.org/) Specification (SemVer).
4 |
5 | Given a version number MAJOR.MINOR.PATCH, increment the:
6 |
7 | 1. MAJOR version when you make incompatible API changes
8 | 2. MINOR version when you add functionality in a backwards compatible manner
9 | 3. PATCH version when you make backwards compatible bug fixes
10 |
11 | Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR.PATCH format.
12 |
13 | This process is fully automated using [Semantic Release](https://github.com/semantic-release/semantic-release).
14 |
15 | The configuration is [release.config.js](https://github.com/sablierapp/sablier/blob/main/release.config.js).
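16 |
17 | In practice, the release type is derived from commit messages. A sketch, assuming Semantic Release's default commit-message conventions:
18 |
19 | ```
20 | fix: <description>                      -> PATCH release
21 | feat: <description>                     -> MINOR release
22 | commit with a BREAKING CHANGE: footer   -> MAJOR release
23 | ```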
--------------------------------------------------------------------------------
/go.work:
--------------------------------------------------------------------------------
1 | go 1.24.0
2 |
3 | toolchain go1.24.0
4 |
5 | use (
6 | .
7 | ./plugins/caddy
8 | ./plugins/proxywasm
9 | ./plugins/traefik
10 | )
11 |
--------------------------------------------------------------------------------
/internal/api/abort.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "github.com/tniswong/go.rfcx/rfc7807"
6 | "net/url"
7 | )
8 |
9 | func AbortWithProblemDetail(c *gin.Context, p rfc7807.Problem) {
10 | _ = c.Error(p)
11 | instance, err := url.Parse(c.Request.RequestURI)
12 | if err != nil {
13 | instance = &url.URL{}
14 | }
15 | p.Instance = *instance
16 | c.Header("Content-Type", rfc7807.JSONMediaType)
17 | c.IndentedJSON(p.Status, p)
18 | }
19 |
--------------------------------------------------------------------------------
/internal/api/api.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/config"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "github.com/sablierapp/sablier/pkg/theme"
8 | "time"
9 | )
10 |
11 | //go:generate go tool mockgen -package apitest -source=api.go -destination=apitest/mocks_sablier.go *
12 |
13 | type Sablier interface {
14 | RequestSession(ctx context.Context, names []string, duration time.Duration) (*sablier.SessionState, error)
15 | RequestSessionGroup(ctx context.Context, group string, duration time.Duration) (*sablier.SessionState, error)
16 | RequestReadySession(ctx context.Context, names []string, duration time.Duration, timeout time.Duration) (*sablier.SessionState, error)
17 | RequestReadySessionGroup(ctx context.Context, group string, duration time.Duration, timeout time.Duration) (*sablier.SessionState, error)
18 | }
19 |
20 | type ServeStrategy struct {
21 | Theme *theme.Themes
22 |
23 | Sablier Sablier
24 | StrategyConfig config.Strategy
25 | SessionsConfig config.Sessions
26 | }
27 |
--------------------------------------------------------------------------------
/internal/api/api_response_headers.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "github.com/sablierapp/sablier/pkg/sablier"
6 | )
7 |
8 | const SablierStatusHeader = "X-Sablier-Session-Status"
9 | const SablierStatusReady = "ready"
10 | const SablierStatusNotReady = "not-ready"
11 |
12 | func AddSablierHeader(c *gin.Context, session *sablier.SessionState) {
13 | if session.IsReady() {
14 | c.Header(SablierStatusHeader, SablierStatusReady)
15 | } else {
16 | c.Header(SablierStatusHeader, SablierStatusNotReady)
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/internal/api/api_test.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "github.com/neilotoole/slogt"
6 | "github.com/sablierapp/sablier/internal/api/apitest"
7 | 	"github.com/sablierapp/sablier/pkg/config"
8 | "github.com/sablierapp/sablier/pkg/theme"
9 | "go.uber.org/mock/gomock"
10 | "gotest.tools/v3/assert"
11 | "net/http"
12 | "net/http/httptest"
13 | "testing"
14 | )
15 |
16 | func NewApiTest(t *testing.T) (app *gin.Engine, router *gin.RouterGroup, strategy *ServeStrategy, mock *apitest.MockSablier) {
17 | t.Helper()
18 | gin.SetMode(gin.TestMode)
19 | ctrl := gomock.NewController(t)
20 | th, err := theme.New(slogt.New(t))
21 | assert.NilError(t, err)
22 |
23 | app = gin.New()
24 | router = app.Group("/api")
25 | mock = apitest.NewMockSablier(ctrl)
26 | strategy = &ServeStrategy{
27 | Theme: th,
28 | Sablier: mock,
29 | 		StrategyConfig: config.NewStrategyConfig(),
30 | 		SessionsConfig: config.NewSessionsConfig(),
31 | }
32 |
33 | return app, router, strategy, mock
34 | }
35 |
36 | // PerformRequest runs an API request with an empty request body.
37 | func PerformRequest(r http.Handler, method, path string) *httptest.ResponseRecorder {
38 | req, _ := http.NewRequest(method, path, nil)
39 | w := httptest.NewRecorder()
40 | r.ServeHTTP(w, req)
41 |
42 | return w
43 | }
44 |
--------------------------------------------------------------------------------
/internal/api/health.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "context"
5 | "net/http"
6 |
7 | "github.com/gin-gonic/gin"
8 | )
9 |
10 | type Health struct {
11 | TerminatingStatusCode int `description:"Terminating status code" json:"terminatingStatusCode,omitempty" yaml:"terminatingStatusCode,omitempty" export:"true"`
12 | terminating bool
13 | }
14 |
15 | func (h *Health) SetDefaults() {
16 | h.TerminatingStatusCode = http.StatusServiceUnavailable
17 | }
18 |
19 | func (h *Health) WithContext(ctx context.Context) {
20 | go func() {
21 | <-ctx.Done()
22 | h.terminating = true
23 | }()
24 | }
25 |
26 | func (h *Health) ServeHTTP(c *gin.Context) {
27 | statusCode := http.StatusOK
28 | if h.terminating {
29 | statusCode = h.TerminatingStatusCode
30 | }
31 |
32 | c.String(statusCode, http.StatusText(statusCode))
33 | }
34 |
35 | func Healthcheck(ctx context.Context, router *gin.RouterGroup) {
36 | health := Health{}
37 | health.SetDefaults()
38 | health.WithContext(ctx)
39 | router.GET("/health", health.ServeHTTP)
40 | }
41 |
--------------------------------------------------------------------------------
/internal/api/problemdetail.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "github.com/sablierapp/sablier/pkg/sablier"
5 | "github.com/sablierapp/sablier/pkg/theme"
6 | "github.com/tniswong/go.rfcx/rfc7807"
7 | "net/http"
8 | )
9 |
10 | func ProblemError(e error) rfc7807.Problem {
11 | return rfc7807.Problem{
12 | Type: "https://sablierapp.dev/#/errors?id=internal-error",
13 | Title: http.StatusText(http.StatusInternalServerError),
14 | Status: http.StatusInternalServerError,
15 | Detail: e.Error(),
16 | }
17 | }
18 |
19 | func ProblemValidation(e error) rfc7807.Problem {
20 | return rfc7807.Problem{
21 | Type: "https://sablierapp.dev/#/errors?id=validation-error",
22 | Title: "Validation Failed",
23 | Status: http.StatusBadRequest,
24 | Detail: e.Error(),
25 | }
26 | }
27 |
28 | func ProblemGroupNotFound(e sablier.ErrGroupNotFound) rfc7807.Problem {
29 | pb := rfc7807.Problem{
30 | Type: "https://sablierapp.dev/#/errors?id=group-not-found",
31 | Title: "Group not found",
32 | Status: http.StatusNotFound,
33 | Detail: "The group you requested does not exist. It is possible that the group has not been scanned yet.",
34 | }
35 | _ = pb.Extend("availableGroups", e.AvailableGroups)
36 | _ = pb.Extend("requestGroup", e.Group)
37 | _ = pb.Extend("error", e.Error())
38 | return pb
39 | }
40 |
41 | func ProblemThemeNotFound(e theme.ErrThemeNotFound) rfc7807.Problem {
42 | pb := rfc7807.Problem{
43 | Type: "https://sablierapp.dev/#/errors?id=theme-not-found",
44 | Title: "Theme not found",
45 | Status: http.StatusNotFound,
46 | Detail: "The theme you requested does not exist among the default themes and the custom themes (if any).",
47 | }
48 | _ = pb.Extend("availableTheme", e.AvailableThemes)
49 | _ = pb.Extend("requestTheme", e.Theme)
50 | _ = pb.Extend("error", e.Error())
51 | return pb
52 | }
53 |
--------------------------------------------------------------------------------
/internal/api/start_blocking.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "errors"
5 | "github.com/gin-gonic/gin"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "net/http"
8 | "time"
9 | )
10 |
11 | type BlockingRequest struct {
12 | Names []string `form:"names"`
13 | Group string `form:"group"`
14 | SessionDuration time.Duration `form:"session_duration"`
15 | Timeout time.Duration `form:"timeout"`
16 | }
17 |
18 | func StartBlocking(router *gin.RouterGroup, s *ServeStrategy) {
19 | router.GET("/strategies/blocking", func(c *gin.Context) {
20 | request := BlockingRequest{
21 | SessionDuration: s.SessionsConfig.DefaultDuration,
22 | Timeout: s.StrategyConfig.Blocking.DefaultTimeout,
23 | }
24 |
25 | if err := c.ShouldBind(&request); err != nil {
26 | AbortWithProblemDetail(c, ProblemValidation(err))
27 | return
28 | }
29 |
30 | if len(request.Names) == 0 && request.Group == "" {
31 | AbortWithProblemDetail(c, ProblemValidation(errors.New("'names' or 'group' query parameter must be set")))
32 | return
33 | }
34 |
35 | if len(request.Names) > 0 && request.Group != "" {
36 | AbortWithProblemDetail(c, ProblemValidation(errors.New("'names' and 'group' query parameters are both set, only one must be set")))
37 | return
38 | }
39 |
40 | var sessionState *sablier.SessionState
41 | var err error
42 | if len(request.Names) > 0 {
43 | sessionState, err = s.Sablier.RequestReadySession(c.Request.Context(), request.Names, request.SessionDuration, request.Timeout)
44 | } else {
45 | sessionState, err = s.Sablier.RequestReadySessionGroup(c.Request.Context(), request.Group, request.SessionDuration, request.Timeout)
46 | var groupNotFoundError sablier.ErrGroupNotFound
47 | if errors.As(err, &groupNotFoundError) {
48 | AbortWithProblemDetail(c, ProblemGroupNotFound(groupNotFoundError))
49 | return
50 | }
51 | }
52 | if err != nil {
53 | AbortWithProblemDetail(c, ProblemError(err))
54 | return
55 | }
56 |
57 | if sessionState == nil {
58 | AbortWithProblemDetail(c, ProblemError(errors.New("session could not be created, please check logs for more details")))
59 | return
60 | }
61 |
62 | AddSablierHeader(c, sessionState)
63 |
64 | c.JSON(http.StatusOK, map[string]interface{}{"session": sessionState})
65 | })
66 | }
67 |
--------------------------------------------------------------------------------
/internal/api/theme_list.go:
--------------------------------------------------------------------------------
1 | package api
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | "net/http"
6 | )
7 |
8 | func ListThemes(router *gin.RouterGroup, s *ServeStrategy) {
9 | handler := func(c *gin.Context) {
10 | c.JSON(http.StatusOK, map[string]interface{}{
11 | "themes": s.Theme.List(),
12 | })
13 | }
14 |
15 | router.GET("/themes", handler)
16 | router.GET("/dynamic/themes", handler) // Legacy path
17 | }
18 |
--------------------------------------------------------------------------------
/internal/server/logging.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "github.com/gin-gonic/gin"
5 | sloggin "github.com/samber/slog-gin"
6 | "log/slog"
7 | )
8 |
9 | // StructuredLogger logs a gin HTTP request in JSON format. It allows setting
10 | // the logger for testing purposes.
11 | func StructuredLogger(logger *slog.Logger) gin.HandlerFunc {
12 | 	return sloggin.NewWithConfig(logger, sloggin.Config{
13 | 		DefaultLevel:     slog.LevelInfo,
14 | 		ClientErrorLevel: slog.LevelWarn,
15 | 		ServerErrorLevel: slog.LevelError,
16 |
17 | 		WithUserAgent:      false,
18 | 		WithRequestID:      true,
19 | 		WithRequestBody:    false,
20 | 		WithRequestHeader:  false,
21 | 		WithResponseBody:   false,
22 | 		WithResponseHeader: false,
23 | 		WithSpanID:         false,
24 | 		WithTraceID:        false,
25 |
26 | 		Filters: []sloggin.Filter{},
27 | 	})
28 | }
29 |
--------------------------------------------------------------------------------
/internal/server/routes.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 | "github.com/gin-gonic/gin"
6 | "github.com/sablierapp/sablier/internal/api"
7 | "github.com/sablierapp/sablier/pkg/config"
8 | )
9 |
10 | func registerRoutes(ctx context.Context, router *gin.Engine, serverConf config.Server, s *api.ServeStrategy) {
11 | // Enables automatic redirection if the current route cannot be matched but a
12 | 	// handler for the path with (or without) the trailing slash exists.
13 | router.RedirectTrailingSlash = true
14 |
15 | base := router.Group(serverConf.BasePath)
16 |
17 | 	api.Healthcheck(ctx, base)
18 |
19 | // Create REST API router group.
20 | APIv1 := base.Group("/api")
21 |
22 | api.StartDynamic(APIv1, s)
23 | api.StartBlocking(APIv1, s)
24 | api.ListThemes(APIv1, s)
25 | }
26 |
--------------------------------------------------------------------------------
/internal/server/server.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "github.com/gin-gonic/gin"
8 | "github.com/sablierapp/sablier/internal/api"
9 | "github.com/sablierapp/sablier/pkg/config"
10 | "log/slog"
11 | "net/http"
12 | "time"
13 | )
14 |
15 | func setupRouter(ctx context.Context, logger *slog.Logger, serverConf config.Server, s *api.ServeStrategy) *gin.Engine {
16 | r := gin.New()
17 |
18 | r.Use(StructuredLogger(logger))
19 | r.Use(gin.Recovery())
20 |
21 | registerRoutes(ctx, r, serverConf, s)
22 |
23 | return r
24 | }
25 |
26 | func Start(ctx context.Context, logger *slog.Logger, serverConf config.Server, s *api.ServeStrategy) {
27 | start := time.Now()
28 |
29 | if logger.Enabled(ctx, slog.LevelDebug) {
30 | gin.SetMode(gin.DebugMode)
31 | } else {
32 | gin.SetMode(gin.ReleaseMode)
33 | }
34 |
35 | r := setupRouter(ctx, logger, serverConf, s)
36 |
37 | 	// Build the HTTP server serving the gin engine.
38 | 	server := &http.Server{
39 | 		Addr:    fmt.Sprintf(":%d", serverConf.Port),
40 | 		Handler: r,
41 | 	}
42 |
43 | 	logger.Info("server: starting",
44 | slog.String("listen", server.Addr),
45 | slog.Duration("startup", time.Since(start)),
46 | slog.String("mode", gin.Mode()),
47 | )
48 |
49 | go StartHttp(server, logger)
50 |
51 | 	// Close the web server once the context is cancelled.
52 | <-ctx.Done()
53 | logger.Info("server: shutting down")
54 | err := server.Close()
55 | if err != nil {
56 | logger.Error("server: shutdown failed", slog.Any("error", err))
57 | }
58 | }
59 |
60 | // StartHttp starts the Web server in http mode.
61 | func StartHttp(s *http.Server, logger *slog.Logger) {
62 | if err := s.ListenAndServe(); err != nil {
63 | if errors.Is(err, http.ErrServerClosed) {
64 | logger.Info("server: shutdown complete")
65 | } else {
66 | logger.Error("server failed to start", slog.Any("error", err))
67 | }
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/pkg/config/configuration.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | type Config struct {
4 | Server Server
5 | Storage Storage
6 | Provider Provider
7 | Sessions Sessions
8 | Logging Logging
9 | Strategy Strategy
10 | }
11 |
12 | func NewConfig() Config {
13 | return Config{
14 | Server: NewServerConfig(),
15 | Storage: NewStorageConfig(),
16 | Provider: NewProviderConfig(),
17 | Sessions: NewSessionsConfig(),
18 | Logging: NewLoggingConfig(),
19 | Strategy: NewStrategyConfig(),
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
/pkg/config/logging.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "log/slog"
5 | "strings"
6 | )
7 |
8 | type Logging struct {
9 | Level string `mapstructure:"LEVEL" yaml:"level" default:"info"`
10 | }
11 |
12 | func NewLoggingConfig() Logging {
13 | return Logging{
14 | Level: strings.ToLower(slog.LevelInfo.String()),
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/pkg/config/provider.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import (
4 | "fmt"
5 | )
6 |
7 | // Provider holds the provider configurations
8 | type Provider struct {
9 | // The provider name to use
10 | 	// It can be either docker, docker_swarm (alias swarm) or kubernetes. Defaults to "docker"
11 | Name string `mapstructure:"NAME" yaml:"name,omitempty" default:"docker"`
12 | AutoStopOnStartup bool `yaml:"auto-stop-on-startup,omitempty" default:"true"`
13 | Kubernetes Kubernetes
14 | }
15 |
16 | type Kubernetes struct {
17 | // QPS limit for K8S API access client-side throttle
18 | QPS float32 `mapstructure:"QPS" yaml:"QPS" default:"5"`
19 | // Maximum burst for client-side throttle
20 | Burst int `mapstructure:"BURST" yaml:"Burst" default:"10"`
21 | // Delimiter used for namespace/resource type/name resolution. Defaults to "_" for backward compatibility. But you should use "/" or ".".
22 | Delimiter string `mapstructure:"DELIMITER" yaml:"Delimiter" default:"_"`
23 | }
24 |
25 | var providers = []string{"docker", "docker_swarm", "swarm", "kubernetes"}
26 |
27 | func NewProviderConfig() Provider {
28 | return Provider{
29 | 		AutoStopOnStartup: true,
30 | Name: "docker",
31 | Kubernetes: Kubernetes{
32 | QPS: 5,
33 | Burst: 10,
34 | Delimiter: "_",
35 | },
36 | }
37 | }
38 |
39 | func (provider Provider) IsValid() error {
40 | for _, p := range providers {
41 | if p == provider.Name {
42 | return nil
43 | }
44 | }
45 | return fmt.Errorf("unrecognized provider %s. providers available: %v", provider.Name, providers)
46 | }
47 |
48 | func GetProviders() []string {
49 | return providers
50 | }
51 |
--------------------------------------------------------------------------------
/pkg/config/server.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | type Server struct {
4 | Port int `mapstructure:"PORT" yaml:"port" default:"10000"`
5 | BasePath string `mapstructure:"BASE_PATH" yaml:"basePath" default:"/"`
6 | }
7 |
8 | func NewServerConfig() Server {
9 | return Server{
10 | Port: 10000,
11 | BasePath: "/",
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/pkg/config/sessions.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import "time"
4 |
5 | type Sessions struct {
6 | DefaultDuration time.Duration `mapstructure:"DEFAULT_DURATION" yaml:"defaultDuration" default:"5m"`
7 | ExpirationInterval time.Duration `mapstructure:"EXPIRATION_INTERVAL" yaml:"expirationInterval" default:"20s"`
8 | }
9 |
10 | func NewSessionsConfig() Sessions {
11 | return Sessions{
12 | DefaultDuration: 5 * time.Minute,
13 | ExpirationInterval: 20 * time.Second,
14 | }
15 | }
16 |
--------------------------------------------------------------------------------
/pkg/config/storage.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | type Storage struct {
4 | File string `mapstructure:"FILE" yaml:"file" default:""`
5 | }
6 |
7 | func NewStorageConfig() Storage {
8 | return Storage{
9 | File: "",
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/pkg/config/strategy.go:
--------------------------------------------------------------------------------
1 | package config
2 |
3 | import "time"
4 |
5 | type DynamicStrategy struct {
6 | CustomThemesPath string `mapstructure:"CUSTOM_THEMES_PATH" yaml:"customThemesPath"`
7 | ShowDetailsByDefault bool `mapstructure:"SHOW_DETAILS_BY_DEFAULT" yaml:"showDetailsByDefault"`
8 | DefaultTheme string `mapstructure:"DEFAULT_THEME" yaml:"defaultTheme" default:"hacker-terminal"`
9 | DefaultRefreshFrequency time.Duration `mapstructure:"DEFAULT_REFRESH_FREQUENCY" yaml:"defaultRefreshFrequency" default:"5s"`
10 | }
11 |
12 | type BlockingStrategy struct {
13 | DefaultTimeout time.Duration `mapstructure:"DEFAULT_TIMEOUT" yaml:"defaultTimeout" default:"1m"`
14 | DefaultRefreshFrequency time.Duration `mapstructure:"DEFAULT_REFRESH_FREQUENCY" yaml:"defaultRefreshFrequency" default:"5s"`
15 | }
16 |
17 | type Strategy struct {
18 | Dynamic DynamicStrategy
19 | Blocking BlockingStrategy
20 | }
21 |
22 | func NewStrategyConfig() Strategy {
23 | return Strategy{
24 | Dynamic: newDynamicStrategy(),
25 | Blocking: newBlockingStrategy(),
26 | }
27 | }
28 |
29 | func newDynamicStrategy() DynamicStrategy {
30 | return DynamicStrategy{
31 | DefaultTheme: "hacker-terminal",
32 | ShowDetailsByDefault: true,
33 | DefaultRefreshFrequency: 5 * time.Second,
34 | }
35 | }
36 |
37 | func newBlockingStrategy() BlockingStrategy {
38 | return BlockingStrategy{
39 | DefaultTimeout: 1 * time.Minute,
40 | DefaultRefreshFrequency: 5 * time.Second,
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/pkg/durations/duration.go:
--------------------------------------------------------------------------------
1 | package durations
2 |
3 | import (
4 | "encoding/json"
5 | "fmt"
6 | "time"
7 | )
8 |
9 | type Duration struct {
10 | time.Duration
11 | }
12 |
13 | func (duration *Duration) UnmarshalJSON(b []byte) error {
14 | var unmarshalledJson interface{}
15 |
16 | err := json.Unmarshal(b, &unmarshalledJson)
17 | if err != nil {
18 | return err
19 | }
20 |
21 | switch value := unmarshalledJson.(type) {
22 | case float64:
23 | duration.Duration = time.Duration(value)
24 | case string:
25 | duration.Duration, err = time.ParseDuration(value)
26 | if err != nil {
27 | return err
28 | }
29 | default:
30 | return fmt.Errorf("invalid duration: %#v", unmarshalledJson)
31 | }
32 |
33 | return nil
34 | }
35 |
--------------------------------------------------------------------------------
/pkg/durations/humanize.go:
--------------------------------------------------------------------------------
1 | package durations
2 |
3 | import (
4 | "fmt"
5 | "math"
6 | "strings"
7 | "time"
8 | )
9 |
10 | func Humanize(d time.Duration) string {
11 | days := int64(d.Hours() / 24)
12 | hours := int64(math.Mod(d.Hours(), 24))
13 | minutes := int64(math.Mod(d.Minutes(), 60))
14 | seconds := int64(math.Mod(d.Seconds(), 60))
15 |
16 | chunks := []struct {
17 | singularName string
18 | amount int64
19 | }{
20 | {"day", days},
21 | {"hour", hours},
22 | {"minute", minutes},
23 | {"second", seconds},
24 | }
25 |
26 | var parts []string
27 |
28 | for _, chunk := range chunks {
29 | switch chunk.amount {
30 | case 0:
31 | continue
32 | case 1:
33 | parts = append(parts, fmt.Sprintf("%d %s", chunk.amount, chunk.singularName))
34 | default:
35 | parts = append(parts, fmt.Sprintf("%d %ss", chunk.amount, chunk.singularName))
36 | }
37 | }
38 |
39 | return strings.Join(parts, " ")
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_inspect.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "log/slog"
8 | )
9 |
10 | func (p *Provider) InstanceInspect(ctx context.Context, name string) (sablier.InstanceInfo, error) {
11 | spec, err := p.Client.ContainerInspect(ctx, name)
12 | if err != nil {
13 | return sablier.InstanceInfo{}, fmt.Errorf("cannot inspect container: %w", err)
14 | }
15 |
16 | // "created", "running", "paused", "restarting", "removing", "exited", or "dead"
17 | switch spec.State.Status {
18 | case "created", "paused", "restarting", "removing":
19 | return sablier.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
20 | case "running":
21 | if spec.State.Health != nil {
22 | 			// "starting", "healthy" or "unhealthy"
23 | if spec.State.Health.Status == "healthy" {
24 | return sablier.ReadyInstanceState(name, p.desiredReplicas), nil
25 | } else if spec.State.Health.Status == "unhealthy" {
26 | return sablier.UnrecoverableInstanceState(name, "container is unhealthy", p.desiredReplicas), nil
27 | } else {
28 | return sablier.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
29 | }
30 | }
31 | p.l.WarnContext(ctx, "container running without healthcheck, you should define a healthcheck on your container so that Sablier properly detects when the container is ready to handle requests.", slog.String("container", name))
32 | return sablier.ReadyInstanceState(name, p.desiredReplicas), nil
33 | case "exited":
34 | if spec.State.ExitCode != 0 {
35 | return sablier.UnrecoverableInstanceState(name, fmt.Sprintf("container exited with code \"%d\"", spec.State.ExitCode), p.desiredReplicas), nil
36 | }
37 | return sablier.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
38 | case "dead":
39 | return sablier.UnrecoverableInstanceState(name, "container in \"dead\" state cannot be restarted", p.desiredReplicas), nil
40 | default:
41 | return sablier.UnrecoverableInstanceState(name, fmt.Sprintf("container status \"%s\" not handled", spec.State.Status), p.desiredReplicas), nil
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_list.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | dockertypes "github.com/docker/docker/api/types"
7 | "github.com/docker/docker/api/types/container"
8 | "github.com/docker/docker/api/types/filters"
9 | "github.com/sablierapp/sablier/pkg/provider"
10 | "github.com/sablierapp/sablier/pkg/sablier"
11 | "strings"
12 | )
13 |
14 | func (p *Provider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]sablier.InstanceConfiguration, error) {
15 | args := filters.NewArgs()
16 | args.Add("label", fmt.Sprintf("%s=true", "sablier.enable"))
17 |
18 | containers, err := p.Client.ContainerList(ctx, container.ListOptions{
19 | All: options.All,
20 | Filters: args,
21 | })
22 | if err != nil {
23 | return nil, err
24 | }
25 |
26 | instances := make([]sablier.InstanceConfiguration, 0, len(containers))
27 | for _, c := range containers {
28 | instance := containerToInstance(c)
29 | instances = append(instances, instance)
30 | }
31 |
32 | return instances, nil
33 | }
34 |
35 | func containerToInstance(c dockertypes.Container) sablier.InstanceConfiguration {
36 | var group string
37 |
38 | if _, ok := c.Labels["sablier.enable"]; ok {
39 | if g, ok := c.Labels["sablier.group"]; ok {
40 | group = g
41 | } else {
42 | group = "default"
43 | }
44 | }
45 |
46 | return sablier.InstanceConfiguration{
47 | 		Name:  strings.TrimPrefix(c.Names[0], "/"), // Container names are reported with a leading slash
48 | Group: group,
49 | }
50 | }
51 |
52 | func (p *Provider) InstanceGroups(ctx context.Context) (map[string][]string, error) {
53 | args := filters.NewArgs()
54 | args.Add("label", fmt.Sprintf("%s=true", "sablier.enable"))
55 |
56 | containers, err := p.Client.ContainerList(ctx, container.ListOptions{
57 | All: true,
58 | Filters: args,
59 | })
60 |
61 | if err != nil {
62 | return nil, err
63 | }
64 |
65 | groups := make(map[string][]string)
66 | for _, c := range containers {
67 | groupName := c.Labels["sablier.group"]
68 | if len(groupName) == 0 {
69 | groupName = "default"
70 | }
71 | group := groups[groupName]
72 | group = append(group, strings.TrimPrefix(c.Names[0], "/"))
73 | groups[groupName] = group
74 | }
75 |
76 | return groups, nil
77 | }
78 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_start.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/docker/docker/api/types/container"
7 | )
8 |
9 | func (p *Provider) InstanceStart(ctx context.Context, name string) error {
10 | // TODO: InstanceStart should block until the container is ready.
11 | err := p.Client.ContainerStart(ctx, name, container.StartOptions{})
12 | if err != nil {
13 | return fmt.Errorf("cannot start container %s: %w", name, err)
14 | }
15 | return nil
16 | }
17 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_start_test.go:
--------------------------------------------------------------------------------
1 | package docker_test
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/neilotoole/slogt"
7 | "github.com/sablierapp/sablier/pkg/provider/docker"
8 | "gotest.tools/v3/assert"
9 | "testing"
10 | )
11 |
12 | func TestDockerClassicProvider_Start(t *testing.T) {
13 | if testing.Short() {
14 | t.Skip("skipping test in short mode.")
15 | }
16 |
17 | ctx := context.Background()
18 | type args struct {
19 | do func(dind *dindContainer) (string, error)
20 | }
21 | tests := []struct {
22 | name string
23 | args args
24 | err error
25 | }{
26 | {
27 | name: "non existing container start",
28 | args: args{
29 | do: func(dind *dindContainer) (string, error) {
30 | return "non-existent", nil
31 | },
32 | },
33 | err: fmt.Errorf("cannot start container non-existent: Error response from daemon: No such container: non-existent"),
34 | },
35 | {
36 | name: "container start as expected",
37 | args: args{
38 | do: func(dind *dindContainer) (string, error) {
39 | c, err := dind.CreateMimic(ctx, MimicOptions{})
40 | return c.ID, err
41 | },
42 | },
43 | err: nil,
44 | },
45 | }
46 | c := setupDinD(t)
47 | for _, tt := range tests {
48 | t.Run(tt.name, func(t *testing.T) {
49 | t.Parallel()
50 | p, err := docker.New(ctx, c.client, slogt.New(t))
51 | assert.NilError(t, err)
52 |
53 | name, err := tt.args.do(c)
54 | assert.NilError(t, err)
55 |
56 | err = p.InstanceStart(t.Context(), name)
57 | if tt.err != nil {
58 | assert.Error(t, err, tt.err.Error())
59 | } else {
60 | assert.NilError(t, err)
61 | }
62 | })
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_stop.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/docker/docker/api/types/container"
7 | "log/slog"
8 | )
9 |
10 | func (p *Provider) InstanceStop(ctx context.Context, name string) error {
11 | p.l.DebugContext(ctx, "stopping container", slog.String("name", name))
12 | err := p.Client.ContainerStop(ctx, name, container.StopOptions{})
13 | if err != nil {
14 | p.l.ErrorContext(ctx, "cannot stop container", slog.String("name", name), slog.Any("error", err))
15 | return fmt.Errorf("cannot stop container %s: %w", name, err)
16 | }
17 |
18 | p.l.DebugContext(ctx, "waiting for container to stop", slog.String("name", name))
19 | waitC, errC := p.Client.ContainerWait(ctx, name, container.WaitConditionNotRunning)
20 | select {
21 | case <-waitC:
22 | p.l.DebugContext(ctx, "container stopped", slog.String("name", name))
23 | return nil
24 | case err := <-errC:
25 | p.l.ErrorContext(ctx, "cannot wait for container to stop", slog.String("name", name), slog.Any("error", err))
26 | return fmt.Errorf("cannot wait for container %s to stop: %w", name, err)
27 | case <-ctx.Done():
28 | p.l.ErrorContext(ctx, "context cancelled while waiting for container to stop", slog.String("name", name))
29 | return ctx.Err()
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/pkg/provider/docker/container_stop_test.go:
--------------------------------------------------------------------------------
1 | package docker_test
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/docker/docker/api/types/container"
7 | "github.com/neilotoole/slogt"
8 | "github.com/sablierapp/sablier/pkg/provider/docker"
9 | "gotest.tools/v3/assert"
10 | "testing"
11 | )
12 |
13 | func TestDockerClassicProvider_Stop(t *testing.T) {
14 | if testing.Short() {
15 | t.Skip("skipping test in short mode.")
16 | }
17 |
18 | ctx := context.Background()
19 | type args struct {
20 | do func(dind *dindContainer) (string, error)
21 | }
22 | tests := []struct {
23 | name string
24 | args args
25 | err error
26 | }{
27 | {
28 | name: "non existing container stop",
29 | args: args{
30 | do: func(dind *dindContainer) (string, error) {
31 | return "non-existent", nil
32 | },
33 | },
34 | err: fmt.Errorf("cannot stop container non-existent: Error response from daemon: No such container: non-existent"),
35 | },
36 | {
37 | name: "container stop as expected",
38 | args: args{
39 | do: func(dind *dindContainer) (string, error) {
40 | c, err := dind.CreateMimic(ctx, MimicOptions{})
41 | if err != nil {
42 | return "", err
43 | }
44 |
45 | err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
46 | if err != nil {
47 | return "", err
48 | }
49 |
50 | return c.ID, nil
51 | },
52 | },
53 | err: nil,
54 | },
55 | }
56 | c := setupDinD(t)
57 | for _, tt := range tests {
58 | t.Run(tt.name, func(t *testing.T) {
59 | t.Parallel()
60 | 			p, err := docker.New(ctx, c.client, slogt.New(t))
61 | 			assert.NilError(t, err)
62 | name, err := tt.args.do(c)
63 | assert.NilError(t, err)
64 |
65 | err = p.InstanceStop(t.Context(), name)
66 | if tt.err != nil {
67 | assert.Error(t, err, tt.err.Error())
68 | } else {
69 | assert.NilError(t, err)
70 | }
71 | })
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/provider/docker/docker.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/docker/docker/client"
7 | "github.com/sablierapp/sablier/pkg/sablier"
8 | "log/slog"
9 | )
10 |
11 | // Interface guard
12 | var _ sablier.Provider = (*Provider)(nil)
13 |
14 | type Provider struct {
15 | Client client.APIClient
16 | desiredReplicas int32
17 | l *slog.Logger
18 | }
19 |
20 | func New(ctx context.Context, cli *client.Client, logger *slog.Logger) (*Provider, error) {
21 | logger = logger.With(slog.String("provider", "docker"))
22 |
23 | serverVersion, err := cli.ServerVersion(ctx)
24 | if err != nil {
25 | 		return nil, fmt.Errorf("cannot connect to docker host: %w", err)
26 | }
27 |
28 | logger.InfoContext(ctx, "connection established with docker",
29 | slog.String("version", serverVersion.Version),
30 | slog.String("api_version", serverVersion.APIVersion),
31 | )
32 | return &Provider{
33 | Client: cli,
34 | desiredReplicas: 1,
35 | l: logger,
36 | }, nil
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/provider/docker/events.go:
--------------------------------------------------------------------------------
1 | package docker
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "github.com/docker/docker/api/types/events"
7 | "github.com/docker/docker/api/types/filters"
8 | "io"
9 | "log/slog"
10 | "strings"
11 | )
12 |
13 | func (p *Provider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
14 | msgs, errs := p.Client.Events(ctx, events.ListOptions{
15 | Filters: filters.NewArgs(
16 | filters.Arg("scope", "local"),
17 | filters.Arg("type", string(events.ContainerEventType)),
18 | filters.Arg("event", "die"),
19 | ),
20 | })
21 | for {
22 | select {
23 | case msg, ok := <-msgs:
24 | if !ok {
25 | p.l.ErrorContext(ctx, "event stream closed")
26 | close(instance)
27 | return
28 | }
29 | // Send the container that has died to the channel
30 | instance <- strings.TrimPrefix(msg.Actor.Attributes["name"], "/")
31 | case err, ok := <-errs:
32 | if !ok {
33 | p.l.ErrorContext(ctx, "event stream closed")
34 | close(instance)
35 | return
36 | }
37 | if errors.Is(err, io.EOF) {
38 | p.l.ErrorContext(ctx, "event stream closed")
39 | close(instance)
40 | return
41 | }
42 | p.l.ErrorContext(ctx, "event stream error", slog.Any("error", err))
43 | case <-ctx.Done():
44 | close(instance)
45 | return
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/provider/docker/events_test.go:
--------------------------------------------------------------------------------
1 | package docker_test
2 |
3 | import (
4 | "context"
5 | "github.com/docker/docker/api/types/container"
6 | "github.com/neilotoole/slogt"
7 | "github.com/sablierapp/sablier/pkg/provider/docker"
8 | "gotest.tools/v3/assert"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestDockerClassicProvider_NotifyInstanceStopped(t *testing.T) {
14 | if testing.Short() {
15 | t.Skip("skipping test in short mode.")
16 | }
17 |
18 | ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
19 | defer cancel()
20 | dind := setupDinD(t)
21 | p, err := docker.New(ctx, dind.client, slogt.New(t))
22 | assert.NilError(t, err)
23 |
24 | c, err := dind.CreateMimic(ctx, MimicOptions{})
25 | assert.NilError(t, err)
26 |
27 | inspected, err := dind.client.ContainerInspect(ctx, c.ID)
28 | assert.NilError(t, err)
29 |
30 | err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
31 | assert.NilError(t, err)
32 |
33 | <-time.After(1 * time.Second)
34 |
35 | waitC := make(chan string)
36 | go p.NotifyInstanceStopped(ctx, waitC)
37 |
38 | err = dind.client.ContainerStop(ctx, c.ID, container.StopOptions{})
39 | assert.NilError(t, err)
40 |
41 | name := <-waitC
42 |
43 | // Docker container name is prefixed with a slash, but we don't use it
44 | assert.Equal(t, "/"+name, inspected.Name)
45 | }
46 |
--------------------------------------------------------------------------------
/pkg/provider/docker/testcontainers_test.go:
--------------------------------------------------------------------------------
1 | package docker_test
2 |
3 | import (
4 | "context"
5 | "github.com/docker/docker/api/types/container"
6 | "github.com/docker/docker/client"
7 | "github.com/testcontainers/testcontainers-go"
8 | "github.com/testcontainers/testcontainers-go/modules/dind"
9 | "gotest.tools/v3/assert"
10 | "testing"
11 | )
12 |
13 | type dindContainer struct {
14 | testcontainers.Container
15 | client *client.Client
16 | t *testing.T
17 | }
18 |
19 | type MimicOptions struct {
20 | Cmd []string
21 | Healthcheck *container.HealthConfig
22 | RestartPolicy container.RestartPolicy
23 | Labels map[string]string
24 | }
25 |
26 | func (d *dindContainer) CreateMimic(ctx context.Context, opts MimicOptions) (container.CreateResponse, error) {
27 | if len(opts.Cmd) == 0 {
28 | opts.Cmd = []string{"/mimic", "-running", "-running-after=1s", "-healthy=false"}
29 | }
30 |
31 | d.t.Log("Creating mimic container with options", opts)
32 | return d.client.ContainerCreate(ctx, &container.Config{
33 | Entrypoint: opts.Cmd,
34 | Image: "sablierapp/mimic:v0.3.1",
35 | Labels: opts.Labels,
36 | Healthcheck: opts.Healthcheck,
37 | }, &container.HostConfig{RestartPolicy: opts.RestartPolicy}, nil, nil, "")
38 | }
39 |
40 | func setupDinD(t *testing.T) *dindContainer {
41 | t.Helper()
42 | ctx := t.Context()
43 | c, err := dind.Run(ctx, "docker:28.0.4-dind")
44 | assert.NilError(t, err)
45 | testcontainers.CleanupContainer(t, c)
46 |
47 | host, err := c.Host(ctx)
48 | assert.NilError(t, err)
49 |
50 | dindCli, err := client.NewClientWithOpts(client.WithHost(host), client.WithAPIVersionNegotiation())
51 | assert.NilError(t, err)
52 |
53 | provider, err := testcontainers.ProviderDocker.GetProvider()
54 | assert.NilError(t, err)
55 |
56 | err = provider.PullImage(ctx, "sablierapp/mimic:v0.3.1")
57 | assert.NilError(t, err)
58 |
59 | err = c.LoadImage(ctx, "sablierapp/mimic:v0.3.1")
60 | assert.NilError(t, err)
61 |
62 | return &dindContainer{
63 | Container: c,
64 | client: dindCli,
65 | t: t,
66 | }
67 | }
68 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/docker_swarm.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "github.com/sablierapp/sablier/pkg/sablier"
8 | "log/slog"
9 | "strings"
10 |
11 | "github.com/docker/docker/api/types"
12 | "github.com/docker/docker/api/types/swarm"
13 | "github.com/docker/docker/client"
14 | )
15 |
16 | // Interface guard
17 | var _ sablier.Provider = (*Provider)(nil)
18 |
19 | type Provider struct {
20 | Client client.APIClient
21 | desiredReplicas int32
22 |
23 | l *slog.Logger
24 | }
25 |
26 | func New(ctx context.Context, cli *client.Client, logger *slog.Logger) (*Provider, error) {
27 | logger = logger.With(slog.String("provider", "swarm"))
28 |
29 | serverVersion, err := cli.ServerVersion(ctx)
30 | if err != nil {
31 | return nil, fmt.Errorf("cannot connect to docker host: %w", err)
32 | }
33 |
34 | logger.InfoContext(ctx, "connection established with docker swarm",
35 | slog.String("version", serverVersion.Version),
36 | slog.String("api_version", serverVersion.APIVersion),
37 | )
38 |
39 | return &Provider{
40 | Client: cli,
41 | desiredReplicas: 1,
42 | l: logger,
43 | 	}, nil
44 | }
45 |
46 |
47 | func (p *Provider) ServiceUpdateReplicas(ctx context.Context, name string, replicas uint64) error {
48 | 	service, err := p.getServiceByName(ctx, name)
49 | if err != nil {
50 | return err
51 | }
52 |
53 | foundName := p.getInstanceName(name, *service)
54 | if service.Spec.Mode.Replicated == nil {
55 | return errors.New("swarm service is not in \"replicated\" mode")
56 | }
57 |
58 | service.Spec.Mode.Replicated.Replicas = &replicas
59 |
60 | response, err := p.Client.ServiceUpdate(ctx, service.ID, service.Meta.Version, service.Spec, types.ServiceUpdateOptions{})
61 | if err != nil {
62 | return err
63 | }
64 |
65 | if len(response.Warnings) > 0 {
66 | return fmt.Errorf("warning received updating swarm service [%s]: %s", foundName, strings.Join(response.Warnings, ", "))
67 | }
68 |
69 | return nil
70 | }
71 |
72 | func (p *Provider) getInstanceName(name string, service swarm.Service) string {
73 | if name == service.Spec.Name {
74 | return name
75 | }
76 |
77 | return fmt.Sprintf("%s (%s)", name, service.Spec.Name)
78 | }
79 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/events.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "github.com/docker/docker/api/types/events"
7 | "github.com/docker/docker/api/types/filters"
8 | "io"
9 | "log/slog"
10 | )
11 |
12 | func (p *Provider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
13 | msgs, errs := p.Client.Events(ctx, events.ListOptions{
14 | Filters: filters.NewArgs(
15 | filters.Arg("scope", "swarm"),
16 | filters.Arg("type", "service"),
17 | ),
18 | })
19 |
20 | go func() {
21 | for {
22 | select {
23 | case msg, ok := <-msgs:
24 | if !ok {
25 | p.l.ErrorContext(ctx, "event stream closed")
26 | return
27 | }
28 | if msg.Actor.Attributes["replicas.new"] == "0" {
29 | instance <- msg.Actor.Attributes["name"]
30 | } else if msg.Action == "remove" {
31 | instance <- msg.Actor.Attributes["name"]
32 | }
33 | case err, ok := <-errs:
34 | if !ok {
35 | p.l.ErrorContext(ctx, "event stream closed")
36 | return
37 | }
38 | if errors.Is(err, io.EOF) {
39 | p.l.ErrorContext(ctx, "event stream closed")
40 | return
41 | }
42 | p.l.ErrorContext(ctx, "event stream error", slog.Any("error", err))
43 | case <-ctx.Done():
44 | return
45 | }
46 | }
47 | }()
48 | }
49 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/events_test.go:
--------------------------------------------------------------------------------
1 | package dockerswarm_test
2 |
3 | import (
4 | "context"
5 | "github.com/docker/docker/api/types"
6 | "github.com/neilotoole/slogt"
7 | "github.com/sablierapp/sablier/pkg/provider/dockerswarm"
8 | "gotest.tools/v3/assert"
9 | "testing"
10 | "time"
11 | )
12 |
13 | func TestDockerSwarmProvider_NotifyInstanceStopped(t *testing.T) {
14 | if testing.Short() {
15 | t.Skip("skipping test in short mode.")
16 | }
17 |
18 | ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
19 | defer cancel()
20 | dind := setupDinD(t)
21 | p, err := dockerswarm.New(ctx, dind.client, slogt.New(t))
22 | assert.NilError(t, err)
23 |
24 | c, err := dind.CreateMimic(ctx, MimicOptions{})
25 | assert.NilError(t, err)
26 |
27 | waitC := make(chan string)
28 | go p.NotifyInstanceStopped(ctx, waitC)
29 |
30 | t.Run("service is scaled to 0 replicas", func(t *testing.T) {
31 | service, _, err := dind.client.ServiceInspectWithRaw(ctx, c.ID, types.ServiceInspectOptions{})
32 | assert.NilError(t, err)
33 |
34 | replicas := uint64(0)
35 | service.Spec.Mode.Replicated.Replicas = &replicas
36 |
37 | _, err = p.Client.ServiceUpdate(ctx, service.ID, service.Meta.Version, service.Spec, types.ServiceUpdateOptions{})
38 | assert.NilError(t, err)
39 |
40 | name := <-waitC
41 |
42 | 		// The event reports the Swarm service name
43 | assert.Equal(t, name, service.Spec.Name)
44 | })
45 |
46 | t.Run("service is removed", func(t *testing.T) {
47 | service, _, err := dind.client.ServiceInspectWithRaw(ctx, c.ID, types.ServiceInspectOptions{})
48 | assert.NilError(t, err)
49 |
50 | err = p.Client.ServiceRemove(ctx, service.ID)
51 | assert.NilError(t, err)
52 |
53 | name := <-waitC
54 |
55 | 		// The event reports the Swarm service name
56 | assert.Equal(t, name, service.Spec.Name)
57 | })
58 | }
59 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/service_inspect.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "github.com/docker/docker/api/types"
8 | "github.com/docker/docker/api/types/filters"
9 | "github.com/docker/docker/api/types/swarm"
10 | "github.com/sablierapp/sablier/pkg/sablier"
11 | )
12 |
13 | func (p *Provider) InstanceInspect(ctx context.Context, name string) (sablier.InstanceInfo, error) {
14 | 	service, err := p.getServiceByName(ctx, name)
15 | if err != nil {
16 | return sablier.InstanceInfo{}, err
17 | }
18 |
19 | foundName := p.getInstanceName(name, *service)
20 |
21 | if service.Spec.Mode.Replicated == nil {
22 | return sablier.InstanceInfo{}, errors.New("swarm service is not in \"replicated\" mode")
23 | }
24 |
25 | if service.ServiceStatus.DesiredTasks != service.ServiceStatus.RunningTasks || service.ServiceStatus.DesiredTasks == 0 {
26 | return sablier.NotReadyInstanceState(foundName, 0, p.desiredReplicas), nil
27 | }
28 |
29 | return sablier.ReadyInstanceState(foundName, p.desiredReplicas), nil
30 | }
31 |
32 | func (p *Provider) getServiceByName(ctx context.Context, name string) (*swarm.Service, error) {
33 | opts := types.ServiceListOptions{
34 | Filters: filters.NewArgs(),
35 | Status: true,
36 | }
37 | opts.Filters.Add("name", name)
38 |
39 | services, err := p.Client.ServiceList(ctx, opts)
40 | if err != nil {
41 | return nil, fmt.Errorf("error listing services: %w", err)
42 | }
43 |
44 | if len(services) == 0 {
45 | return nil, fmt.Errorf("service with name %s was not found", name)
46 | }
47 |
48 | for _, service := range services {
49 | // Exact match
50 | if service.Spec.Name == name {
51 | return &service, nil
52 | }
53 | if service.ID == name {
54 | return &service, nil
55 | }
56 | }
57 |
58 | 	return nil, fmt.Errorf("service %s was not found: no exact match on name or ID", name)
59 | }
60 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/service_list.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | dockertypes "github.com/docker/docker/api/types"
7 | "github.com/docker/docker/api/types/filters"
8 | "github.com/docker/docker/api/types/swarm"
9 | "github.com/sablierapp/sablier/pkg/provider"
10 | "github.com/sablierapp/sablier/pkg/sablier"
11 | )
12 |
13 | func (p *Provider) InstanceList(ctx context.Context, _ provider.InstanceListOptions) ([]sablier.InstanceConfiguration, error) {
14 | args := filters.NewArgs()
15 | args.Add("label", fmt.Sprintf("%s=true", "sablier.enable"))
16 | args.Add("mode", "replicated")
17 |
18 | services, err := p.Client.ServiceList(ctx, dockertypes.ServiceListOptions{
19 | Filters: args,
20 | })
21 |
22 | if err != nil {
23 | return nil, err
24 | }
25 |
26 | instances := make([]sablier.InstanceConfiguration, 0, len(services))
27 | for _, s := range services {
28 | instance := p.serviceToInstance(s)
29 | instances = append(instances, instance)
30 | }
31 |
32 | return instances, nil
33 | }
34 |
35 | func (p *Provider) serviceToInstance(s swarm.Service) sablier.InstanceConfiguration {
36 | var group string
37 |
38 | if _, ok := s.Spec.Labels["sablier.enable"]; ok {
39 | if g, ok := s.Spec.Labels["sablier.group"]; ok {
40 | group = g
41 | } else {
42 | group = "default"
43 | }
44 | }
45 |
46 | return sablier.InstanceConfiguration{
47 | Name: s.Spec.Name,
48 | Group: group,
49 | }
50 | }
51 |
52 | func (p *Provider) InstanceGroups(ctx context.Context) (map[string][]string, error) {
53 | f := filters.NewArgs()
54 | f.Add("label", fmt.Sprintf("%s=true", "sablier.enable"))
55 |
56 | services, err := p.Client.ServiceList(ctx, dockertypes.ServiceListOptions{
57 | Filters: f,
58 | })
59 |
60 | if err != nil {
61 | return nil, err
62 | }
63 |
64 | groups := make(map[string][]string)
65 | for _, service := range services {
66 | groupName := service.Spec.Labels["sablier.group"]
67 | if len(groupName) == 0 {
68 | groupName = "default"
69 | }
70 |
71 | group := groups[groupName]
72 | group = append(group, service.Spec.Name)
73 | groups[groupName] = group
74 | }
75 |
76 | return groups, nil
77 | }
78 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/service_start.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import "context"
4 |
5 | func (p *Provider) InstanceStart(ctx context.Context, name string) error {
6 | return p.ServiceUpdateReplicas(ctx, name, uint64(p.desiredReplicas))
7 | }
8 |
--------------------------------------------------------------------------------
/pkg/provider/dockerswarm/service_stop.go:
--------------------------------------------------------------------------------
1 | package dockerswarm
2 |
3 | import "context"
4 |
5 | func (p *Provider) InstanceStop(ctx context.Context, name string) error {
6 | return p.ServiceUpdateReplicas(ctx, name, 0)
7 | }
8 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/deployment_events.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | appsv1 "k8s.io/api/apps/v1"
5 | core_v1 "k8s.io/api/core/v1"
6 | "k8s.io/client-go/informers"
7 | "k8s.io/client-go/tools/cache"
8 | "time"
9 | )
10 |
11 | func (p *Provider) watchDeployments(instance chan<- string) cache.SharedIndexInformer {
12 | handler := cache.ResourceEventHandlerFuncs{
13 | UpdateFunc: func(oldObj, newObj interface{}) {
14 | newDeployment := newObj.(*appsv1.Deployment)
15 | oldDeployment := oldObj.(*appsv1.Deployment)
16 |
17 | if newDeployment.ObjectMeta.ResourceVersion == oldDeployment.ObjectMeta.ResourceVersion {
18 | return
19 | }
20 |
21 | if *oldDeployment.Spec.Replicas == 0 {
22 | return
23 | }
24 |
25 | if *newDeployment.Spec.Replicas == 0 {
26 | parsed := DeploymentName(newDeployment, ParseOptions{Delimiter: p.delimiter})
27 | instance <- parsed.Original
28 | }
29 | },
30 | DeleteFunc: func(obj interface{}) {
31 | deletedDeployment := obj.(*appsv1.Deployment)
32 | parsed := DeploymentName(deletedDeployment, ParseOptions{Delimiter: p.delimiter})
33 | instance <- parsed.Original
34 | },
35 | }
36 | factory := informers.NewSharedInformerFactoryWithOptions(p.Client, 2*time.Second, informers.WithNamespace(core_v1.NamespaceAll))
37 | informer := factory.Apps().V1().Deployments().Informer()
38 |
39 | informer.AddEventHandler(handler)
40 | return informer
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/deployment_inspect.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | )
9 |
10 | func (p *Provider) DeploymentInspect(ctx context.Context, config ParsedName) (sablier.InstanceInfo, error) {
11 | d, err := p.Client.AppsV1().Deployments(config.Namespace).Get(ctx, config.Name, metav1.GetOptions{})
12 | if err != nil {
13 | return sablier.InstanceInfo{}, fmt.Errorf("error getting deployment: %w", err)
14 | }
15 |
16 | // TODO: Should add option to set ready as soon as one replica is ready
17 | if *d.Spec.Replicas != 0 && *d.Spec.Replicas == d.Status.ReadyReplicas {
18 | return sablier.ReadyInstanceState(config.Original, config.Replicas), nil
19 | }
20 |
21 | return sablier.NotReadyInstanceState(config.Original, d.Status.ReadyReplicas, config.Replicas), nil
22 | }
23 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/deployment_list.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/sablier"
6 | v1 "k8s.io/api/apps/v1"
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | func (p *Provider) DeploymentList(ctx context.Context) ([]sablier.InstanceConfiguration, error) {
12 | labelSelector := metav1.LabelSelector{
13 | MatchLabels: map[string]string{
14 | "sablier.enable": "true",
15 | },
16 | }
17 | deployments, err := p.Client.AppsV1().Deployments(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
18 | LabelSelector: metav1.FormatLabelSelector(&labelSelector),
19 | })
20 | if err != nil {
21 | return nil, err
22 | }
23 |
24 | instances := make([]sablier.InstanceConfiguration, 0, len(deployments.Items))
25 | for _, d := range deployments.Items {
26 | instance := p.deploymentToInstance(&d)
27 | instances = append(instances, instance)
28 | }
29 |
30 | return instances, nil
31 | }
32 |
33 | func (p *Provider) deploymentToInstance(d *v1.Deployment) sablier.InstanceConfiguration {
34 | var group string
35 |
36 | if _, ok := d.Labels["sablier.enable"]; ok {
37 | if g, ok := d.Labels["sablier.group"]; ok {
38 | group = g
39 | } else {
40 | group = "default"
41 | }
42 | }
43 |
44 | parsed := DeploymentName(d, ParseOptions{Delimiter: p.delimiter})
45 |
46 | return sablier.InstanceConfiguration{
47 | Name: parsed.Original,
48 | Group: group,
49 | }
50 | }
51 |
52 | func (p *Provider) DeploymentGroups(ctx context.Context) (map[string][]string, error) {
53 | labelSelector := metav1.LabelSelector{
54 | MatchLabels: map[string]string{
55 | "sablier.enable": "true",
56 | },
57 | }
58 | deployments, err := p.Client.AppsV1().Deployments(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
59 | LabelSelector: metav1.FormatLabelSelector(&labelSelector),
60 | })
61 |
62 | if err != nil {
63 | return nil, err
64 | }
65 |
66 | groups := make(map[string][]string)
67 | for _, deployment := range deployments.Items {
68 | groupName := deployment.Labels["sablier.group"]
69 | if len(groupName) == 0 {
70 | groupName = "default"
71 | }
72 |
73 | group := groups[groupName]
74 | parsed := DeploymentName(&deployment, ParseOptions{Delimiter: p.delimiter})
75 | group = append(group, parsed.Original)
76 | groups[groupName] = group
77 | }
78 |
79 | return groups, nil
80 | }
81 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_events.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import "context"
4 |
5 | func (p *Provider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
6 | informer := p.watchDeployments(instance)
7 | go informer.Run(ctx.Done())
8 | informer = p.watchStatefulSets(instance)
9 | go informer.Run(ctx.Done())
10 | }
11 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_inspect.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | )
8 |
9 | func (p *Provider) InstanceInspect(ctx context.Context, name string) (sablier.InstanceInfo, error) {
10 | parsed, err := ParseName(name, ParseOptions{Delimiter: p.delimiter})
11 | if err != nil {
12 | return sablier.InstanceInfo{}, err
13 | }
14 |
15 | switch parsed.Kind {
16 | case "deployment":
17 | return p.DeploymentInspect(ctx, parsed)
18 | case "statefulset":
19 | return p.StatefulSetInspect(ctx, parsed)
20 | default:
21 | return sablier.InstanceInfo{}, fmt.Errorf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", parsed.Kind)
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_inspect_test.go:
--------------------------------------------------------------------------------
1 | package kubernetes_test
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "github.com/neilotoole/slogt"
7 | "github.com/sablierapp/sablier/pkg/config"
8 | "github.com/sablierapp/sablier/pkg/provider/kubernetes"
9 | "gotest.tools/v3/assert"
10 | "testing"
11 | )
12 |
13 | func TestKubernetesProvider_InstanceInspect(t *testing.T) {
14 | if testing.Short() {
15 | t.Skip("skipping test in short mode.")
16 | }
17 |
18 | ctx := context.Background()
19 | type args struct {
20 | name string
21 | }
22 | tests := []struct {
23 | name string
24 | args args
25 | want error
26 | }{
27 | {
28 | name: "invalid format name",
29 | args: args{
30 | name: "invalid-name-format",
31 | },
32 | want: fmt.Errorf("invalid name [invalid-name-format] should be: kind_namespace_name_replicas"),
33 | },
34 | {
35 | name: "unsupported resource name",
36 | args: args{
37 | name: "service_default_my-service_1",
38 | },
39 | want: fmt.Errorf("unsupported kind \"service\" must be one of \"deployment\", \"statefulset\""),
40 | },
41 | }
42 | c := setupKinD(t, ctx)
43 | for _, tt := range tests {
44 | t.Run(tt.name, func(t *testing.T) {
45 | t.Parallel()
46 | p, err := kubernetes.New(ctx, c.client, slogt.New(t), config.NewProviderConfig().Kubernetes)
47 | assert.NilError(t, err)
48 |
49 | _, err = p.InstanceInspect(ctx, tt.args.name)
50 | assert.Error(t, err, tt.want.Error())
51 | })
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_list.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/provider"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | )
8 |
9 | func (p *Provider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]sablier.InstanceConfiguration, error) {
10 | deployments, err := p.DeploymentList(ctx)
11 | if err != nil {
12 | return nil, err
13 | }
14 |
15 | statefulSets, err := p.StatefulSetList(ctx)
16 | if err != nil {
17 | return nil, err
18 | }
19 |
20 | return append(deployments, statefulSets...), nil
21 | }
22 |
23 | func (p *Provider) InstanceGroups(ctx context.Context) (map[string][]string, error) {
24 | deployments, err := p.DeploymentGroups(ctx)
25 | if err != nil {
26 | return nil, err
27 | }
28 |
29 | statefulSets, err := p.StatefulSetGroups(ctx)
30 | if err != nil {
31 | return nil, err
32 | }
33 |
34 | groups := make(map[string][]string)
35 | for group, instances := range deployments {
36 | groups[group] = instances
37 | }
38 |
39 | for group, instances := range statefulSets {
40 | groups[group] = append(groups[group], instances...)
41 | }
42 |
43 | return groups, nil
44 | }
45 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_start.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import "context"
4 |
5 | func (p *Provider) InstanceStart(ctx context.Context, name string) error {
6 | parsed, err := ParseName(name, ParseOptions{Delimiter: p.delimiter})
7 | if err != nil {
8 | return err
9 | }
10 |
11 | return p.scale(ctx, parsed, parsed.Replicas)
12 | }
13 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/instance_stop.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import "context"
4 |
5 | func (p *Provider) InstanceStop(ctx context.Context, name string) error {
6 | parsed, err := ParseName(name, ParseOptions{Delimiter: p.delimiter})
7 | if err != nil {
8 | return err
9 | }
10 |
11 | return p.scale(ctx, parsed, 0)
12 | }
13 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/kubernetes.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | providerConfig "github.com/sablierapp/sablier/pkg/config"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "log/slog"
8 |
9 | "k8s.io/client-go/kubernetes"
10 | )
11 |
12 | // Interface guard
13 | var _ sablier.Provider = (*Provider)(nil)
14 |
15 | type Provider struct {
16 | Client kubernetes.Interface
17 | delimiter string
18 | l *slog.Logger
19 | }
20 |
21 | func New(ctx context.Context, client *kubernetes.Clientset, logger *slog.Logger, config providerConfig.Kubernetes) (*Provider, error) {
22 | logger = logger.With(slog.String("provider", "kubernetes"))
23 |
24 | info, err := client.ServerVersion()
25 | if err != nil {
26 | return nil, err
27 | }
28 |
29 | logger.InfoContext(ctx, "connection established with kubernetes",
30 | slog.String("version", info.String()),
31 | slog.Float64("config.qps", float64(config.QPS)),
32 | slog.Int("config.burst", config.Burst),
33 | )
34 |
35 | return &Provider{
36 | Client: client,
37 | delimiter: config.Delimiter,
38 | l: logger,
39 | }, nil
40 |
41 | }
42 |
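A minimal construction sketch, assuming an in-cluster setup; out-of-cluster callers would build the rest.Config from a kubeconfig via clientcmd instead (names below are illustrative):

package example

import (
	"context"
	"log/slog"

	"github.com/sablierapp/sablier/pkg/config"
	sablierk8s "github.com/sablierapp/sablier/pkg/provider/kubernetes"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newProvider wires an in-cluster clientset into the Sablier provider.
func newProvider(ctx context.Context) (*sablierk8s.Provider, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	return sablierk8s.New(ctx, client, slog.Default(), config.NewProviderConfig().Kubernetes)
}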
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/parse_name.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 | "strings"
7 |
8 | v1 "k8s.io/api/apps/v1"
9 | )
10 |
11 | type ParsedName struct {
12 | Original string
13 | Kind string // deployment or statefulset
14 | Namespace string
15 | Name string
16 | Replicas int32
17 | }
18 |
19 | type ParseOptions struct {
20 | Delimiter string
21 | }
22 |
23 | func ParseName(name string, opts ParseOptions) (ParsedName, error) {
24 |
25 | split := strings.Split(name, opts.Delimiter)
26 | if len(split) != 4 {
27 | return ParsedName{}, fmt.Errorf("invalid name [%s] should be: kind%snamespace%sname%sreplicas", name, opts.Delimiter, opts.Delimiter, opts.Delimiter)
28 | }
29 |
30 | replicas, err := strconv.Atoi(split[3])
31 | if err != nil {
32 | return ParsedName{}, err
33 | }
34 |
35 | return ParsedName{
36 | Original: name,
37 | Kind: split[0],
38 | Namespace: split[1],
39 | Name: split[2],
40 | Replicas: int32(replicas),
41 | }, nil
42 | }
43 |
44 | func DeploymentName(deployment *v1.Deployment, opts ParseOptions) ParsedName {
45 | kind := "deployment"
46 | namespace := deployment.Namespace
47 | name := deployment.Name
48 | // TODO: Use annotation for scale
49 | original := fmt.Sprintf("%s%s%s%s%s%s%d", kind, opts.Delimiter, namespace, opts.Delimiter, name, opts.Delimiter, 1)
50 |
51 | return ParsedName{
52 | Original: original,
53 | Kind: kind,
54 | Namespace: namespace,
55 | Name: name,
56 | Replicas: 1,
57 | }
58 | }
59 |
60 | func StatefulSetName(statefulSet *v1.StatefulSet, opts ParseOptions) ParsedName {
61 | kind := "statefulset"
62 | namespace := statefulSet.Namespace
63 | name := statefulSet.Name
64 | // TODO: Use annotation for scale
65 | original := fmt.Sprintf("%s%s%s%s%s%s%d", kind, opts.Delimiter, namespace, opts.Delimiter, name, opts.Delimiter, 1)
66 |
67 | return ParsedName{
68 | Original: original,
69 | Kind: kind,
70 | Namespace: namespace,
71 | Name: name,
72 | Replicas: 1,
73 | }
74 | }
75 |
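For reference, a minimal sketch of the name format this expects, assuming the configured delimiter is "_":

package main

import (
	"fmt"

	"github.com/sablierapp/sablier/pkg/provider/kubernetes"
)

func main() {
	parsed, err := kubernetes.ParseName("deployment_default_my-app_2", kubernetes.ParseOptions{Delimiter: "_"})
	if err != nil {
		panic(err)
	}
	// Prints: Kind=deployment Namespace=default Name=my-app Replicas=2
	fmt.Printf("Kind=%s Namespace=%s Name=%s Replicas=%d\n", parsed.Kind, parsed.Namespace, parsed.Name, parsed.Replicas)
}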
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/statefulset_events.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | appsv1 "k8s.io/api/apps/v1"
5 | core_v1 "k8s.io/api/core/v1"
6 | "k8s.io/client-go/informers"
7 | "k8s.io/client-go/tools/cache"
8 | "time"
9 | )
10 |
11 | func (p *Provider) watchStatefulSets(instance chan<- string) cache.SharedIndexInformer {
12 | handler := cache.ResourceEventHandlerFuncs{
13 | UpdateFunc: func(oldObj, newObj interface{}) {
14 | newStatefulSet := newObj.(*appsv1.StatefulSet)
15 | oldStatefulSet := oldObj.(*appsv1.StatefulSet)
16 |
17 | if newStatefulSet.ObjectMeta.ResourceVersion == oldStatefulSet.ObjectMeta.ResourceVersion {
18 | return
19 | }
20 |
21 | if *oldStatefulSet.Spec.Replicas == 0 {
22 | return
23 | }
24 |
25 | if *newStatefulSet.Spec.Replicas == 0 {
26 | parsed := StatefulSetName(newStatefulSet, ParseOptions{Delimiter: p.delimiter})
27 | instance <- parsed.Original
28 | }
29 | },
30 | DeleteFunc: func(obj interface{}) {
31 | deletedStatefulSet := obj.(*appsv1.StatefulSet)
32 | parsed := StatefulSetName(deletedStatefulSet, ParseOptions{Delimiter: p.delimiter})
33 | instance <- parsed.Original
34 | },
35 | }
36 | factory := informers.NewSharedInformerFactoryWithOptions(p.Client, 2*time.Second, informers.WithNamespace(core_v1.NamespaceAll))
37 | informer := factory.Apps().V1().StatefulSets().Informer()
38 |
39 | informer.AddEventHandler(handler)
40 | return informer
41 | }
42 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/statefulset_inspect.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/sablier"
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | )
8 |
9 | func (p *Provider) StatefulSetInspect(ctx context.Context, config ParsedName) (sablier.InstanceInfo, error) {
10 | ss, err := p.Client.AppsV1().StatefulSets(config.Namespace).Get(ctx, config.Name, metav1.GetOptions{})
11 | if err != nil {
12 | return sablier.InstanceInfo{}, err
13 | }
14 |
15 | if *ss.Spec.Replicas != 0 && *ss.Spec.Replicas == ss.Status.ReadyReplicas {
16 | return sablier.ReadyInstanceState(config.Original, ss.Status.ReadyReplicas), nil
17 | }
18 |
19 | return sablier.NotReadyInstanceState(config.Original, ss.Status.ReadyReplicas, config.Replicas), nil
20 | }
21 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/statefulset_list.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/sablier"
6 | v1 "k8s.io/api/apps/v1"
7 | corev1 "k8s.io/api/core/v1"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | )
10 |
11 | func (p *Provider) StatefulSetList(ctx context.Context) ([]sablier.InstanceConfiguration, error) {
12 | labelSelector := metav1.LabelSelector{
13 | MatchLabels: map[string]string{
14 | "sablier.enable": "true",
15 | },
16 | }
17 | statefulSets, err := p.Client.AppsV1().StatefulSets(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
18 | LabelSelector: metav1.FormatLabelSelector(&labelSelector),
19 | })
20 | if err != nil {
21 | return nil, err
22 | }
23 |
24 | instances := make([]sablier.InstanceConfiguration, 0, len(statefulSets.Items))
25 | for _, ss := range statefulSets.Items {
26 | instance := p.statefulSetToInstance(&ss)
27 | instances = append(instances, instance)
28 | }
29 |
30 | return instances, nil
31 | }
32 |
33 | func (p *Provider) statefulSetToInstance(ss *v1.StatefulSet) sablier.InstanceConfiguration {
34 | var group string
35 |
36 | if _, ok := ss.Labels["sablier.enable"]; ok {
37 | if g, ok := ss.Labels["sablier.group"]; ok {
38 | group = g
39 | } else {
40 | group = "default"
41 | }
42 | }
43 |
44 | parsed := StatefulSetName(ss, ParseOptions{Delimiter: p.delimiter})
45 |
46 | return sablier.InstanceConfiguration{
47 | Name: parsed.Original,
48 | Group: group,
49 | }
50 | }
51 |
52 | func (p *Provider) StatefulSetGroups(ctx context.Context) (map[string][]string, error) {
53 | labelSelector := metav1.LabelSelector{
54 | MatchLabels: map[string]string{
55 | "sablier.enable": "true",
56 | },
57 | }
58 | statefulSets, err := p.Client.AppsV1().StatefulSets(corev1.NamespaceAll).List(ctx, metav1.ListOptions{
59 | LabelSelector: metav1.FormatLabelSelector(&labelSelector),
60 | })
61 | if err != nil {
62 | return nil, err
63 | }
64 |
65 | groups := make(map[string][]string)
66 | for _, ss := range statefulSets.Items {
67 | groupName := ss.Labels["sablier.group"]
68 | if len(groupName) == 0 {
69 | groupName = "default"
70 | }
71 |
72 | group := groups[groupName]
73 | parsed := StatefulSetName(&ss, ParseOptions{Delimiter: p.delimiter})
74 | group = append(group, parsed.Original)
75 | groups[groupName] = group
76 | }
77 |
78 | return groups, nil
79 | }
80 |
--------------------------------------------------------------------------------
/pkg/provider/kubernetes/workload_scale.go:
--------------------------------------------------------------------------------
1 | package kubernetes
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | autoscalingv1 "k8s.io/api/autoscaling/v1"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | )
9 |
10 | type Workload interface {
11 | GetScale(ctx context.Context, workloadName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
12 | UpdateScale(ctx context.Context, workloadName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
13 | }
14 |
15 | func (p *Provider) scale(ctx context.Context, config ParsedName, replicas int32) error {
16 | var workload Workload
17 |
18 | switch config.Kind {
19 | case "deployment":
20 | workload = p.Client.AppsV1().Deployments(config.Namespace)
21 | case "statefulset":
22 | workload = p.Client.AppsV1().StatefulSets(config.Namespace)
23 | default:
24 | return fmt.Errorf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", config.Kind)
25 | }
26 |
27 | s, err := workload.GetScale(ctx, config.Name, metav1.GetOptions{})
28 | if err != nil {
29 | return err
30 | }
31 |
32 | s.Spec.Replicas = replicas
33 | _, err = workload.UpdateScale(ctx, config.Name, s, metav1.UpdateOptions{})
34 |
35 | return err
36 | }
37 |
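Scaling goes through the autoscaling/v1 Scale subresource, so only the replica count is mutated, never the full workload spec. A sketch of the same flow against a typed clientset, assuming a deployment "my-app" in namespace "default" (both illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeploymentToZero mirrors what Provider.scale does for kind "deployment".
func scaleDeploymentToZero(ctx context.Context, client kubernetes.Interface) error {
	deployments := client.AppsV1().Deployments("default")
	s, err := deployments.GetScale(ctx, "my-app", metav1.GetOptions{})
	if err != nil {
		return err
	}
	s.Spec.Replicas = 0
	_, err = deployments.UpdateScale(ctx, "my-app", s, metav1.UpdateOptions{})
	return err
}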
--------------------------------------------------------------------------------
/pkg/provider/types.go:
--------------------------------------------------------------------------------
1 | package provider
2 |
3 | type InstanceListOptions struct {
4 | All bool
5 | }
6 |
--------------------------------------------------------------------------------
/pkg/sablier/autostop.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "github.com/sablierapp/sablier/pkg/provider"
7 | "github.com/sablierapp/sablier/pkg/store"
8 | "golang.org/x/sync/errgroup"
9 | "log/slog"
10 | )
11 |
12 | // StopAllUnregisteredInstances stops all auto-discovered running instances that are not yet
13 | // registered as running instances by Sablier.
14 | // By default, Sablier does not stop instances that were already running; without this, an
15 | // initial request would be needed to trigger the scale to zero.
16 | func (s *Sablier) StopAllUnregisteredInstances(ctx context.Context) error {
17 | instances, err := s.provider.InstanceList(ctx, provider.InstanceListOptions{
18 | All: false, // Only running instances
19 | })
20 | if err != nil {
21 | return err
22 | }
23 |
24 | unregistered := make([]string, 0)
25 | for _, instance := range instances {
26 | _, err = s.sessions.Get(ctx, instance.Name)
27 | if errors.Is(err, store.ErrKeyNotFound) {
28 | unregistered = append(unregistered, instance.Name)
29 | }
30 | }
31 |
32 | s.l.DebugContext(ctx, "found instances to stop", slog.Any("instances", unregistered))
33 |
34 | waitGroup := errgroup.Group{}
35 |
36 | for _, name := range unregistered {
37 | waitGroup.Go(s.stopFunc(ctx, name))
38 | }
39 |
40 | return waitGroup.Wait()
41 | }
42 |
43 | func (s *Sablier) stopFunc(ctx context.Context, name string) func() error {
44 | return func() error {
45 | err := s.provider.InstanceStop(ctx, name)
46 | if err != nil {
47 | s.l.ErrorContext(ctx, "failed to stop instance", slog.String("instance", name), slog.Any("error", err))
48 | return err
49 | }
50 | s.l.InfoContext(ctx, "stopped unregistered instance", slog.String("instance", name), slog.String("reason", "instance is enabled but not started by Sablier"))
51 | return nil
52 | }
53 | }
54 |
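A sketch of how this might be invoked at startup, assuming an already-constructed Sablier; the real call site lives elsewhere in the repository:

package example

import (
	"context"
	"log/slog"

	"github.com/sablierapp/sablier/pkg/sablier"
)

// stopStrays scales down instances that are enabled for Sablier but were
// already running before Sablier began tracking sessions.
func stopStrays(ctx context.Context, s *sablier.Sablier, l *slog.Logger) {
	if err := s.StopAllUnregisteredInstances(ctx); err != nil {
		l.ErrorContext(ctx, "could not stop unregistered instances", slog.Any("error", err))
	}
}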
--------------------------------------------------------------------------------
/pkg/sablier/autostop_test.go:
--------------------------------------------------------------------------------
1 | package sablier_test
2 |
3 | import (
4 | "errors"
5 | "github.com/sablierapp/sablier/pkg/provider"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "github.com/sablierapp/sablier/pkg/store"
8 | "gotest.tools/v3/assert"
9 | "testing"
10 | )
11 |
12 | func TestStopAllUnregisteredInstances(t *testing.T) {
13 | s, sessions, p := setupSablier(t)
14 |
15 | ctx := t.Context()
16 |
17 | // Define instances and registered instances
18 | instances := []sablier.InstanceConfiguration{
19 | {Name: "instance1"},
20 | {Name: "instance2"},
21 | }
22 |
23 | sessions.EXPECT().Get(ctx, "instance1").Return(sablier.InstanceInfo{}, store.ErrKeyNotFound)
24 | sessions.EXPECT().Get(ctx, "instance2").Return(sablier.InstanceInfo{
25 | Name: "instance2",
26 | Status: sablier.InstanceStatusReady,
27 | }, nil)
28 |
29 | // Set up expectations for InstanceList
30 | p.EXPECT().InstanceList(ctx, provider.InstanceListOptions{
31 | All: false,
32 | }).Return(instances, nil)
33 |
34 | // Set up expectations for InstanceStop
35 | p.EXPECT().InstanceStop(ctx, "instance1").Return(nil)
36 |
37 | // Call the function under test
38 | err := s.StopAllUnregisteredInstances(ctx)
39 | assert.NilError(t, err)
40 | }
41 |
42 | func TestStopAllUnregisteredInstances_WithError(t *testing.T) {
43 | s, sessions, p := setupSablier(t)
44 | ctx := t.Context()
45 |
46 | // Define instances and registered instances
47 | instances := []sablier.InstanceConfiguration{
48 | {Name: "instance1"},
49 | {Name: "instance2"},
50 | }
51 |
52 | sessions.EXPECT().Get(ctx, "instance1").Return(sablier.InstanceInfo{}, store.ErrKeyNotFound)
53 | sessions.EXPECT().Get(ctx, "instance2").Return(sablier.InstanceInfo{
54 | Name: "instance2",
55 | Status: sablier.InstanceStatusReady,
56 | }, nil)
57 |
58 | // Set up expectations for InstanceList
59 | p.EXPECT().InstanceList(ctx, provider.InstanceListOptions{
60 | All: false,
61 | }).Return(instances, nil)
62 |
63 | // Set up expectations for InstanceStop with error
64 | p.EXPECT().InstanceStop(ctx, "instance1").Return(errors.New("stop error"))
65 |
66 | // Call the function under test
67 | err := s.StopAllUnregisteredInstances(ctx)
68 | assert.Error(t, err, "stop error")
69 | }
70 |
--------------------------------------------------------------------------------
/pkg/sablier/errors.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | )
7 |
8 | type ErrGroupNotFound struct {
9 | Group string
10 | AvailableGroups []string
11 | }
12 |
13 | func (g ErrGroupNotFound) Error() string {
14 | return fmt.Sprintf("group %s not found", g.Group)
15 | }
16 |
17 | type ErrRequestBinding struct {
18 | Err error
19 | }
20 |
21 | func (e ErrRequestBinding) Error() string {
22 | return e.Err.Error()
23 | }
24 |
25 | type ErrTimeout struct {
26 | Duration time.Duration
27 | }
28 |
29 | func (e ErrTimeout) Error() string {
30 | return fmt.Sprintf("timeout after %s", e.Duration)
31 | }
32 |
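Because these are plain value types, callers can match them with errors.As; a minimal sketch:

package example

import (
	"errors"
	"fmt"

	"github.com/sablierapp/sablier/pkg/sablier"
)

func describe(err error) string {
	var notFound sablier.ErrGroupNotFound
	if errors.As(err, &notFound) {
		return fmt.Sprintf("unknown group %q, available: %v", notFound.Group, notFound.AvailableGroups)
	}
	var timeout sablier.ErrTimeout
	if errors.As(err, &timeout) {
		return fmt.Sprintf("gave up after %s", timeout.Duration)
	}
	return err.Error()
}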
--------------------------------------------------------------------------------
/pkg/sablier/group_watch.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "log/slog"
6 | "time"
7 | )
8 |
9 | func (s *Sablier) GroupWatch(ctx context.Context) {
10 | // This should be changed to event based instead of polling.
11 | ticker := time.NewTicker(2 * time.Second)
12 | defer ticker.Stop()
13 | for {
14 | select {
15 | case <-ctx.Done():
16 | s.l.InfoContext(ctx, "stop watching groups", slog.Any("reason", ctx.Err()))
17 | return
18 | case <-ticker.C:
19 | groups, err := s.provider.InstanceGroups(ctx)
20 | if err != nil {
21 | s.l.ErrorContext(ctx, "cannot retrieve group from provider", slog.Any("reason", err))
22 | } else if groups != nil {
23 | s.SetGroups(groups)
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/pkg/sablier/instance.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | type InstanceStatus string
4 |
5 | const (
6 | InstanceStatusReady InstanceStatus = "ready"
7 | InstanceStatusNotReady InstanceStatus = "not-ready"
8 | InstanceStatusUnrecoverable InstanceStatus = "unrecoverable"
9 | )
10 |
11 | type InstanceInfo struct {
12 | Name string `json:"name"`
13 | CurrentReplicas int32 `json:"currentReplicas"`
14 | DesiredReplicas int32 `json:"desiredReplicas"`
15 | Status InstanceStatus `json:"status"`
16 | Message string `json:"message,omitempty"`
17 | }
18 |
19 | type InstanceConfiguration struct {
20 | Name string
21 | Group string
22 | }
23 |
24 | func (instance InstanceInfo) IsReady() bool {
25 | return instance.Status == InstanceStatusReady
26 | }
27 |
28 | func UnrecoverableInstanceState(name string, message string, desiredReplicas int32) InstanceInfo {
29 | return InstanceInfo{
30 | Name: name,
31 | CurrentReplicas: 0,
32 | DesiredReplicas: desiredReplicas,
33 | Status: InstanceStatusUnrecoverable,
34 | Message: message,
35 | }
36 | }
37 |
38 | func ReadyInstanceState(name string, replicas int32) InstanceInfo {
39 | return InstanceInfo{
40 | Name: name,
41 | CurrentReplicas: replicas,
42 | DesiredReplicas: replicas,
43 | Status: InstanceStatusReady,
44 | }
45 | }
46 |
47 | func NotReadyInstanceState(name string, currentReplicas int32, desiredReplicas int32) InstanceInfo {
48 | return InstanceInfo{
49 | Name: name,
50 | CurrentReplicas: currentReplicas,
51 | DesiredReplicas: desiredReplicas,
52 | Status: InstanceStatusNotReady,
53 | }
54 | }
55 |
--------------------------------------------------------------------------------
/pkg/sablier/instance_expired.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "log/slog"
6 | )
7 |
8 | func OnInstanceExpired(ctx context.Context, provider Provider, logger *slog.Logger) func(string) {
9 | return func(key string) {
10 | go func(key string) {
11 | logger.InfoContext(ctx, "instance expired", slog.String("instance", key))
12 | err := provider.InstanceStop(ctx, key)
13 | if err != nil {
14 | logger.ErrorContext(ctx, "instance expired could not be stopped from provider", slog.String("instance", key), slog.Any("error", err))
15 | }
16 | }(key)
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/pkg/sablier/instance_request.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "errors"
6 | "fmt"
7 | "github.com/sablierapp/sablier/pkg/store"
8 | "log/slog"
9 | "time"
10 | )
11 |
12 | func (s *Sablier) InstanceRequest(ctx context.Context, name string, duration time.Duration) (InstanceInfo, error) {
13 | if name == "" {
14 | return InstanceInfo{}, errors.New("instance name cannot be empty")
15 | }
16 |
17 | state, err := s.sessions.Get(ctx, name)
18 | if errors.Is(err, store.ErrKeyNotFound) {
19 | s.l.DebugContext(ctx, "request to start instance received", slog.String("instance", name))
20 |
21 | err = s.provider.InstanceStart(ctx, name)
22 | if err != nil {
23 | return InstanceInfo{}, err
24 | }
25 |
26 | state, err = s.provider.InstanceInspect(ctx, name)
27 | if err != nil {
28 | return InstanceInfo{}, err
29 | }
30 | s.l.DebugContext(ctx, "request to start instance status completed", slog.String("instance", name), slog.String("status", string(state.Status)))
31 | } else if err != nil {
32 | s.l.ErrorContext(ctx, "request to start instance failed", slog.String("instance", name), slog.Any("error", err))
33 | return InstanceInfo{}, fmt.Errorf("cannot retrieve instance from store: %w", err)
34 | } else if state.Status != InstanceStatusReady {
35 | s.l.DebugContext(ctx, "request to check instance status received", slog.String("instance", name), slog.String("current_status", string(state.Status)))
36 | state, err = s.provider.InstanceInspect(ctx, name)
37 | if err != nil {
38 | return InstanceInfo{}, err
39 | }
40 | s.l.DebugContext(ctx, "request to check instance status completed", slog.String("instance", name), slog.String("new_status", string(state.Status)))
41 | }
42 |
43 | s.l.DebugContext(ctx, "set expiration for instance", slog.String("instance", name), slog.Duration("expiration", duration))
44 |
45 | err = s.sessions.Put(ctx, state, duration)
46 | if err != nil {
47 | s.l.ErrorContext(ctx, "could not put instance to store, will not expire", slog.Any("error", err), slog.String("instance", state.Name))
48 | return InstanceInfo{}, fmt.Errorf("could not put instance to store: %w", err)
49 | }
50 | return state, nil
51 | }
52 |
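A usage sketch, assuming an instance named "whoami" (illustrative). A single call either starts the instance or refreshes its expiration:

package example

import (
	"context"
	"time"

	"github.com/sablierapp/sablier/pkg/sablier"
)

func ensureUp(ctx context.Context, s *sablier.Sablier) (bool, error) {
	info, err := s.InstanceRequest(ctx, "whoami", 5*time.Minute)
	if err != nil {
		return false, err
	}
	// Callers typically poll (or block) until the instance reports ready.
	return info.IsReady(), nil
}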
--------------------------------------------------------------------------------
/pkg/sablier/provider.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/provider"
6 | )
7 |
8 | //go:generate go tool mockgen -package providertest -source=provider.go -destination=../provider/providertest/mock_provider.go *
9 |
10 | type Provider interface {
11 | InstanceStart(ctx context.Context, name string) error
12 | InstanceStop(ctx context.Context, name string) error
13 | InstanceInspect(ctx context.Context, name string) (InstanceInfo, error)
14 | InstanceGroups(ctx context.Context) (map[string][]string, error)
15 | InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]InstanceConfiguration, error)
16 |
17 | NotifyInstanceStopped(ctx context.Context, instance chan<- string)
18 | }
19 |
--------------------------------------------------------------------------------
/pkg/sablier/sablier.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "github.com/google/go-cmp/cmp"
6 | "log/slog"
7 | "sync"
8 | "time"
9 | )
10 |
11 | type Sablier struct {
12 | provider Provider
13 | sessions Store
14 |
15 | groupsMu sync.RWMutex
16 | groups map[string][]string
17 |
18 | // BlockingRefreshFrequency is the frequency at which the instances are checked
19 | // against the provider. Defaults to 5 seconds.
20 | BlockingRefreshFrequency time.Duration
21 |
22 | l *slog.Logger
23 | }
24 |
25 | func New(logger *slog.Logger, store Store, provider Provider) *Sablier {
26 | return &Sablier{
27 | provider: provider,
28 | sessions: store,
29 | groupsMu: sync.RWMutex{},
30 | groups: map[string][]string{},
31 | l: logger,
32 | BlockingRefreshFrequency: 5 * time.Second,
33 | }
34 | }
35 |
36 | func (s *Sablier) SetGroups(groups map[string][]string) {
37 | s.groupsMu.Lock()
38 | defer s.groupsMu.Unlock()
39 | if groups == nil {
40 | groups = map[string][]string{}
41 | }
42 | if diff := cmp.Diff(s.groups, groups); diff != "" {
43 | // TODO: Change this log for a friendly logging, groups rarely change, so we can put some effort on displaying what changed
44 | s.l.Info("set groups", slog.Any("old", s.groups), slog.Any("new", groups), slog.Any("diff", diff))
45 | s.groups = groups
46 | }
47 | }
48 |
49 | func (s *Sablier) RemoveInstance(ctx context.Context, name string) error {
50 | return s.sessions.Delete(ctx, name)
51 | }
52 |
--------------------------------------------------------------------------------
/pkg/sablier/sablier_test.go:
--------------------------------------------------------------------------------
1 | package sablier_test
2 |
3 | import (
4 | "github.com/neilotoole/slogt"
5 | "github.com/sablierapp/sablier/pkg/provider/providertest"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "github.com/sablierapp/sablier/pkg/store/storetest"
8 | "go.uber.org/mock/gomock"
9 | "testing"
10 | )
11 |
12 | func setupSablier(t *testing.T) (*sablier.Sablier, *storetest.MockStore, *providertest.MockProvider) {
13 | t.Helper()
14 | ctrl := gomock.NewController(t)
15 |
16 | p := providertest.NewMockProvider(ctrl)
17 | s := storetest.NewMockStore(ctrl)
18 |
19 | m := sablier.New(slogt.New(t), s, p)
20 | return m, s, p
21 | }
22 |
--------------------------------------------------------------------------------
/pkg/sablier/session.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "encoding/json"
5 | "maps"
6 | )
7 |
8 | type SessionState struct {
9 | Instances map[string]InstanceInfoWithError `json:"instances"`
10 | }
11 |
12 | func (s *SessionState) IsReady() bool {
13 | if s.Instances == nil {
14 | s.Instances = map[string]InstanceInfoWithError{}
15 | }
16 |
17 | for _, v := range s.Instances {
18 | if v.Error != nil || v.Instance.Status != InstanceStatusReady {
19 | return false
20 | }
21 | }
22 |
23 | return true
24 | }
25 |
26 | func (s *SessionState) Status() string {
27 | if s.IsReady() {
28 | return "ready"
29 | }
30 |
31 | return "not-ready"
32 | }
33 |
34 | func (s *SessionState) MarshalJSON() ([]byte, error) {
35 | instances := maps.Values(s.Instances)
36 |
37 | return json.Marshal(map[string]any{
38 | "instances": instances,
39 | "status": s.Status(),
40 | })
41 | }
42 |
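A sketch of the resulting JSON shape; the "status" field is derived at marshal time rather than stored on the struct:

package example

import (
	"encoding/json"
	"fmt"

	"github.com/sablierapp/sablier/pkg/sablier"
)

func printSession() error {
	state := sablier.SessionState{
		Instances: map[string]sablier.InstanceInfoWithError{
			"whoami": {Instance: sablier.ReadyInstanceState("whoami", 1)},
		},
	}
	// MarshalJSON is declared on *SessionState, so marshal a pointer.
	b, err := json.Marshal(&state)
	if err != nil {
		return err
	}
	fmt.Println(string(b)) // e.g. {"instances":[...],"status":"ready"}
	return nil
}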
--------------------------------------------------------------------------------
/pkg/sablier/store.go:
--------------------------------------------------------------------------------
1 | package sablier
2 |
3 | import (
4 | "context"
5 | "time"
6 | )
7 |
8 | //go:generate go tool mockgen -package storetest -source=store.go -destination=../store/storetest/mocks_store.go *
9 |
10 | type Store interface {
11 | Get(context.Context, string) (InstanceInfo, error)
12 | Put(context.Context, InstanceInfo, time.Duration) error
13 | Delete(context.Context, string) error
14 | OnExpire(context.Context, func(string)) error
15 | }
16 |
--------------------------------------------------------------------------------
/pkg/store/inmemory/inmemory.go:
--------------------------------------------------------------------------------
1 | package inmemory
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "github.com/sablierapp/sablier/pkg/store"
8 | "github.com/sablierapp/sablier/pkg/tinykv"
9 | "time"
10 | )
11 |
12 | var _ sablier.Store = (*InMemory)(nil)
13 | var _ json.Marshaler = (*InMemory)(nil)
14 | var _ json.Unmarshaler = (*InMemory)(nil)
15 |
16 | func NewInMemory() sablier.Store {
17 | return &InMemory{
18 | kv: tinykv.New[sablier.InstanceInfo](1*time.Second, nil),
19 | }
20 | }
21 |
22 | type InMemory struct {
23 | kv tinykv.KV[sablier.InstanceInfo]
24 | }
25 |
26 | func (i InMemory) UnmarshalJSON(bytes []byte) error {
27 | return i.kv.UnmarshalJSON(bytes)
28 | }
29 |
30 | func (i InMemory) MarshalJSON() ([]byte, error) {
31 | return i.kv.MarshalJSON()
32 | }
33 |
34 | func (i InMemory) Get(_ context.Context, s string) (sablier.InstanceInfo, error) {
35 | val, ok := i.kv.Get(s)
36 | if !ok {
37 | return sablier.InstanceInfo{}, store.ErrKeyNotFound
38 | }
39 | return val, nil
40 | }
41 |
42 | func (i InMemory) Put(_ context.Context, state sablier.InstanceInfo, duration time.Duration) error {
43 | return i.kv.Put(state.Name, state, duration)
44 | }
45 |
46 | func (i InMemory) Delete(_ context.Context, s string) error {
47 | i.kv.Delete(s)
48 | return nil
49 | }
50 |
51 | func (i InMemory) OnExpire(_ context.Context, f func(string)) error {
52 | i.kv.SetOnExpire(func(k string, _ sablier.InstanceInfo) {
53 | f(k)
54 | })
55 | return nil
56 | }
57 |
--------------------------------------------------------------------------------
/pkg/store/inmemory/inmemory_test.go:
--------------------------------------------------------------------------------
1 | package inmemory
2 |
3 | import (
4 | "context"
5 | "github.com/sablierapp/sablier/pkg/sablier"
6 | "github.com/sablierapp/sablier/pkg/store"
7 | "gotest.tools/v3/assert"
8 | "testing"
9 | "time"
10 | )
11 |
12 | func TestInMemory(t *testing.T) {
13 | t.Parallel()
14 | t.Run("InMemoryErrNotFound", func(t *testing.T) {
15 | t.Parallel()
16 | ctx := context.Background()
17 | vk := NewInMemory()
18 |
19 | _, err := vk.Get(ctx, "test")
20 | assert.ErrorIs(t, err, store.ErrKeyNotFound)
21 | })
22 | t.Run("InMemoryPut", func(t *testing.T) {
23 | t.Parallel()
24 | ctx := context.Background()
25 | vk := NewInMemory()
26 |
27 | err := vk.Put(ctx, sablier.InstanceInfo{Name: "test"}, 1*time.Second)
28 | assert.NilError(t, err)
29 |
30 | i, err := vk.Get(ctx, "test")
31 | assert.NilError(t, err)
32 | assert.Equal(t, i.Name, "test")
33 |
34 | <-time.After(2 * time.Second)
35 | _, err = vk.Get(ctx, "test")
36 | assert.ErrorIs(t, err, store.ErrKeyNotFound)
37 | })
38 | t.Run("InMemoryDelete", func(t *testing.T) {
39 | t.Parallel()
40 | ctx := context.Background()
41 | vk := NewInMemory()
42 |
43 | err := vk.Put(ctx, sablier.InstanceInfo{Name: "test"}, 30*time.Second)
44 | assert.NilError(t, err)
45 |
46 | i, err := vk.Get(ctx, "test")
47 | assert.NilError(t, err)
48 | assert.Equal(t, i.Name, "test")
49 |
50 | err = vk.Delete(ctx, "test")
51 | assert.NilError(t, err)
52 |
53 | _, err = vk.Get(ctx, "test")
54 | assert.ErrorIs(t, err, store.ErrKeyNotFound)
55 | })
56 | t.Run("InMemoryOnExpire", func(t *testing.T) {
57 | t.Parallel()
58 | vk := NewInMemory()
59 |
60 | ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
61 | defer cancel()
62 |
63 | expirations := make(chan string)
64 | err := vk.OnExpire(ctx, func(key string) {
65 | expirations <- key
66 | })
67 | assert.NilError(t, err)
68 |
69 | err = vk.Put(ctx, sablier.InstanceInfo{Name: "test"}, 1*time.Second)
70 | assert.NilError(t, err)
71 | expired := <-expirations
72 | assert.Equal(t, expired, "test")
73 | })
74 | }
75 |
--------------------------------------------------------------------------------
/pkg/store/store.go:
--------------------------------------------------------------------------------
1 | package store
2 |
3 | import (
4 | "errors"
5 | )
6 |
7 | var ErrKeyNotFound = errors.New("key not found")
8 |
--------------------------------------------------------------------------------
/pkg/store/valkey/valkey.go:
--------------------------------------------------------------------------------
1 | package valkey
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "github.com/sablierapp/sablier/pkg/sablier"
7 | "github.com/sablierapp/sablier/pkg/store"
8 | "github.com/valkey-io/valkey-go"
9 | "log/slog"
10 | "strings"
11 | "time"
12 | )
13 |
14 | var _ sablier.Store = (*ValKey)(nil)
15 |
16 | type ValKey struct {
17 | Client valkey.Client
18 | }
19 |
20 | func New(ctx context.Context, client valkey.Client) (sablier.Store, error) {
21 | err := client.Do(ctx, client.B().Ping().Build()).Error()
22 | if err != nil {
23 | return nil, err
24 | }
25 |
26 | err = client.Do(ctx, client.B().ConfigSet().ParameterValue().
27 | ParameterValue("notify-keyspace-events", "KEx").
28 | Build()).Error()
29 | if err != nil {
30 | return nil, err
31 | }
32 |
33 | return &ValKey{Client: client}, nil
34 | }
35 |
36 | func (v *ValKey) Get(ctx context.Context, s string) (sablier.InstanceInfo, error) {
37 | b, err := v.Client.Do(ctx, v.Client.B().Get().Key(s).Build()).AsBytes()
38 | if valkey.IsValkeyNil(err) {
39 | return sablier.InstanceInfo{}, store.ErrKeyNotFound
40 | }
41 | if err != nil {
42 | return sablier.InstanceInfo{}, err
43 | }
44 |
45 | var i sablier.InstanceInfo
46 | err = json.Unmarshal(b, &i)
47 | if err != nil {
48 | return sablier.InstanceInfo{}, err
49 | }
50 |
51 | return i, nil
52 | }
53 |
54 | func (v *ValKey) Put(ctx context.Context, state sablier.InstanceInfo, duration time.Duration) error {
55 | value, err := json.Marshal(state)
56 | if err != nil {
57 | return err
58 | }
59 |
60 | return v.Client.Do(ctx, v.Client.B().Set().Key(state.Name).Value(string(value)).Ex(duration).Build()).Error()
61 | }
62 |
63 | func (v *ValKey) Delete(ctx context.Context, s string) error {
64 | return v.Client.Do(ctx, v.Client.B().Del().Key(s).Build()).Error()
65 | }
66 |
67 | func (v *ValKey) OnExpire(ctx context.Context, f func(string)) error {
68 | go func() {
69 | err := v.Client.Receive(ctx, v.Client.B().Psubscribe().Pattern("__key*__:*").Build(), func(msg valkey.PubSubMessage) {
70 | if msg.Message == "expired" {
71 | split := strings.Split(msg.Channel, ":")
72 | key := split[len(split)-1]
73 | f(key)
74 | }
75 | })
76 | if err != nil {
77 | slog.Error("error subscribing", slog.Any("error", err))
78 | }
79 | }()
80 | return nil
81 | }
82 |
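A construction sketch, assuming a local Valkey on the default port:

package example

import (
	"context"

	"github.com/sablierapp/sablier/pkg/sablier"
	storevalkey "github.com/sablierapp/sablier/pkg/store/valkey"
	"github.com/valkey-io/valkey-go"
)

func newValkeyStore(ctx context.Context) (sablier.Store, error) {
	client, err := valkey.NewClient(valkey.ClientOption{
		InitAddress: []string{"localhost:6379"},
	})
	if err != nil {
		return nil, err
	}
	// New pings the server and enables keyspace expiry notifications.
	return storevalkey.New(ctx, client)
}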
--------------------------------------------------------------------------------
/pkg/theme/errors.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import (
4 | "fmt"
5 | )
6 |
7 | type ErrThemeNotFound struct {
8 | Theme string
9 | AvailableThemes []string
10 | }
11 |
12 | func (t ErrThemeNotFound) Error() string {
13 | return fmt.Sprintf("theme %s not found", t.Theme)
14 | }
15 |
--------------------------------------------------------------------------------
/pkg/theme/list.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import "strings"
4 |
5 | // List all the loaded themes
6 | func (t *Themes) List() []string {
7 | themes := make([]string, 0)
8 |
9 | for _, template := range t.themes.Templates() {
10 | if strings.HasSuffix(template.Name(), ".html") {
11 | themes = append(themes, strings.TrimSuffix(template.Name(), ".html"))
12 | }
13 | }
14 |
15 | return themes
16 | }
17 |
--------------------------------------------------------------------------------
/pkg/theme/list_test.go:
--------------------------------------------------------------------------------
1 | package theme_test
2 |
3 | import (
4 | "github.com/neilotoole/slogt"
5 | "github.com/sablierapp/sablier/pkg/theme"
6 | "testing"
7 | "testing/fstest"
8 |
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
12 | func TestList(t *testing.T) {
13 | themes, err := theme.NewWithCustomThemes(
14 | fstest.MapFS{
15 | "theme1.html": &fstest.MapFile{},
16 | "inner/theme2.html": &fstest.MapFile{},
17 | }, slogt.New(t))
18 | if err != nil {
19 | t.Error(err)
20 | return
21 | }
22 |
23 | list := themes.List()
24 |
25 | assert.ElementsMatch(t, []string{"theme1", "theme2", "ghost", "hacker-terminal", "matrix", "shuffle"}, list)
26 | }
27 |
--------------------------------------------------------------------------------
/pkg/theme/parse.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import (
4 | "io/fs"
5 | "log/slog"
6 | "strings"
7 | )
8 |
9 | func (t *Themes) ParseTemplatesFS(f fs.FS) error {
10 | err := fs.WalkDir(f, ".", func(path string, d fs.DirEntry, err error) error {
11 | if strings.HasSuffix(path, ".html") {
12 | t.l.Info("theme found", slog.String("path", path))
13 | _, err = t.themes.ParseFS(f, path)
14 | if err != nil {
15 | t.l.Info("cannot add theme", slog.String("path", path), slog.Any("reason", err))
16 | return err
17 | }
18 |
19 | t.l.Info("successfully added theme", slog.String("path", path))
20 | }
21 | return err
22 | })
23 |
24 | return err
25 | }
26 |
--------------------------------------------------------------------------------
/pkg/theme/render.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import (
4 | "fmt"
5 | "github.com/sablierapp/sablier/pkg/version"
6 | "io"
7 |
8 | "github.com/sablierapp/sablier/pkg/durations"
9 | )
10 |
11 | func (t *Themes) Render(name string, opts Options, writer io.Writer) error {
12 | var instances []Instance
13 |
14 | if opts.ShowDetails {
15 | instances = opts.InstanceStates
16 | } else {
17 | instances = []Instance{}
18 | }
19 |
20 | options := templateOptions{
21 | DisplayName: opts.DisplayName,
22 | InstanceStates: instances,
23 | SessionDuration: durations.Humanize(opts.SessionDuration),
24 | RefreshFrequency: fmt.Sprintf("%d", int64(opts.RefreshFrequency.Seconds())),
25 | Version: version.Version,
26 | }
27 |
28 | tpl := t.themes.Lookup(fmt.Sprintf("%s.html", name))
29 | if tpl == nil {
30 | return ErrThemeNotFound{
31 | Theme: name,
32 | AvailableThemes: t.List(),
33 | }
34 | }
35 |
36 | return tpl.Execute(writer, options)
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/theme/theme.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import (
4 | "embed"
5 | "html/template"
6 | "io/fs"
7 | "log/slog"
8 | )
9 |
10 | // List of built-in themes
11 | //
12 | //go:embed embedded/*.html
13 | var embeddedThemesFS embed.FS
14 |
15 | type Themes struct {
16 | themes *template.Template
17 | l *slog.Logger
18 | }
19 |
20 | func New(logger *slog.Logger) (*Themes, error) {
21 | themes := &Themes{
22 | themes: template.New("root"),
23 | l: logger,
24 | }
25 |
26 | err := themes.ParseTemplatesFS(embeddedThemesFS)
27 | if err != nil {
28 | // Should never happen
29 | logger.Error("could not parse embedded templates", slog.Any("reason", err))
30 | return nil, err
31 | }
32 |
33 | return themes, nil
34 | }
35 |
36 | func NewWithCustomThemes(custom fs.FS, logger *slog.Logger) (*Themes, error) {
37 | themes := &Themes{
38 | themes: template.New("root"),
39 | l: logger,
40 | }
41 |
42 | err := themes.ParseTemplatesFS(embeddedThemesFS)
43 | if err != nil {
44 | // Should never happen
45 | logger.Error("could not parse embedded templates", slog.Any("reason", err))
46 | return nil, err
47 | }
48 |
49 | err = themes.ParseTemplatesFS(custom)
50 | if err != nil {
51 | logger.Error("could not parse custom templates", slog.Any("reason", err))
52 | return nil, err
53 | }
54 |
55 | return themes, nil
56 | }
57 |
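A usage sketch, assuming custom themes live under /etc/sablier/themes (illustrative):

package example

import (
	"log/slog"
	"os"

	"github.com/sablierapp/sablier/pkg/theme"
)

func renderGhost() error {
	themes, err := theme.NewWithCustomThemes(os.DirFS("/etc/sablier/themes"), slog.Default())
	if err != nil {
		return err
	}
	// "ghost" is one of the embedded themes; unknown names yield ErrThemeNotFound.
	return themes.Render("ghost", theme.Options{DisplayName: "My App"}, os.Stdout)
}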
--------------------------------------------------------------------------------
/pkg/theme/types.go:
--------------------------------------------------------------------------------
1 | package theme
2 |
3 | import "time"
4 |
5 | // Theme represents an available theme
6 | type Theme struct {
7 | Name string
8 | Embedded bool
9 | }
10 |
11 | // Instance holds the current state about an instance
12 | type Instance struct {
13 | Name string
14 | Status string
15 | Error error
16 | CurrentReplicas int32
17 | DesiredReplicas int32
18 | }
19 |
20 | // Options holds the customizable input to template
21 | type Options struct {
22 | DisplayName string
23 | ShowDetails bool
24 | InstanceStates []Instance
25 | SessionDuration time.Duration
26 | RefreshFrequency time.Duration
27 | }
28 |
29 | // templateOptions holds the internal options used to template
30 | type templateOptions struct {
31 | DisplayName string
32 | InstanceStates []Instance
33 | SessionDuration string
34 | RefreshFrequency string
35 | Version string
36 | }
37 |
--------------------------------------------------------------------------------
/pkg/tinykv/retry.go:
--------------------------------------------------------------------------------
1 | package tinykv
2 |
3 | import (
4 | "github.com/pkg/errors"
5 | )
6 |
7 | // try runs a function and recovers from a panic if one
8 | // happens, returning the recovered value as an error.
9 | func try(f func() error) (errRun error) {
10 | defer func() {
11 | if e := recover(); e != nil {
12 | if err, ok := e.(error); ok {
13 | errRun = err
14 | return
15 | }
16 | errRun = errors.Errorf("RECOVERED, UNKNOWN ERROR: %+v", e)
17 | }
18 | }()
19 | return f()
20 | }
21 |
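A minimal in-package sketch of the recovery behaviour (try is unexported, so the sketch lives in package tinykv):

package tinykv

import "errors"

func exampleTry() error {
	// The panic below is recovered and surfaced as the returned error.
	return try(func() error {
		panic(errors.New("boom"))
	})
}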
--------------------------------------------------------------------------------
/pkg/version/info.go:
--------------------------------------------------------------------------------
1 | package version
2 |
3 | import (
4 | "bytes"
5 | "fmt"
6 | "runtime"
7 | "strings"
8 | "text/template"
9 | )
10 |
11 | // Build information. Populated at build-time.
12 | var (
13 | Version string
14 | Revision string
15 | Branch string
16 | BuildUser string
17 | BuildDate string
18 | GoVersion = runtime.Version()
19 | )
20 |
21 | // versionInfoTmpl contains the template used by Info.
22 | var versionInfoTmpl = `
23 | {{.program}}, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
24 | build user: {{.buildUser}}
25 | build date: {{.buildDate}}
26 | go version: {{.goVersion}}
27 | platform: {{.platform}}
28 | `
29 |
30 | // Print returns version information.
31 | func Print(program string) string {
32 | m := map[string]string{
33 | "program": program,
34 | "version": Version,
35 | "revision": Revision,
36 | "branch": Branch,
37 | "buildUser": BuildUser,
38 | "buildDate": BuildDate,
39 | "goVersion": GoVersion,
40 | "platform": runtime.GOOS + "/" + runtime.GOARCH,
41 | }
42 | t := template.Must(template.New("version").Parse(versionInfoTmpl))
43 |
44 | var buf bytes.Buffer
45 | if err := t.ExecuteTemplate(&buf, "version", m); err != nil {
46 | panic(err)
47 | }
48 | return strings.TrimSpace(buf.String())
49 | }
50 |
51 | // Info returns version, branch and revision information.
52 | func Info() string {
53 | return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, Revision)
54 | }
55 |
56 | // BuildContext returns goVersion, buildUser and buildDate information.
57 | func BuildContext() string {
58 | return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
59 | }
60 |
61 | func Map() map[string]string {
62 | return map[string]string{
63 | "program": "sablier",
64 | "version": Version,
65 | "revision": Revision,
66 | "branch": Branch,
67 | "buildUser": BuildUser,
68 | "buildDate": BuildDate,
69 | "goVersion": GoVersion,
70 | "platform": runtime.GOOS + "/" + runtime.GOARCH,
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/plugins/caddy/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 | traefik
--------------------------------------------------------------------------------
/plugins/caddy/Dockerfile:
--------------------------------------------------------------------------------
1 | ARG CADDY_VERSION=2.9.1
2 | FROM caddy:${CADDY_VERSION}-builder AS builder
3 |
4 | COPY . .
5 |
6 | RUN xcaddy build \
7 | --with github.com/sablierapp/sablier/plugins/caddy=.
8 |
9 | FROM caddy:${CADDY_VERSION}
10 |
11 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy
--------------------------------------------------------------------------------
/plugins/caddy/e2e/docker/Caddyfile:
--------------------------------------------------------------------------------
1 | :80 {
2 | route /dynamic/whoami {
3 | sablier http://sablier:10000 {
4 | names docker_classic_e2e-whoami-1
5 | session_duration 1m
6 | dynamic {
7 | display_name Dynamic Whoami
8 | theme hacker-terminal
9 | }
10 | }
11 | reverse_proxy whoami:80
12 | }
13 |
14 | route /blocking/whoami {
15 | sablier http://sablier:10000 {
16 | names docker_classic_e2e-whoami-1
17 | session_duration 1m
18 | blocking {
19 | timeout 30s
20 | }
21 | }
22 | reverse_proxy whoami:80
23 | }
24 |
25 | route /multiple/whoami {
26 | sablier http://sablier:10000 {
27 | names docker_classic_e2e-whoami-1 docker_classic_e2e-nginx-1
28 | session_duration 1m
29 | dynamic {
30 | display_name Multiple Whoami
31 | theme hacker-terminal
32 | }
33 | }
34 | reverse_proxy whoami:80
35 | }
36 |
37 | route /multiple/nginx {
38 | sablier http://sablier:10000 {
39 | names docker_classic_e2e-whoami-1 docker_classic_e2e-nginx-1
40 | session_duration 1m
41 | dynamic {
42 | display_name Multiple Whoami
43 | theme hacker-terminal
44 | }
45 | }
46 | reverse_proxy nginx:80
47 | }
48 |
49 | route /healthy/nginx {
50 | sablier http://sablier:10000 {
51 | names docker_classic_e2e-nginx-1
52 | session_duration 1m
53 | dynamic {
54 | display_name Healthy Nginx
55 | theme hacker-terminal
56 | }
57 | }
58 | reverse_proxy nginx:80
59 | }
60 |
61 | route /group {
62 | sablier http://sablier:10000 {
63 | group E2E
64 | session_duration 1m
65 | dynamic {
66 | display_name Group E2E
67 | theme hacker-terminal
68 | }
69 | }
70 | reverse_proxy whoami:80
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/plugins/caddy/e2e/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 |
3 | services:
4 | proxy:
5 | image: caddy:local
6 | ports:
7 | - "8080:80"
8 | volumes:
9 | - ./Caddyfile:/etc/caddy/Caddyfile:ro
10 | restart: "no"
11 |
12 | sablier:
13 | image: sablierapp/sablier:local
14 | command:
15 | - start
16 | - --provider.name=docker
17 | - --logging.level=trace
18 | volumes:
19 | - '/var/run/docker.sock:/var/run/docker.sock'
20 |
21 | whoami:
22 | image: acouvreur/whoami:v1.10.2
23 | healthcheck:
24 | test: [ "CMD", "curl", "-f", "http://localhost" ]
25 | interval: 5s
26 | labels:
27 | - sablier.enable=true
28 | - sablier.group=E2E
29 |
30 | nginx:
31 | image: nginx:1.27.1
32 | healthcheck:
33 | test: ["CMD", "curl", "-f", "http://localhost"]
34 | interval: 5s
35 | labels:
36 | - sablier.enable=true
37 | - sablier.group=E2E
--------------------------------------------------------------------------------
/plugins/caddy/e2e/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=docker-compose.yml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
14 | }
15 |
16 | destroy_docker_classic() {
17 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
18 | }
19 |
20 | run_docker_classic_test() {
21 | echo "Running Docker Classic Test: $1"
22 | prepare_docker_classic
23 | sleep 2
24 | go clean -testcache
25 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
26 | errors=1
27 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier proxy
28 | fi
29 | destroy_docker_classic
30 | }
31 |
32 | trap destroy_docker_classic EXIT
33 |
34 | run_docker_classic_test Test_Dynamic
35 | run_docker_classic_test Test_Blocking
36 | run_docker_classic_test Test_Multiple
37 | run_docker_classic_test Test_Healthy
38 | run_docker_classic_test Test_Group
39 |
40 | exit $errors
--------------------------------------------------------------------------------
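The script assumes both `:local` images already exist on the host; nothing in it builds them. A hypothetical end-to-end invocation from the repository root:

```bash
# Hypothetical prerequisites, then the suite itself.
docker build -t sablierapp/sablier:local .    # Sablier server image
docker build -t caddy:local plugins/caddy     # Caddy with the plugin baked in
(cd plugins/caddy/e2e/docker && ./run.sh)
```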
/plugins/caddy/e2e/docker_swarm/Caddyfile:
--------------------------------------------------------------------------------
1 | :80 {
2 | route /dynamic/whoami {
3 | sablier http://tasks.sablier:10000 {
4 | names DOCKER_SWARM_E2E_whoami
5 | session_duration 1m
6 | dynamic {
7 | display_name Dynamic Whoami
8 | theme hacker-terminal
9 | }
10 | }
11 | reverse_proxy whoami:80
12 | }
13 |
14 | route /blocking/whoami {
15 | sablier http://tasks.sablier:10000 {
16 | names DOCKER_SWARM_E2E_whoami
17 | session_duration 1m
18 | blocking {
19 | timeout 30s
20 | }
21 | }
22 | reverse_proxy whoami:80
23 | }
24 |
25 | route /multiple/whoami {
26 | sablier http://tasks.sablier:10000 {
27 | names DOCKER_SWARM_E2E_whoami DOCKER_SWARM_E2E_nginx
28 | session_duration 1m
29 | dynamic {
30 | display_name Multiple Whoami
31 | theme hacker-terminal
32 | }
33 | }
34 | reverse_proxy whoami:80
35 | }
36 |
37 | route /multiple/nginx {
38 | sablier http://tasks.sablier:10000 {
39 | names DOCKER_SWARM_E2E_whoami DOCKER_SWARM_E2E_nginx
40 | session_duration 1m
41 | dynamic {
42 | display_name Multiple Whoami
43 | theme hacker-terminal
44 | }
45 | }
46 | reverse_proxy nginx:80
47 | }
48 |
49 | route /healthy/nginx {
50 | sablier http://tasks.sablier:10000 {
51 | names DOCKER_SWARM_E2E_nginx
52 | session_duration 1m
53 | dynamic {
54 | display_name Healthy Nginx
55 | theme hacker-terminal
56 | }
57 | }
58 | reverse_proxy nginx:80
59 | }
60 |
61 | route /group {
62 | sablier http://tasks.sablier:10000 {
63 | group E2E
64 | session_duration 1m
65 | dynamic {
66 | display_name Group E2E
67 | theme hacker-terminal
68 | }
69 | }
70 | reverse_proxy whoami:80
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/plugins/caddy/e2e/docker_swarm/docker-stack.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 |
3 | services:
4 | proxy:
5 | image: caddy:local
6 | ports:
7 | - target: 80
8 | published: 8080
9 | protocol: tcp
10 | mode: host # Won't work in GitHub Actions otherwise
11 | volumes:
12 | - ./Caddyfile:/etc/caddy/Caddyfile:ro
13 | deploy:
14 | restart_policy:
15 | condition: none # Do not restart on setup failure
16 |
17 | sablier:
18 | image: sablierapp/sablier:local
19 | command:
20 | - start
21 | - --provider.name=swarm
22 | - --logging.level=trace
23 | volumes:
24 | - '/var/run/docker.sock:/var/run/docker.sock'
25 |
26 | whoami:
27 | image: acouvreur/whoami:v1.10.2
28 | healthcheck:
29 | test: [ "CMD", "curl", "-f", "http://localhost" ]
30 | interval: 5s
31 | deploy:
32 | labels:
33 | - sablier.enable=true
34 | - sablier.group=E2E
35 | replicas: 0
36 |
37 | nginx:
38 | image: nginx:1.23.1
39 | healthcheck:
40 | test: ["CMD", "curl", "-f", "http://localhost"]
41 | interval: 5s
42 | deploy:
43 | labels:
44 | - sablier.enable=true
45 | - sablier.group=E2E
46 | replicas: 0
--------------------------------------------------------------------------------
/plugins/caddy/e2e/docker_swarm/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_STACK_FILE=docker-stack.yml
4 | DOCKER_STACK_NAME=DOCKER_SWARM_E2E
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_swarm() {
12 | docker swarm init
13 | }
14 |
15 | prepare_docker_stack() {
16 | docker stack deploy --compose-file $DOCKER_STACK_FILE ${DOCKER_STACK_NAME}
17 | docker run --rm -it -v /var/run/docker.sock:/var/run/docker.sock sudobmitch/docker-stack-wait -t 60 ${DOCKER_STACK_NAME}
18 | }
19 |
20 | destroy_docker_stack() {
21 | docker stack rm ${DOCKER_STACK_NAME}
22 | # Sometimes the network is not cleaned up properly, see https://github.com/moby/moby/issues/30942#issuecomment-540699206
23 | until [ -z "$(docker stack ps ${DOCKER_STACK_NAME} -q)" ]; do sleep 1; done
24 | }
25 |
26 | destroy_docker_swarm() {
27 | docker swarm leave -f || true
28 | }
29 |
30 | run_docker_swarm_test() {
31 | echo "Running Docker Swarm Test: $1"
32 | prepare_docker_stack
33 | sleep 10
34 | go clean -testcache
35 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
36 | errors=1
37 | docker service logs ${DOCKER_STACK_NAME}_sablier
38 | docker service logs ${DOCKER_STACK_NAME}_proxy
39 | fi
40 | destroy_docker_stack
41 | }
42 |
43 | trap destroy_docker_swarm EXIT
44 |
45 | prepare_docker_swarm
46 | run_docker_swarm_test Test_Dynamic
47 | run_docker_swarm_test Test_Blocking
48 | run_docker_swarm_test Test_Multiple
49 | run_docker_swarm_test Test_Healthy
50 | run_docker_swarm_test Test_Group
51 |
52 | exit $errors
--------------------------------------------------------------------------------
/plugins/caddy/e2e/kubernetes/Caddyfile:
--------------------------------------------------------------------------------
1 | :80 {
2 | route /dynamic/whoami {
3 | sablier url=http://tasks.sablier:10000 names=e2e-whoami-1 session_duration=1m dynamic.display_name=Dynamic-Whoami dynamic.theme=hacker-terminal
4 | reverse_proxy whoami:80
5 | }
6 |
7 | route /blocking/whoami {
8 | sablier url=http://tasks.sablier:10000 names=e2e-whoami-1 session_duration=1m blocking.timeout=30s
9 | reverse_proxy whoami:80
10 | }
11 |
12 | route /multiple {
13 | sablier url=http://tasks.sablier:10000 names=e2e-whoami-1,e2e-nginx-1 session_duration=1m dynamic.display_name=Multiple-Whoami dynamic.theme=hacker-terminal
14 | reverse_proxy /multiple/whoami whoami:80
15 | reverse_proxy /multiple/nginx nginx:80
16 | }
17 |
18 | route /healthy/nginx {
19 | sablier url=http://tasks.sablier:10000 names=e2e-nginx-1 session_duration=1m dynamic.display_name=Healthy-Nginx dynamic.theme=hacker-terminal
20 | reverse_proxy nginx:80
21 | }
22 |
23 | route /group {
24 | sablier url=http://tasks.sablier:10000 {
25 | group E2E
26 | session_duration 1m
27 | dynamic {
28 | display_name Group E2E
29 | theme hacker-terminal
30 | }
31 | }
32 | reverse_proxy whoami:80
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/plugins/caddy/e2e/kubernetes/docker-kubernetes.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | server:
4 | image: "rancher/k3s:v1.23.12-k3s1"
5 | command: server --no-deploy traefik
6 | tmpfs:
7 | - /run
8 | - /var/run
9 | ulimits:
10 | nproc: 65535
11 | nofile:
12 | soft: 65535
13 | hard: 65535
14 | privileged: true
15 | restart: always
16 | environment:
17 | - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
18 | - K3S_KUBECONFIG_MODE=666
19 | volumes:
20 | # This is just so that we get the kubeconfig file out
21 | - .:/output
22 | - '../../..:/plugins-local/src/github.com/sablierapp/sablier'
23 | ports:
24 | - 6443:6443 # Kubernetes API Server
25 | - 8080:80 # Ingress controller port 80
26 |
--------------------------------------------------------------------------------
/plugins/caddy/e2e/kubernetes/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DOCKER_COMPOSE_FILE=docker-kubernetes.yml
4 | export DOCKER_COMPOSE_PROJECT_NAME=kubernetes_e2e
5 |
6 | errors=0
7 |
8 | export KUBECONFIG=./kubeconfig.yaml
9 |
10 | echo "Using Docker version:"
11 | docker version
12 |
13 | prepare_kubernetes() {
14 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
15 | until kubectl get nodes | grep " Ready "; do sleep 1; done
16 | echo "Loading sablierapp/sablier:local into k3s..."
17 | docker save sablierapp/sablier:local | docker exec -i ${DOCKER_COMPOSE_PROJECT_NAME}-server-1 ctr images import -
18 | echo "Loading succeeded."
19 | }
20 |
21 | destroy_kubernetes() {
22 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --volumes
23 | }
24 |
25 | prepare_deployment() {
26 | kubectl apply -f ./manifests/sablier.yml
27 | kubectl apply -f ./manifests/deployment.yml
28 | }
29 |
30 | destroy_deployment() {
31 | kubectl delete -f ./manifests/deployment.yml
32 | kubectl delete -f ./manifests/sablier.yml
33 | }
34 |
35 | prepare_stateful_set() {
36 | kubectl apply -f ./manifests/statefulset.yml
37 | }
38 |
39 | destroy_stateful_set() {
40 | kubectl delete -f ./manifests/statefulset.yml
41 | }
42 |
43 | run_kubernetes_deployment_test() {
44 | echo "---- Running Kubernetes Test: $1 ----"
45 | prepare_deployment
46 | sleep 10
47 | go clean -testcache
48 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
49 | errors=1
50 | kubectl -n kube-system logs deployments/sablier-deployment
51 | # kubectl -n kube-system logs deployments/caddy
52 | fi
53 |
54 | destroy_deployment
55 | }
56 |
57 | trap destroy_kubernetes EXIT
58 |
59 | prepare_kubernetes
60 | prepare_caddy # TODO: Implement this, will fail for now
61 | # run_kubernetes_deployment_test Test_Dynamic
62 | # run_kubernetes_deployment_test Test_Blocking # Blocking is not yet supported
63 | # run_kubernetes_deployment_test Test_Multiple
64 | # run_kubernetes_deployment_test Test_Healthy
65 |
66 | exit $errors
--------------------------------------------------------------------------------
/plugins/caddy/e2e/kubernetes/values.yaml:
--------------------------------------------------------------------------------
1 | # traefik helm values
2 | image:
3 | tag: "2.9.1"
4 |
5 | additionalArguments:
6 | - "--experimental.localPlugins.sablier.moduleName=github.com/sablierapp/sablier"
7 |
8 | providers:
9 | kubernetesIngress:
10 | allowEmptyServices: true
11 | kubernetesCRD:
12 | allowEmptyServices: true
13 |
14 | additionalVolumeMounts:
15 | - name: local-sablier-plugin
16 | mountPath: /plugins-local/src/github.com/sablierapp/sablier
17 |
18 | deployment:
19 | additionalVolumes:
20 | - name: local-sablier-plugin
21 | hostPath:
22 | # directory location on host
23 | path: /plugins-local/src/github.com/sablierapp/sablier
24 | # this field is optional
25 | type: Directory
--------------------------------------------------------------------------------
/plugins/caddy/main.go:
--------------------------------------------------------------------------------
1 | package caddy
2 |
3 | import (
4 | "context"
5 | "io"
6 | "net/http"
7 |
8 | "github.com/caddyserver/caddy/v2"
9 | "github.com/caddyserver/caddy/v2/modules/caddyhttp"
10 | )
11 |
12 | func init() {
13 | caddy.RegisterModule(SablierMiddleware{})
14 | }
15 |
16 | type SablierMiddleware struct {
17 | Config Config
18 | client *http.Client
19 | request *http.Request
20 | }
21 |
22 | // CaddyModule returns the Caddy module information.
23 | func (SablierMiddleware) CaddyModule() caddy.ModuleInfo {
24 | return caddy.ModuleInfo{
25 | ID: "http.handlers.sablier",
26 | New: func() caddy.Module { return new(SablierMiddleware) },
27 | }
28 | }
29 |
30 | // Provision implements caddy.Provisioner.
31 | func (m *SablierMiddleware) Provision(ctx caddy.Context) error {
32 | req, err := m.Config.BuildRequest()
33 |
34 | if err != nil {
35 | return err
36 | }
37 |
38 | m.request = req
39 | m.client = &http.Client{}
40 |
41 | return nil
42 | }
43 |
44 | // ServeHTTP implements caddyhttp.MiddlewareHandler.
45 | func (sm SablierMiddleware) ServeHTTP(rw http.ResponseWriter, req *http.Request, next caddyhttp.Handler) error {
46 | sablierRequest := sm.request.Clone(context.TODO())
47 |
48 | resp, err := sm.client.Do(sablierRequest)
49 | if err != nil {
50 | http.Error(rw, err.Error(), http.StatusInternalServerError)
51 | return nil
52 | }
53 | defer resp.Body.Close()
54 |
55 | if resp.Header.Get("X-Sablier-Session-Status") == "ready" {
56 | next.ServeHTTP(rw, req)
57 | } else {
58 | forward(resp, rw)
59 | }
60 | return nil
61 | }
62 |
63 | func forward(resp *http.Response, rw http.ResponseWriter) {
64 | rw.Header().Set("Content-Type", resp.Header.Get("Content-Type"))
65 | rw.Header().Set("Content-Length", resp.Header.Get("Content-Length"))
66 | io.Copy(rw, resp.Body)
67 | }
68 |
69 | // Interface guards
70 | var (
71 | _ caddy.Provisioner = (*SablierMiddleware)(nil)
72 | _ caddyhttp.MiddlewareHandler = (*SablierMiddleware)(nil)
73 | )
74 |
--------------------------------------------------------------------------------
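`ServeHTTP` stays allocation-light by cloning a request that was built once at provision time; `Config.BuildRequest` itself lives in another file of the plugin and is not reproduced in this section. As a rough sketch only, with the endpoint path and query parameter names taken from Sablier's documented reverse-proxy API rather than the plugin's actual source, the builder plausibly does something like:

```go
package caddy

import (
	"fmt"
	"net/http"
	"net/url"
)

// buildDynamicRequest is a hypothetical sketch of what Config.BuildRequest
// might produce for the dynamic strategy: a GET against the Sablier API
// carrying the instance names and the session duration.
func buildDynamicRequest(sablierURL string, names []string, sessionDuration string) (*http.Request, error) {
	q := url.Values{}
	for _, name := range names {
		q.Add("names", name) // one entry per instance to wake
	}
	q.Set("session_duration", sessionDuration)

	endpoint := fmt.Sprintf("%s/api/strategies/dynamic?%s", sablierURL, q.Encode())
	return http.NewRequest(http.MethodGet, endpoint, nil)
}
```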
/plugins/caddy/remote.Dockerfile:
--------------------------------------------------------------------------------
1 | ARG CADDY_VERSION=2.8.4
2 | FROM caddy:${CADDY_VERSION}-builder AS builder
3 |
4 | RUN xcaddy build \
5 | --with github.com/sablierapp/sablier/plugins/caddy
6 |
7 | FROM caddy:${CADDY_VERSION}
8 |
9 | COPY --from=builder /usr/bin/caddy /usr/bin/caddy
--------------------------------------------------------------------------------
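Compared with the main `Dockerfile`, which pins the plugin to the local working tree via the `=.` replacement, `remote.Dockerfile` builds against whatever is published upstream. With a locally installed xcaddy, the same distinction looks like this (hypothetical invocations):

```bash
# Hypothetical local equivalents of the two Dockerfiles' build steps.
xcaddy build --with github.com/sablierapp/sablier/plugins/caddy=./plugins/caddy  # local tree
xcaddy build --with github.com/sablierapp/sablier/plugins/caddy@main             # published ref
```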
/plugins/nginx/e2e/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | proxy:
5 | image: nginx:1.27.1
6 | ports:
7 | - 8080:80
8 | volumes:
9 | # Used to load js module
10 | - ../nginx.conf:/etc/nginx/nginx.conf
11 | - ../../njs/sablier.js:/etc/nginx/conf.d/sablier.js
12 | - ./nginx.conf:/etc/nginx/conf.d/default.conf
13 |
14 | sablier:
15 | image: sablierapp/sablier:local
16 | ports:
17 | - 10000:10000
18 | command:
19 | - start
20 | - --provider.name=docker
21 | - --logging.level=trace
22 | volumes:
23 | - '/var/run/docker.sock:/var/run/docker.sock'
24 |
25 | whoami:
26 | image: acouvreur/whoami:v1.10.2
27 | healthcheck:
28 | test: [ "CMD", "curl", "-f", "http://localhost" ]
29 | interval: 5s
30 | labels:
31 | - sablier.enable=true
32 | - sablier.group=E2E
33 |
34 | nginx:
35 | image: nginx:1.27.1
36 | healthcheck:
37 | test: ["CMD", "curl", "-f", "http://localhost"]
38 | interval: 5s
39 | labels:
40 | - sablier.enable=true
41 | - sablier.group=E2E
--------------------------------------------------------------------------------
/plugins/nginx/e2e/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=docker-compose.yml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | }
14 |
15 | destroy_docker_classic() {
16 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
17 | }
18 |
19 | run_docker_classic_test() {
20 | echo "Running Docker Classic Test: $1"
21 | prepare_docker_classic
22 | sleep 2
23 | go clean -testcache
24 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
25 | errors=1
26 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier proxy
27 | fi
28 | destroy_docker_classic
29 | }
30 |
31 | trap destroy_docker_classic EXIT
32 |
33 | run_docker_classic_test Test_Dynamic
34 | run_docker_classic_test Test_Blocking
35 | run_docker_classic_test Test_Multiple
36 | run_docker_classic_test Test_Healthy
37 | run_docker_classic_test Test_Group
38 |
39 | exit $errors
--------------------------------------------------------------------------------
/plugins/nginx/e2e/docker_swarm/docker-stack.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | proxy:
5 | image: nginx:1.23.1
6 | ports:
7 | - target: 80
8 | published: 8080
9 | protocol: tcp
10 | mode: host # Won't work in GitHub Actions otherwise
11 | volumes:
12 | # Used to load js module
13 | - ../nginx.conf:/etc/nginx/nginx.conf
14 | - ../../njs/sablier.js:/etc/nginx/conf.d/sablier.js
15 | - ./nginx.conf:/etc/nginx/conf.d/default.conf
16 |
17 | sablier:
18 | image: sablierapp/sablier:local
19 | ports:
20 | - 10000:10000
21 | command:
22 | - start
23 | - --provider.name=swarm
24 | - --logging.level=trace
25 | volumes:
26 | - '/var/run/docker.sock:/var/run/docker.sock'
27 |
28 | whoami:
29 | image: acouvreur/whoami:v1.10.2
30 | healthcheck:
31 | test: [ "CMD", "curl", "-f", "http://localhost" ]
32 | interval: 5s
33 | deploy:
34 | labels:
35 | - sablier.enable=true
36 | - sablier.group=E2E
37 | replicas: 0
38 |
39 | nginx:
40 | image: nginx:1.23.1
41 | healthcheck:
42 | test: ["CMD", "curl", "-f", "http://localhost"]
43 | interval: 5s
44 | deploy:
45 | labels:
46 | - sablier.enable=true
47 | - sablier.group=E2E
48 | replicas: 0
--------------------------------------------------------------------------------
/plugins/nginx/e2e/docker_swarm/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_STACK_FILE=docker-stack.yml
4 | DOCKER_STACK_NAME=DOCKER_SWARM_E2E
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_swarm() {
12 | docker swarm init
13 | }
14 |
15 | prepare_docker_stack() {
16 | docker stack deploy --compose-file $DOCKER_STACK_FILE ${DOCKER_STACK_NAME}
17 | docker run --rm -it -v /var/run/docker.sock:/var/run/docker.sock sudobmitch/docker-stack-wait -t 60 ${DOCKER_STACK_NAME}
18 | }
19 |
20 | destroy_docker_stack() {
21 | docker stack rm ${DOCKER_STACK_NAME}
22 | # Sometimes the network is not cleaned up properly, see https://github.com/moby/moby/issues/30942#issuecomment-540699206
23 | until [ -z "$(docker stack ps ${DOCKER_STACK_NAME} -q)" ]; do sleep 1; done
24 | }
25 |
26 | destroy_docker_swarm() {
27 | docker swarm leave -f || true
28 | }
29 |
30 | run_docker_swarm_test() {
31 | echo "Running Docker Swarm Test: $1"
32 | prepare_docker_stack
33 | sleep 10
34 | go clean -testcache
35 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
36 | errors=1
37 | docker service logs ${DOCKER_STACK_NAME}_sablier
38 | docker service logs ${DOCKER_STACK_NAME}_proxy
39 | fi
40 | destroy_docker_stack
41 | }
42 |
43 | trap destroy_docker_swarm EXIT
44 |
45 | prepare_docker_swarm
46 | run_docker_swarm_test Test_Dynamic
47 | run_docker_swarm_test Test_Blocking
48 | run_docker_swarm_test Test_Multiple
49 | run_docker_swarm_test Test_Healthy
50 | run_docker_swarm_test Test_Group
51 |
52 | exit $errors
--------------------------------------------------------------------------------
/plugins/nginx/e2e/kubernetes/docker-kubernetes.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | server:
4 | image: "rancher/k3s:v1.23.12-k3s1"
5 | command: server --no-deploy traefik
6 | tmpfs:
7 | - /run
8 | - /var/run
9 | ulimits:
10 | nproc: 65535
11 | nofile:
12 | soft: 65535
13 | hard: 65535
14 | privileged: true
15 | restart: always
16 | environment:
17 | - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
18 | - K3S_KUBECONFIG_MODE=666
19 | volumes:
20 | # This is just so that we get the kubeconfig file out
21 | - .:/output
22 | - '../../..:/plugins-local/src/github.com/sablierapp/sablier'
23 | ports:
24 | - 6443:6443 # Kubernetes API Server
25 | - 8080:80 # Ingress controller port 80
26 |
--------------------------------------------------------------------------------
/plugins/nginx/e2e/kubernetes/manifests/sablier.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: sablier-deployment
5 | namespace: kube-system
6 | labels:
7 | app: sablier
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: sablier
13 | template:
14 | metadata:
15 | labels:
16 | app: sablier
17 | spec:
18 | serviceAccountName: sablier
19 | containers:
20 | - name: sablier
21 | image: sablierapp/sablier:local
22 | args: ["start", "--provider.name=kubernetes", "--logging.level=trace"]
23 | ports:
24 | - containerPort: 10000
25 | ---
26 | apiVersion: v1
27 | kind: Service
28 | metadata:
29 | name: sablier
30 | namespace: kube-system
31 | spec:
32 | selector:
33 | app: sablier
34 | ports:
35 | - protocol: TCP
36 | port: 10000
37 | targetPort: 10000
38 | ---
39 | apiVersion: v1
40 | kind: ServiceAccount
41 | metadata:
42 | name: sablier
43 | namespace: kube-system
44 | ---
45 | apiVersion: rbac.authorization.k8s.io/v1
46 | kind: ClusterRole
47 | metadata:
48 | name: sablier
49 | namespace: kube-system
50 | rules:
51 | - apiGroups:
52 | - apps
53 | - ""
54 | resources:
55 | - deployments
56 | - deployments/scale
57 | - statefulsets
58 | - statefulsets/scale
59 | verbs:
60 | - patch # Scale up and down
61 | - get # Retrieve info about specific dep
62 | - update # Scale up and down
63 | - list # Events
64 | - watch # Events
65 | ---
66 | apiVersion: rbac.authorization.k8s.io/v1
67 | kind: ClusterRoleBinding
68 | metadata:
69 | name: sablier
70 | namespace: kube-system
71 | roleRef:
72 | apiGroup: rbac.authorization.k8s.io
73 | kind: ClusterRole
74 | name: sablier
75 | subjects:
76 | - kind: ServiceAccount
77 | name: sablier
78 | namespace: kube-system
--------------------------------------------------------------------------------
/plugins/nginx/e2e/kubernetes/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DOCKER_COMPOSE_FILE=docker-kubernetes.yml
4 | export DOCKER_COMPOSE_PROJECT_NAME=kubernetes_e2e
5 |
6 | errors=0
7 |
8 | export KUBECONFIG=./kubeconfig.yaml
9 |
10 | echo "Using Docker version:"
11 | docker version
12 |
13 | prepare_kubernetes() {
14 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
15 | until kubectl get nodes | grep " Ready "; do sleep 1; done
16 | echo "Loading sablierapp/sablier:local into k3s..."
17 | docker save sablierapp/sablier:local | docker exec -i ${DOCKER_COMPOSE_PROJECT_NAME}-server-1 ctr images import -
18 | echo "Loading succeeded."
19 | }
20 |
21 | destroy_kubernetes() {
22 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --volumes
23 | }
24 |
25 | prepare_deployment() {
26 | kubectl apply -f ./manifests/sablier.yml
27 | kubectl apply -f ./manifests/deployment.yml
28 | }
29 |
30 | destroy_deployment() {
31 | kubectl delete -f ./manifests/deployment.yml
32 | kubectl delete -f ./manifests/sablier.yml
33 | }
34 |
35 | prepare_stateful_set() {
36 | kubectl apply -f ./manifests/statefulset.yml
37 | }
38 |
39 | destroy_stateful_set() {
40 | kubectl delete -f ./manifests/statefulset.yml
41 | }
42 |
43 | run_kubernetes_deployment_test() {
44 | echo "---- Running Kubernetes Test: $1 ----"
45 | prepare_deployment
46 | sleep 10
47 | go clean -testcache
48 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
49 | errors=1
50 | kubectl -n kube-system logs deployments/sablier-deployment
51 | # kubectl -n kube-system logs deployments/nginx
52 | fi
53 |
54 | destroy_deployment
55 | }
56 |
57 | trap destroy_kubernetes EXIT
58 |
59 | prepare_kubernetes
60 | prepare_nginx # TODO: Implement this, will fail for now
61 | # run_kubernetes_deployment_test Test_Dynamic
62 | # run_kubernetes_deployment_test Test_Blocking # Blocking is not yet supported
63 | # run_kubernetes_deployment_test Test_Multiple
64 | # run_kubernetes_deployment_test Test_Healthy
65 |
66 | exit $errors
--------------------------------------------------------------------------------
/plugins/nginx/e2e/nginx.conf:
--------------------------------------------------------------------------------
1 | load_module modules/ngx_http_js_module.so;
2 |
3 | user nginx;
4 | worker_processes auto;
5 |
6 | error_log /var/log/nginx/error.log notice;
7 | pid /var/run/nginx.pid;
8 |
9 |
10 | events {
11 | worker_connections 1024;
12 | }
13 |
14 |
15 | http {
16 | include /etc/nginx/mime.types;
17 | default_type application/octet-stream;
18 |
19 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
20 | '$status $body_bytes_sent "$http_referer" '
21 | '"$http_user_agent" "$http_x_forwarded_for"';
22 |
23 | access_log /var/log/nginx/access.log main;
24 |
25 | sendfile on;
26 | #tcp_nopush on;
27 |
28 | keepalive_timeout 65;
29 |
30 | #gzip on;
31 |
32 | include /etc/nginx/conf.d/*.conf;
33 | }
--------------------------------------------------------------------------------
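This top-level config only loads the njs module and includes `conf.d/*.conf`; the per-site `default.conf` that the compose files mount next to `sablier.js` is not reproduced in this section. Wiring the script in would look roughly like the sketch below, where the exported function name is a placeholder rather than the actual export of `sablier.js`:

```nginx
# Hypothetical conf.d/default.conf sketch; "handler" is a placeholder name,
# not the real export of sablier.js.
js_import sablier from conf.d/sablier.js;

server {
    listen 80;

    location /dynamic/whoami {
        # The njs handler calls the Sablier API, then either proxies the
        # request onward or renders the waiting page itself.
        js_content sablier.handler;
    }
}
```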
/plugins/proxywasm/BlockingConfiguration_json.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import jsoniter "github.com/json-iterator/tinygo"
4 |
5 | type BlockingConfiguration_json struct {
6 | }
7 | func (json BlockingConfiguration_json) Type() interface{} {
8 | var val BlockingConfiguration
9 | return val
10 | }
11 | func (json BlockingConfiguration_json) Unmarshal(iter *jsoniter.Iterator, out interface{}) {
12 | BlockingConfiguration_json_unmarshal(iter, out.(*BlockingConfiguration))
13 | }
14 | func (json BlockingConfiguration_json) Marshal(stream *jsoniter.Stream, val interface{}) {
15 | BlockingConfiguration_json_marshal(stream, val.(BlockingConfiguration))
16 | }
17 | func BlockingConfiguration_json_unmarshal(iter *jsoniter.Iterator, out *BlockingConfiguration) {
18 | more := iter.ReadObjectHead()
19 | for more {
20 | field := iter.ReadObjectField()
21 | if !BlockingConfiguration_json_unmarshal_field(iter, field, out) {
22 | iter.Skip()
23 | }
24 | more = iter.ReadObjectMore()
25 | }
26 | }
27 | func BlockingConfiguration_json_unmarshal_field(iter *jsoniter.Iterator, field string, out *BlockingConfiguration) bool {
28 | switch {
29 | case field == `timeout`:
30 | iter.ReadString(&(*out).Timeout)
31 | return true
32 | }
33 | return false
34 | }
35 | func BlockingConfiguration_json_marshal(stream *jsoniter.Stream, val BlockingConfiguration) {
36 | stream.WriteObjectHead()
37 | BlockingConfiguration_json_marshal_field(stream, val)
38 | stream.WriteObjectTail()
39 | }
40 | func BlockingConfiguration_json_marshal_field(stream *jsoniter.Stream, val BlockingConfiguration) {
41 | stream.WriteObjectField(`timeout`)
42 | stream.WriteString(val.Timeout)
43 | stream.WriteMore()
44 | }
45 |
--------------------------------------------------------------------------------
/plugins/proxywasm/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.24 AS build
2 |
3 | WORKDIR /go/src/sablier/plugins/proxywasm
4 |
5 | COPY go.mod ./
6 | COPY go.sum ./
7 | RUN go mod download
8 |
9 | COPY . /go/src/sablier/plugins/proxywasm
10 |
11 | RUN make build
12 |
13 | FROM scratch
14 |
15 | COPY --from=build /go/src/sablier/plugins/proxywasm/sablierproxywasm.wasm ./plugin.wasm
--------------------------------------------------------------------------------
/plugins/proxywasm/Makefile:
--------------------------------------------------------------------------------
1 | build:
2 | go generate
3 | env GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o sablierproxywasm.wasm .
4 |
5 | docker:
6 | docker build -t sablierapp/sablier-proxy-wasm:latest .
--------------------------------------------------------------------------------
/plugins/proxywasm/README.md:
--------------------------------------------------------------------------------
1 | # Proxy Wasm Sablier Plugin
2 |
3 | See more at
4 | - https://github.com/proxy-wasm/spec
5 | - https://github.com/proxy-wasm/proxy-wasm-go-sdk
6 |
--------------------------------------------------------------------------------
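A minimal proxy-wasm HTTP filter built on the tetratelabs SDK pinned in the plugin's go.mod looks roughly like the skeleton below. It only illustrates the VM → plugin → HTTP-context callback chain that a filter like this hooks into; it is not the Sablier plugin itself:

```go
// Hypothetical proxy-wasm skeleton, not the actual Sablier filter.
package main

import (
	"github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm"
	"github.com/tetratelabs/proxy-wasm-go-sdk/proxywasm/types"
)

func main() {}

func init() {
	proxywasm.SetVMContext(&vmContext{})
}

// vmContext hands out one pluginContext per plugin configuration.
type vmContext struct{ types.DefaultVMContext }

func (*vmContext) NewPluginContext(contextID uint32) types.PluginContext {
	return &pluginContext{}
}

// pluginContext hands out one httpContext per HTTP stream.
type pluginContext struct{ types.DefaultPluginContext }

func (*pluginContext) NewHttpContext(contextID uint32) types.HttpContext {
	return &httpContext{}
}

// httpContext is where a Sablier-like filter would call the Sablier API
// and decide whether to pause the stream or let it through.
type httpContext struct{ types.DefaultHttpContext }

func (*httpContext) OnHttpRequestHeaders(numHeaders int, endOfStream bool) types.Action {
	proxywasm.LogInfo("request intercepted")
	return types.ActionContinue
}
```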
/plugins/proxywasm/e2e/apacheapisix/README.md:
--------------------------------------------------------------------------------
1 | # Sablier ProxyWasm Plugin integration with Apache APISIX
2 |
3 |
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/apacheapisix/docker/apisix.yaml:
--------------------------------------------------------------------------------
1 | routes:
2 | - uri: "/dynamic/whoami"
3 | plugins:
4 | proxywasm_sablier_plugin:
5 | conf: '{ "sablier_url": "sablier:10000", "names": ["docker_classic_e2e-whoami-1"], "session_duration": "1m", "dynamic": { "display_name": "Dynamic Whoami", "theme": "hacker-terminal" } }'
6 | upstream:
7 | type: roundrobin
8 | nodes:
9 | "whoami:80": 1
10 |
11 | - uri: "/blocking/whoami"
12 | plugins:
13 | proxywasm_sablier_plugin:
14 | conf: '{ "sablier_url": "sablier:10000", "names": ["docker_classic_e2e-whoami-1"], "session_duration": "1m", "blocking": { "timeout": "30s" } }'
15 | upstream:
16 | type: roundrobin
17 | nodes:
18 | "whoami:80": 1
19 |
20 | - uri: "/multiple/whoami"
21 | plugins:
22 | proxywasm_sablier_plugin:
23 | conf: '{ "sablier_url": "sablier:10000", "names": ["docker_classic_e2e-whoami-1", "docker_classic_e2e-nginx-1"], "session_duration": "1m", "dynamic": { "display_name": "Multiple Whoami" } }'
24 | upstream:
25 | type: roundrobin
26 | nodes:
27 | "whoami:80": 1
28 |
29 | - uri: "/multiple/nginx"
30 | plugins:
31 | proxywasm_sablier_plugin:
32 | conf: '{ "sablier_url": "sablier:10000", "names": ["docker_classic_e2e-whoami-1", "docker_classic_e2e-nginx-1"], "session_duration": "1m", "dynamic": { "display_name": "Multiple Whoami" } }'
33 | upstream:
34 | type: roundrobin
35 | nodes:
36 | "nginx:80": 1
37 |
38 | - uri: "/healthy/nginx"
39 | plugins:
40 | proxywasm_sablier_plugin:
41 | conf: '{ "sablier_url": "sablier:10000", "names": ["docker_classic_e2e-nginx-1"], "session_duration": "1m", "dynamic": { "display_name": "Healthy Nginx" } }'
42 | upstream:
43 | type: roundrobin
44 | nodes:
45 | "nginx:80": 1
46 |
47 | - uri: "/group"
48 | plugins:
49 | proxywasm_sablier_plugin:
50 | conf: '{ "sablier_url": "sablier:10000", "group": "E2E", "session_duration": "1m", "dynamic": { "display_name": "Group E2E" } }'
51 | upstream:
52 | type: roundrobin
53 | nodes:
54 | "whoami:80": 1
55 | #END
--------------------------------------------------------------------------------
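APISIX's standalone mode only reloads `apisix.yaml` once the file ends with the `#END` marker, hence the final comment line above. With the stack below running (APISIX published on host port 8080), the routes can be smoke-tested directly; a hypothetical check:

```bash
curl -s http://localhost:8080/dynamic/whoami | head -n 5   # waiting page until whoami is up
curl -s http://localhost:8080/group                        # wakes everything labeled sablier.group=E2E
```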
/plugins/proxywasm/e2e/apacheapisix/docker/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | apisix:
3 | image: apache/apisix:3.11.0-debian
4 | restart: always
5 | volumes:
6 | - ./config.yaml:/usr/local/apisix/conf/config.yaml:ro
7 | - ./apisix.yaml:/usr/local/apisix/conf/apisix.yaml:ro
8 | - ../../../sablierproxywasm.wasm:/wasm/sablierproxywasm.wasm
9 | ports:
10 | - "8080:9080/tcp"
11 |
12 | sablier:
13 | image: sablierapp/sablier:local
14 | command:
15 | - start
16 | - --provider.name=docker
17 | - --logging.level=trace
18 | volumes:
19 | - '/var/run/docker.sock:/var/run/docker.sock'
20 |
21 | whoami:
22 | image: acouvreur/whoami:v1.10.2
23 | healthcheck:
24 | test: [ "CMD", "curl", "-f", "http://localhost" ]
25 | interval: 5s
26 | labels:
27 | - sablier.enable=true
28 | - sablier.group=E2E
29 |
30 | nginx:
31 | image: nginx:1.27.1
32 | healthcheck:
33 | test: ["CMD", "curl", "-f", "http://localhost"]
34 | interval: 5s
35 | labels:
36 | - sablier.enable=true
37 | - sablier.group=E2E
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/apacheapisix/docker/config.yaml:
--------------------------------------------------------------------------------
1 | deployment:
2 | role: data_plane
3 | role_data_plane:
4 | config_provider: yaml
5 |
6 | wasm:
7 | plugins:
8 | - name: proxywasm_sablier_plugin
9 | priority: 7997
10 | file: /wasm/sablierproxywasm.wasm
11 |
12 |
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/apacheapisix/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=compose.yaml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
14 | }
15 |
16 | destroy_docker_classic() {
17 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
18 | }
19 |
20 | run_docker_classic_test() {
21 | echo "Running Docker Classic Test: $1"
22 | prepare_docker_classic
23 | sleep 2
24 | go clean -testcache
25 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
26 | errors=1
27 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier apisix
28 | fi
29 | destroy_docker_classic
30 | }
31 |
32 | trap destroy_docker_classic EXIT
33 |
34 | run_docker_classic_test Test_Dynamic
35 | run_docker_classic_test Test_Blocking
36 | run_docker_classic_test Test_Multiple
37 | run_docker_classic_test Test_Healthy
38 | run_docker_classic_test Test_Group
39 |
40 | exit $errors
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/envoy/docker/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | envoy:
3 | image: envoyproxy/envoy:v1.33-latest
4 | command: /usr/local/bin/envoy -c /etc/envoy.yaml
5 | volumes:
6 | - ./envoy.yaml:/etc/envoy.yaml
7 | - ../../../sablierproxywasm.wasm:/etc/sablierproxywasm.wasm
8 | ports:
9 | - "8080:8080"
10 |
11 | sablier:
12 | image: sablierapp/sablier:local
13 | command:
14 | - start
15 | - --provider.name=docker
16 | - --logging.level=trace
17 | volumes:
18 | - '/var/run/docker.sock:/var/run/docker.sock'
19 |
20 | whoami:
21 | image: acouvreur/whoami:v1.10.2
22 | healthcheck:
23 | test: [ "CMD", "curl", "-f", "http://localhost" ]
24 | interval: 5s
25 | labels:
26 | - sablier.enable=true
27 | - sablier.group=E2E
28 |
29 | nginx:
30 | image: nginx:1.27.1
31 | healthcheck:
32 | test: ["CMD", "curl", "-f", "http://localhost"]
33 | interval: 5s
34 | labels:
35 | - sablier.enable=true
36 | - sablier.group=E2E
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/envoy/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=compose.yaml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
14 | }
15 |
16 | destroy_docker_classic() {
17 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
18 | }
19 |
20 | run_docker_classic_test() {
21 | echo "Running Docker Classic Test: $1"
22 | prepare_docker_classic
23 | sleep 2
24 | go clean -testcache
25 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
26 | errors=1
27 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier envoy
28 | fi
29 | destroy_docker_classic
30 | }
31 |
32 | trap destroy_docker_classic EXIT
33 |
34 | run_docker_classic_test Test_Dynamic
35 | run_docker_classic_test Test_Blocking
36 | run_docker_classic_test Test_Multiple
37 | run_docker_classic_test Test_Healthy
38 | run_docker_classic_test Test_Group
39 |
40 | exit $errors
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/README.md:
--------------------------------------------------------------------------------
1 | # Install kubectl
2 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
3 |
4 | # Install helm3
5 | curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
6 |
7 | # Start k3s
8 | docker compose up -d
9 | sudo chown vscode ./kubeconfig.yaml
10 | chmod 600 ./kubeconfig.yaml
11 | export KUBECONFIG=./kubeconfig.yaml
12 |
13 | kubectl create configmap -n istio-system sablier-wasm-plugin --from-file ../../sablierproxywasm.wasm
14 |
15 | # Install Istio Helm charts
16 | helm repo add istio https://istio-release.storage.googleapis.com/charts
17 | helm repo update
18 | helm install istio-base istio/base -n istio-system --wait
19 | helm install istiod istio/istiod -n istio-system --wait
20 | kubectl label namespace istio-system istio-injection=enabled
21 | helm install istio-ingressgateway istio/gateway --values ./istio-gateway-values.yaml -n istio-system --wait
22 |
23 | # Install Sablier
24 | kubectl apply -f ./manifests/sablier.yml
25 |
26 | # Build proxywasm
27 | make docker
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/compose.yaml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | server:
4 | image: "rancher/k3s:v1.31.5-k3s1"
5 | command: server --no-deploy traefik
6 | tmpfs:
7 | - /run
8 | - /var/run
9 | ulimits:
10 | nproc: 65535
11 | nofile:
12 | soft: 65535
13 | hard: 65535
14 | privileged: true
15 | restart: always
16 | environment:
17 | - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
18 | - K3S_KUBECONFIG_MODE=666
19 | volumes:
20 | # This is just so that we get the kubeconfig file out
21 | - .:/output
22 | ports:
23 | - 6443:6443 # Kubernetes API Server
24 | - 8080:80 # Ingress controller port 80
25 |
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/istio-gateway-values.yaml:
--------------------------------------------------------------------------------
1 | volumes:
2 | - name: wasmfilter
3 | configMap:
4 | name: sablier-wasm-plugin
5 |
6 | volumeMounts:
7 | - name: wasmfilter
8 | mountPath: /opt/filters/sablierproxywasm.wasm
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/manifests/ingressgateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.istio.io/v1alpha3
2 | kind: Gateway
3 | metadata:
4 | name: gateway
5 | namespace: istio-system
6 | spec:
7 | selector:
8 | istio: ingressgateway
9 | servers:
10 | - port:
11 | number: 80
12 | name: http
13 | protocol: HTTP
14 | hosts:
15 | - "*"
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/manifests/nginx.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | version: v1
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | version: v1
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:1.27.5
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: nginx
25 | labels:
26 | app: nginx
27 | service: nginx
28 | spec:
29 | ports:
30 | - name: http
31 | targetPort: 80
32 | port: 80
33 | selector:
34 | app: nginx
35 | ---
36 | apiVersion: networking.istio.io/v1alpha3
37 | kind: VirtualService
38 | metadata:
39 | name: nginx
40 | spec:
41 | hosts:
42 | - "*"
43 | gateways:
44 | - gateway.istio-system.svc.cluster.local
45 | http:
46 | - match:
47 | - uri:
48 | prefix: "/multiple/nginx"
49 | - uri:
50 | prefix: "/healthy/nginx"
51 | route:
52 | - destination:
53 | port:
54 | number: 80
55 | host: nginx
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/manifests/sablier.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: sablier-system
5 | ---
6 | apiVersion: apps/v1
7 | kind: Deployment
8 | metadata:
9 | name: sablier
10 | namespace: sablier-system
11 | labels:
12 | app: sablier
13 | spec:
14 | replicas: 1
15 | selector:
16 | matchLabels:
17 | app: sablier
18 | template:
19 | metadata:
20 | labels:
21 | app: sablier
22 | spec:
23 | serviceAccountName: sablier
24 | containers:
25 | - name: sablier
26 | image: sablierapp/sablier:local
27 | args: ["start", "--provider.name=kubernetes", "--logging.level=trace"]
28 | ports:
29 | - containerPort: 10000
30 | ---
31 | apiVersion: v1
32 | kind: Service
33 | metadata:
34 | name: sablier
35 | namespace: sablier-system
36 | spec:
37 | selector:
38 | app: sablier
39 | ports:
40 | - protocol: TCP
41 | port: 10000
42 | targetPort: 10000
43 | ---
44 | apiVersion: v1
45 | kind: ServiceAccount
46 | metadata:
47 | name: sablier
48 | namespace: sablier-system
49 | ---
50 | apiVersion: rbac.authorization.k8s.io/v1
51 | kind: ClusterRole
52 | metadata:
53 | name: sablier
54 | namespace: sablier-system
55 | rules:
56 | - apiGroups:
57 | - apps
58 | - ""
59 | resources:
60 | - deployments
61 | - deployments/scale
62 | - statefulsets
63 | - statefulsets/scale
64 | verbs:
65 | - patch # Scale up and down
66 | - get # Retrieve info about specific dep
67 | - update # Scale up and down
68 | - list # Events
69 | - watch # Events
70 | ---
71 | apiVersion: rbac.authorization.k8s.io/v1
72 | kind: ClusterRoleBinding
73 | metadata:
74 | name: sablier
75 | namespace: sablier-system
76 | roleRef:
77 | apiGroup: rbac.authorization.k8s.io
78 | kind: ClusterRole
79 | name: sablier
80 | subjects:
81 | - kind: ServiceAccount
82 | name: sablier
83 | namespace: sablier-system
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/manifests/whoami.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: whoami
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: whoami
10 | version: v1
11 | template:
12 | metadata:
13 | labels:
14 | app: whoami
15 | version: v1
16 | spec:
17 | containers:
18 | - name: whoami
19 | image: acouvreur/whoami:v1.10.2
20 | ---
21 | apiVersion: v1
22 | kind: Service
23 | metadata:
24 | name: whoami
25 | labels:
26 | app: whoami
27 | service: whoami
28 | spec:
29 | ports:
30 | - name: http
31 | targetPort: 80
32 | port: 80
33 | selector:
34 | app: whoami
35 | ---
36 | apiVersion: networking.istio.io/v1alpha3
37 | kind: VirtualService
38 | metadata:
39 | name: whoami
40 | spec:
41 | hosts:
42 | - "*"
43 | gateways:
44 | - gateway.istio-system.svc.cluster.local
45 | http:
46 | - match:
47 | - uri:
48 | prefix: "/dynamic/whoami"
49 | - uri:
50 | prefix: "/blocking/whoami"
51 | - uri:
52 | prefix: "/multiple/whoami"
53 | route:
54 | - destination:
55 | port:
56 | number: 80
57 | host: whoami
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/istio/kubernetes/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DOCKER_COMPOSE_FILE=compose.yaml
4 | export DOCKER_COMPOSE_PROJECT_NAME=kubernetes_e2e
5 |
6 | errors=0
7 |
8 | export KUBECONFIG=./kubeconfig.yaml
9 |
10 | echo "Using Docker version:"
11 | docker version
12 |
13 | prepare_kubernetes() {
14 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
15 | until kubectl get nodes | grep " Ready "; do sleep 1; done
16 | echo "Loading sablierapp/sablier:local into k3s..."
17 | docker save sablierapp/sablier:local | docker exec -i ${DOCKER_COMPOSE_PROJECT_NAME}-server-1 ctr images import -
18 | echo "Loading succeeded."
19 | }
20 |
21 | destroy_kubernetes() {
22 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --volumes
23 | }
24 |
25 | prepare_istio() {
26 | helm repo add istio https://istio-release.storage.googleapis.com/charts
27 | helm repo update
28 | kubectl create namespace istio-system
29 | helm install istio-base istio/base -n istio-system --wait
30 | helm install istiod istio/istiod -n istio-system --wait
31 | kubectl label namespace istio-system istio-injection=enabled
32 | kubectl label namespace default istio-injection=enabled
33 | kubectl create configmap -n istio-system sablier-wasm-plugin --from-file ../../../sablierproxywasm.wasm
34 | helm install istio-ingressgateway istio/gateway --values ./istio-gateway-values.yaml -n istio-system --wait
35 | }
36 |
37 | prepare_manifests() {
38 | kubectl apply -f ./manifests
39 | }
40 |
41 | destroy_manifests() {
42 | kubectl delete -f ./manifests
43 | }
44 |
45 | run_kubernetes_test() {
46 | echo "---- Running Kubernetes Test: $1 ----"
47 | prepare_manifests
48 | sleep 10
49 | go clean -testcache
50 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
51 | errors=1
52 | kubectl -n sablier-system logs deployments/sablier
53 | # kubectl -n kube-system logs deployments/traefik TODO: Log istio
54 | fi
55 |
56 | destroy_manifests
57 | }
58 |
59 | # trap destroy_kubernetes EXIT
60 |
61 | prepare_kubernetes
62 | prepare_istio
63 | # run_kubernetes_test Test_Dynamic
64 | # run_kubernetes_test Test_Blocking
65 | # run_kubernetes_test Test_Multiple
66 | # run_kubernetes_test Test_Healthy
67 |
68 | # exit $errors
69 |
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/nginx/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:25.10
2 |
3 | RUN apt update && apt install -y libatomic1
4 |
5 | ADD https://github.com/Kong/ngx_wasm_module/releases/download/prerelease-0.6.0/wasmx-prerelease-0.6.0-v8-x86_64-ubuntu22.04.tar.gz wasmx.tar.gz
6 |
7 | RUN mkdir /etc/nginx
8 | RUN tar -xvf wasmx.tar.gz
9 | RUN mv /wasmx-prerelease-0.6.0-v8-x86_64-ubuntu22.04/* /etc/nginx/
10 |
11 | WORKDIR /etc/nginx
12 |
13 | CMD [ "./nginx", "-g", "daemon off;" ]
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/nginx/docker/compose.yaml:
--------------------------------------------------------------------------------
1 | services:
2 | reverseproxy:
3 | build:
4 | context: ..
5 | dockerfile: Dockerfile
6 | volumes:
7 | - ./nginx.conf:/etc/nginx/nginx.conf
8 | - ../../../sablierproxywasm.wasm:/wasm/sablierproxywasm.wasm
9 | ports:
10 | - "8080:8080"
11 |
12 | sablier:
13 | image: sablierapp/sablier:local
14 | command:
15 | - start
16 | - --provider.name=docker
17 | - --logging.level=trace
18 | volumes:
19 | - '/var/run/docker.sock:/var/run/docker.sock'
20 |
21 | whoami:
22 | image: acouvreur/whoami:v1.10.2
23 | healthcheck:
24 | test: [ "CMD", "curl", "-f", "http://localhost" ]
25 | interval: 5s
26 | labels:
27 | - sablier.enable=true
28 | - sablier.group=E2E
29 |
30 | nginx:
31 | image: nginx:1.27.1
32 | healthcheck:
33 | test: ["CMD", "curl", "-f", "http://localhost"]
34 | interval: 5s
35 | labels:
36 | - sablier.enable=true
37 | - sablier.group=E2E
--------------------------------------------------------------------------------
/plugins/proxywasm/e2e/nginx/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=compose.yaml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
14 | }
15 |
16 | destroy_docker_classic() {
17 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
18 | }
19 |
20 | run_docker_classic_test() {
21 | echo "Running Docker Classic Test: $1"
22 | prepare_docker_classic
23 | sleep 2
24 | go clean -testcache
25 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
26 | errors=1
27 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier reverseproxy
28 | fi
29 | destroy_docker_classic
30 | }
31 |
32 | trap destroy_docker_classic EXIT
33 |
34 | run_docker_classic_test Test_Dynamic
35 | run_docker_classic_test Test_Blocking
36 | run_docker_classic_test Test_Multiple
37 | run_docker_classic_test Test_Healthy
38 | run_docker_classic_test Test_Group
39 |
40 | exit $errors
--------------------------------------------------------------------------------
/plugins/proxywasm/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/sablierapp/sablier/plugins/proxy-wasm
2 |
3 | go 1.24.0
4 |
5 | require (
6 | github.com/json-iterator/tinygo v0.0.0-20211221071957-84b5b690c8a0
7 | github.com/proxy-wasm/proxy-wasm-go-sdk v0.0.0-20250212164326-ab4161dcf924
8 | github.com/stretchr/testify v1.10.0
9 | github.com/tetratelabs/proxy-wasm-go-sdk v0.24.0
10 | golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3
11 | )
12 |
13 | require (
14 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
15 | github.com/kr/pretty v0.3.1 // indirect
16 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
17 | github.com/rogpeppe/go-internal v1.13.1 // indirect
18 | github.com/tetratelabs/wazero v1.7.2 // indirect
19 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
20 | gopkg.in/yaml.v3 v3.0.1 // indirect
21 | )
22 |
--------------------------------------------------------------------------------
/plugins/traefik/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 |
14 | # Dependency directories (remove the comment below to include it)
15 | # vendor/
16 | traefik
--------------------------------------------------------------------------------
/plugins/traefik/e2e/docker/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.7"
2 |
3 | services:
4 | traefik:
5 | image: traefik:v3.1.4
6 | command:
7 | - --experimental.localPlugins.sablier.moduleName=github.com/sablierapp/sablier
8 | - --entryPoints.http.address=:80
9 | - --providers.docker=true
10 | - --providers.file.filename=/etc/traefik/dynamic-config.yml
11 | ports:
12 | - "8080:80"
13 | volumes:
14 | - '/var/run/docker.sock:/var/run/docker.sock'
15 | - '../../../..:/plugins-local/src/github.com/sablierapp/sablier'
16 | - './dynamic-config.yml:/etc/traefik/dynamic-config.yml'
17 | restart: "no"
18 |
19 | sablier:
20 | image: sablierapp/sablier:local
21 | command:
22 | - start
23 | - --provider.name=docker
24 | - --logging.level=trace
25 | volumes:
26 | - '/var/run/docker.sock:/var/run/docker.sock'
27 |
28 | whoami:
29 | image: acouvreur/whoami:v1.10.2
30 | # Cannot use labels: as soon as the container is stopped, Traefik no longer sees its labels
31 | # and the route disappears. Use the dynamic-config.yml file instead.
32 | healthcheck:
33 | test: [ "CMD", "curl", "-f", "http://localhost" ]
34 | interval: 5s
35 | labels:
36 | - sablier.enable=true
37 | - sablier.group=E2E
38 | # - traefik.enable
39 | # - traefik.http.routers.whoami.rule=PathPrefix(`/whoami`)
40 | # - traefik.http.routers.whoami.middlewares=ondemand
41 |
42 | nginx:
43 | image: nginx:1.27.1
44 | healthcheck:
45 | test: ["CMD", "curl", "-f", "http://localhost"]
46 | interval: 5s
47 | # Cannot use labels: as soon as the container is stopped, Traefik no longer sees its labels
48 | # and the route disappears. Use the dynamic-config.yml file instead.
49 | labels:
50 | - sablier.enable=true
51 | - sablier.group=E2E
52 | # - traefik.enable
53 | # - traefik.http.routers.nginx.rule=PathPrefix(`/nginx`)
54 | # - traefik.http.routers.nginx.middlewares=ondemand
--------------------------------------------------------------------------------
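The `dynamic-config.yml` file that the comments point to is not reproduced in this section. A sketch of what it plausibly contains for the whoami route is below; the middleware option names follow Sablier's documented Traefik plugin settings, so treat the exact keys as assumptions rather than the file's real contents:

```yaml
# Hypothetical dynamic-config.yml sketch; key names are assumptions.
http:
  routers:
    whoami:
      rule: PathPrefix(`/dynamic/whoami`)
      entryPoints: [http]
      middlewares: [dynamic-whoami]
      service: whoami
  middlewares:
    dynamic-whoami:
      plugin:
        sablier:
          sablierUrl: http://sablier:10000
          names: docker_classic_e2e-whoami-1
          sessionDuration: 1m
          dynamic:
            displayName: Dynamic Whoami
            theme: hacker-terminal
  services:
    whoami:
      loadBalancer:
        servers:
          - url: http://whoami:80
```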
/plugins/traefik/e2e/docker/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_COMPOSE_FILE=docker-compose.yml
4 | DOCKER_COMPOSE_PROJECT_NAME=docker_classic_e2e
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_classic() {
12 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
13 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
14 | }
15 |
16 | destroy_docker_classic() {
17 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --remove-orphans || true
18 | }
19 |
20 | run_docker_classic_test() {
21 | echo "Running Docker Classic Test: $1"
22 | prepare_docker_classic
23 | sleep 2
24 | go clean -testcache
25 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
26 | errors=1
27 | docker compose -f ${DOCKER_COMPOSE_FILE} -p ${DOCKER_COMPOSE_PROJECT_NAME} logs sablier traefik
28 | fi
29 | destroy_docker_classic
30 | }
31 |
32 | trap destroy_docker_classic EXIT
33 |
34 | run_docker_classic_test Test_Dynamic
35 | run_docker_classic_test Test_Blocking
36 | run_docker_classic_test Test_Multiple
37 | run_docker_classic_test Test_Healthy
38 | run_docker_classic_test Test_Group
39 |
40 | exit $errors
--------------------------------------------------------------------------------
/plugins/traefik/e2e/docker_swarm/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | DOCKER_STACK_FILE=docker-stack.yml
4 | DOCKER_STACK_NAME=DOCKER_SWARM_E2E
5 |
6 | errors=0
7 |
8 | echo "Using Docker version:"
9 | docker version
10 |
11 | prepare_docker_swarm() {
12 | docker swarm init
13 | }
14 |
15 | prepare_docker_stack() {
16 | docker stack deploy --compose-file $DOCKER_STACK_FILE ${DOCKER_STACK_NAME}
17 | docker run --rm -it -v /var/run/docker.sock:/var/run/docker.sock sudobmitch/docker-stack-wait -t 60 ${DOCKER_STACK_NAME}
18 | }
19 |
20 | destroy_docker_stack() {
21 | docker stack rm ${DOCKER_STACK_NAME}
22 | # Sometimes the network is not cleaned up properly, see https://github.com/moby/moby/issues/30942#issuecomment-540699206
23 | until [ -z "$(docker stack ps ${DOCKER_STACK_NAME} -q)" ]; do sleep 1; done
24 | }
25 |
26 | destroy_docker_swarm() {
27 | docker swarm leave -f || true
28 | }
29 |
30 | run_docker_swarm_test() {
31 | echo "Running Docker Swarm Test: $1"
32 | prepare_docker_stack
33 | sleep 10
34 | go clean -testcache
35 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
36 | errors=1
37 | docker service logs ${DOCKER_STACK_NAME}_sablier
38 | docker service logs ${DOCKER_STACK_NAME}_traefik
39 | fi
40 | destroy_docker_stack
41 | }
42 |
43 | trap destroy_docker_swarm EXIT
44 |
45 | prepare_docker_swarm
46 | run_docker_swarm_test Test_Dynamic
47 | run_docker_swarm_test Test_Blocking
48 | run_docker_swarm_test Test_Multiple
49 | run_docker_swarm_test Test_Healthy
50 | run_docker_swarm_test Test_Group
51 |
52 | exit $errors
--------------------------------------------------------------------------------
/plugins/traefik/e2e/kubernetes/docker-kubernetes.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | server:
4 | image: "rancher/k3s:v1.30.2-k3s1"
5 | command: server --disable=traefik
6 | tmpfs:
7 | - /run
8 | - /var/run
9 | ulimits:
10 | nproc: 65535
11 | nofile:
12 | soft: 65535
13 | hard: 65535
14 | privileged: true
15 | restart: always
16 | environment:
17 | - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
18 | - K3S_KUBECONFIG_MODE=666
19 | volumes:
20 | # This is just so that we get the kubeconfig file out
21 | - .:/output
22 | - '../../../..:/plugins-local/src/github.com/sablierapp/sablier'
23 | ports:
24 | - 6443:6443 # Kubernetes API Server
25 | - 8080:80 # Ingress controller port 80
26 |
--------------------------------------------------------------------------------
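The two published ports are what the e2e run talks to: 6443 for `kubectl` (via the exported kubeconfig) and host port 8080 for Traefik's ingress. A quick manual probe, assuming an ingress rule routes on a `whoami.localhost` host — that hostname is a guess, not taken from the manifests shown here:

```bash
# Sketch: manual probes against the published ports. The Host header value is
# an assumption about the e2e ingress rules.
export KUBECONFIG=./kubeconfig.yaml
kubectl get nodes                                            # API server via 6443
curl -s -H 'Host: whoami.localhost' http://localhost:8080/   # ingress via 8080
```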
/plugins/traefik/e2e/kubernetes/manifests/sablier.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: sablier-deployment
5 | namespace: kube-system
6 | labels:
7 | app: sablier
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: sablier
13 | template:
14 | metadata:
15 | labels:
16 | app: sablier
17 | spec:
18 | serviceAccountName: sablier
19 | containers:
20 | - name: sablier
21 | image: sablierapp/sablier:local
22 | args: ["start", "--provider.name=kubernetes", "--logging.level=trace"]
23 | ports:
24 | - containerPort: 10000
25 | ---
26 | apiVersion: v1
27 | kind: Service
28 | metadata:
29 | name: sablier
30 | namespace: kube-system
31 | spec:
32 | selector:
33 | app: sablier
34 | ports:
35 | - protocol: TCP
36 | port: 10000
37 | targetPort: 10000
38 | ---
39 | apiVersion: v1
40 | kind: ServiceAccount
41 | metadata:
42 | name: sablier
43 | namespace: kube-system
44 | ---
45 | apiVersion: rbac.authorization.k8s.io/v1
46 | kind: ClusterRole
47 | metadata:
48 | name: sablier
49 | # ClusterRole is cluster-scoped; the namespace field does not apply here
50 | rules:
51 | - apiGroups:
52 | - apps
53 | - ""
54 | resources:
55 | - deployments
56 | - deployments/scale
57 | - statefulsets
58 | - statefulsets/scale
59 | verbs:
60 | - patch # Scale up and down
61 | - get # Retrieve info about a specific deployment or statefulset
62 | - update # Scale up and down
63 | - list # Events
64 | - watch # Events
65 | ---
66 | apiVersion: rbac.authorization.k8s.io/v1
67 | kind: ClusterRoleBinding
68 | metadata:
69 | name: sablier
70 | # ClusterRoleBinding is cluster-scoped; the namespace field does not apply here
71 | roleRef:
72 | apiGroup: rbac.authorization.k8s.io
73 | kind: ClusterRole
74 | name: sablier
75 | subjects:
76 | - kind: ServiceAccount
77 | name: sablier
78 | namespace: kube-system
--------------------------------------------------------------------------------
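The ClusterRole grants exactly the verbs Sablier needs: `patch`/`update` for scaling, `get` for inspection, and `list`/`watch` for events. One way to confirm the ServiceAccount actually picked them up is impersonation with `kubectl auth can-i`:

```bash
# Sketch: verify the ServiceAccount ended up with the verbs the ClusterRole grants.
SA=system:serviceaccount:kube-system:sablier
kubectl auth can-i patch deployments --subresource=scale --as "$SA"
kubectl auth can-i update statefulsets --subresource=scale --as "$SA"
kubectl auth can-i watch deployments --as "$SA"
```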
/plugins/traefik/e2e/kubernetes/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export DOCKER_COMPOSE_FILE=docker-kubernetes.yml
4 | export DOCKER_COMPOSE_PROJECT_NAME=kubernetes_e2e
5 |
6 | errors=0
7 |
8 | export KUBECONFIG=./kubeconfig.yaml
9 |
10 | echo "Using Docker version:"
11 | docker version
12 |
13 | prepare_kubernetes() {
14 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
15 | until kubectl get nodes | grep " Ready "; do sleep 1; done
16 | echo "Loading sablierapp/sablier:local into k3s..."
17 | docker save sablierapp/sablier:local | docker exec -i ${DOCKER_COMPOSE_PROJECT_NAME}-server-1 ctr images import -
18 | echo "Loading succeeded."
19 | }
20 |
21 | destroy_kubernetes() {
22 | docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME down --volumes
23 | }
24 |
25 | prepare_traefik() {
26 | helm repo add traefik https://traefik.github.io/charts
27 | helm repo update
28 | helm install traefik --version 28.3.0 traefik/traefik -f values.yaml --namespace kube-system
29 | }
30 |
31 | prepare_deployment() {
32 | kubectl apply -f ./manifests/sablier.yml
33 | kubectl apply -f ./manifests/deployment.yml
34 | }
35 |
36 | destroy_deployment() {
37 | kubectl delete -f ./manifests/deployment.yml
38 | kubectl delete -f ./manifests/sablier.yml
39 | }
40 |
41 | prepare_stateful_set() {
42 | kubectl apply -f ./manifests/statefulset.yml
43 | }
44 |
45 | destroy_stateful_set() {
46 | kubectl delete -f ./manifests/statefulset.yml
47 | }
48 |
49 | run_kubernetes_deployment_test() {
50 | echo "---- Running Kubernetes Test: $1 ----"
51 | prepare_deployment
52 | sleep 10
53 | go clean -testcache
54 | if ! go test -count=1 -tags e2e -timeout 30s -run ^${1}$ github.com/sablierapp/sablier/e2e; then
55 | errors=1
56 | kubectl -n kube-system logs deployments/sablier-deployment
57 | kubectl -n kube-system logs deployments/traefik
58 | fi
59 |
60 | destroy_deployment
61 | }
62 |
63 | trap destroy_kubernetes EXIT
64 |
65 | prepare_kubernetes
66 | prepare_traefik
67 | run_kubernetes_deployment_test Test_Dynamic
68 | run_kubernetes_deployment_test Test_Blocking
69 | run_kubernetes_deployment_test Test_Multiple
70 | run_kubernetes_deployment_test Test_Healthy
71 | run_kubernetes_deployment_test Test_Group
72 |
73 | exit $errors
74 |
--------------------------------------------------------------------------------
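Two of the more fragile steps in this script are the image import into k3s and the local-plugin hostPath mount; if the suite fails before any test output appears, these quick checks are worth running first:

```bash
# Sketch: confirm the image import into k3s worked and that the Traefik pod
# sees the locally mounted plugin source.
docker exec "${DOCKER_COMPOSE_PROJECT_NAME}-server-1" ctr images ls | grep sablier
kubectl -n kube-system exec deploy/traefik -- \
  ls /plugins-local/src/github.com/sablierapp/sablier
```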
/plugins/traefik/e2e/kubernetes/values.yaml:
--------------------------------------------------------------------------------
1 | additionalArguments:
2 | - "--experimental.localPlugins.sablier.moduleName=github.com/sablierapp/sablier"
3 |
4 | providers:
5 | kubernetesIngress:
6 | allowEmptyServices: true
7 | kubernetesCRD:
8 | allowEmptyServices: true
9 |
10 | additionalVolumeMounts:
11 | - name: local-sablier-plugin
12 | mountPath: /plugins-local/src/github.com/sablierapp/sablier
13 |
14 | deployment:
15 | additionalVolumes:
16 | - name: local-sablier-plugin
17 | hostPath:
18 | # directory location on host
19 | path: /plugins-local/src/github.com/sablierapp/sablier
20 | # this field is optional
21 | type: Directory
--------------------------------------------------------------------------------
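These values enable Traefik's local-plugins mode: the `--experimental.localPlugins` argument names the module, and the hostPath volume supplies its source (the k3s compose file bind-mounts the repository to that same path). Rendering the chart offline shows whether both made it into the generated Deployment; this assumes the `traefik` Helm repo has already been added as in `run.sh`:

```bash
# Sketch: render the chart without a cluster and check that the plugin mount
# and the localPlugins argument appear in the generated manifests.
helm template traefik traefik/traefik --version 28.3.0 \
  -f values.yaml --namespace kube-system \
  | grep -E 'plugins-local|localPlugins'
```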
/plugins/traefik/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/sablierapp/sablier/plugins/traefik
2 |
3 | go 1.24.0
4 |
--------------------------------------------------------------------------------
/plugins/traefik/go.sum:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sablierapp/sablier/6e557e95c95d2fba42f7c1e0a5ac8da264b0630d/plugins/traefik/go.sum
--------------------------------------------------------------------------------
/release.config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | "branches": [
3 | { "name": "main" },
4 | { "name": "beta", "channel": "beta", "prerelease": "beta" },
5 | ],
6 | "plugins": [
7 | "@semantic-release/commit-analyzer",
8 | "@semantic-release/release-notes-generator",
9 | ["@semantic-release/exec", {
10 | "publishCmd": "make VERSION=${nextRelease.version} release -j 3 && make VERSION=${nextRelease.version} proxywasm"
11 | }],
12 | ["@semantic-release/github", {
13 | "assets": [
14 | "sablier*"
15 | ]
16 | }],
17 | ["@semantic-release/exec", {
18 | "prepareCmd": "make LAST=${lastRelease.version} NEXT=${nextRelease.version} update-doc-version update-doc-version-middleware"
19 | }],
20 | ["@semantic-release/git", {
21 | "assets": [["**/*.{md,yml}", "!node_modules/**/*.{md,yml}"]],
22 | "message": "docs(release): update doc version from ${lastRelease.version} to ${nextRelease.version}"
23 | }]
24 | ]
25 | }
--------------------------------------------------------------------------------
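The release flow reads top to bottom: commit-analyzer picks the next version, `make release`/`make proxywasm` build the artifacts, the GitHub plugin attaches every `sablier*` asset, and a final commit bumps the documented versions. semantic-release can preview all of this without publishing anything:

```bash
# Sketch: preview the release semantic-release would cut, without publishing.
# --dry-run skips the publish and git steps; --no-ci allows running outside CI.
npx semantic-release --dry-run --no-ci
```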
/sablier.sample.yaml:
--------------------------------------------------------------------------------
1 | provider:
2 | name: docker
3 | server:
4 | port: 10000
5 | base-path: /
6 | storage:
7 | file:
8 | sessions:
9 | default-duration: 5m
10 | expiration-interval: 20s
11 | logging:
12 | level: info
13 | strategy:
14 | dynamic:
15 | custom-themes-path:
16 | show-details-by-default: true
17 | default-theme: hacker-terminal
18 | default-refresh-frequency: 5s
19 | blocking:
20 | default-timeout: 1m
--------------------------------------------------------------------------------
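Each key in this file maps onto a dotted CLI flag of the same path — the Kubernetes manifest earlier passes `--provider.name` and `--logging.level` exactly that way. A sketch of an equivalent invocation, assuming the remaining flags follow the same pattern (unverified here):

```bash
# Sketch: the sample file expressed as flags. Only --provider.name and
# --logging.level are confirmed by the manifests above; the rest assume the
# flag path mirrors the YAML path.
sablier start \
  --provider.name=docker \
  --logging.level=info \
  --sessions.default-duration=5m \
  --strategy.dynamic.default-theme=hacker-terminal
```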