├── .github ├── dependabot.yml └── workflows │ ├── codeql-analysis.yml │ └── docker-publish.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── cmd ├── agent.go ├── root.go └── server.go ├── examples ├── dependencies.hcl ├── dynamic-job.hcl ├── fan-out-fan-in.hcl ├── happy-job.gif ├── happy-job.hcl └── leader-task-group.hcl ├── go.mod ├── go.sum ├── main.go └── pkg ├── api ├── error.go ├── job.go ├── pipeline.go └── router.go └── controller └── pipeline.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '32 14 * * 4' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'go' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] 37 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 38 | 39 | steps: 40 | - name: Checkout repository 41 | uses: actions/checkout@v3 42 | 43 | # Initializes the CodeQL tools for scanning. 44 | - name: Initialize CodeQL 45 | uses: github/codeql-action/init@v2 46 | with: 47 | languages: ${{ matrix.language }} 48 | # If you wish to specify custom queries, you can do so here or in a config file. 49 | # By default, queries listed here will override any specified in a config file. 50 | # Prefix the list here with "+" to use these queries and those in the config file. 51 | 52 | # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 53 | # queries: security-extended,security-and-quality 54 | 55 | 56 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 
57 | # If this step fails, then you should remove it and run the build manually (see below) 58 | - name: Autobuild 59 | uses: github/codeql-action/autobuild@v2 60 | 61 | # ℹ️ Command-line programs to run using the OS shell. 62 | # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 63 | 64 | # If the Autobuild fails above, remove it and uncomment the following three lines. 65 | # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 66 | 67 | # - run: | 68 | # echo "Run, Build Application using script" 69 | # ./location_of_script_within_repo/buildscript.sh 70 | 71 | - name: Perform CodeQL Analysis 72 | uses: github/codeql-action/analyze@v2 73 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | # This workflow uses actions that are not certified by GitHub. 4 | # They are provided by a third-party and are governed by 5 | # separate terms of service, privacy policy, and support 6 | # documentation. 7 | 8 | on: 9 | push: 10 | branches: 11 | - main 12 | - f-* # feature 13 | - b-* # bug 14 | - c-* # chore 15 | # Publish semver tags as releases. 16 | tags: [ 'v*.*.*' ] 17 | pull_request: 18 | branches: [ main ] 19 | 20 | env: 21 | # Use docker.io for Docker Hub if empty 22 | REGISTRY: ghcr.io 23 | # github.repository as / 24 | IMAGE_NAME: ${{ github.repository }} 25 | 26 | 27 | jobs: 28 | build: 29 | 30 | runs-on: ubuntu-latest 31 | permissions: 32 | contents: read 33 | packages: write 34 | # This is used to complete the identity challenge 35 | # with sigstore/fulcio when running outside of PRs. 36 | id-token: write 37 | 38 | steps: 39 | - name: Checkout repository 40 | uses: actions/checkout@v2 41 | 42 | # Install the cosign tool except on PR 43 | # https://github.com/sigstore/cosign-installer 44 | - name: Install cosign 45 | if: github.event_name != 'pull_request' 46 | uses: sigstore/cosign-installer@main 47 | with: 48 | cosign-release: 'v1.13.1' 49 | 50 | 51 | # Workaround: https://github.com/docker/build-push-action/issues/461 52 | - name: Setup Docker buildx 53 | uses: docker/setup-buildx-action@v1 54 | 55 | # Login against a Docker registry except on PR 56 | # https://github.com/docker/login-action 57 | - name: Log into registry ${{ env.REGISTRY }} 58 | if: github.event_name != 'pull_request' 59 | uses: docker/login-action@v1 60 | with: 61 | registry: ${{ env.REGISTRY }} 62 | username: ${{ github.actor }} 63 | password: ${{ secrets.GITHUB_TOKEN }} 64 | 65 | # Extract metadata (tags, labels) for Docker 66 | # https://github.com/docker/metadata-action 67 | - name: Extract Docker metadata 68 | id: meta 69 | uses: docker/metadata-action@v3 70 | with: 71 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 72 | 73 | # Build and push Docker image with Buildx (don't push on PR) 74 | # https://github.com/docker/build-push-action 75 | - name: Build and push Docker image 76 | id: build-and-push 77 | uses: docker/build-push-action@v2 78 | with: 79 | context: . 80 | platforms: linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64 81 | push: ${{ github.event_name != 'pull_request' }} 82 | tags: ${{ steps.meta.outputs.tags }} 83 | labels: ${{ steps.meta.outputs.labels }} 84 | 85 | # Sign the resulting Docker image digest except on PRs. 
86 | # This will only write to the public Rekor transparency log when the Docker 87 | # repository is public to avoid leaking data. If you would like to publish 88 | # transparency data even for private images, pass --force to cosign below. 89 | # https://github.com/sigstore/cosign 90 | - name: Sign the published Docker image 91 | if: ${{ github.event_name != 'pull_request' }} 92 | env: 93 | COSIGN_EXPERIMENTAL: "true" 94 | # This step uses the identity token to provision an ephemeral certificate 95 | # against the sigstore community Fulcio instance. 96 | run: IMAGE_NAME=${{ env.IMAGE_NAME }} cosign sign ${{ env.REGISTRY }}/${IMAGE_NAME,,}@${{ steps.build-and-push.outputs.digest }} 97 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | # vendor/ 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.18-alpine as builder 2 | 3 | RUN apk --no-cache add ca-certificates 4 | 5 | WORKDIR /app 6 | 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | 10 | COPY . ./ 11 | RUN CGO_ENABLED=0 GOOS=linux go build -v -o nomad-pipeline 12 | 13 | FROM scratch 14 | 15 | COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 16 | COPY --from=builder /app/nomad-pipeline /nomad-pipeline 17 | 18 | ENTRYPOINT ["/nomad-pipeline"] 19 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 HyperBadger 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nomad Pipeline 2 | 3 | Nomad is great for batch jobs; however, in its current state, you can't have job dependencies, which are required when running pipeline-style workloads.
The inspiration for this project came from the [`nomad-dag-hack`](https://github.com/cgbaker/nomad-dag-hack) project and the accompanying [blog post](https://www.cgbaker.net/2020/12/hacking-nomad-job-dependencies/). 4 | 5 | ![](examples/happy-job.gif) 6 | 7 | ## How to get started? 8 | 9 | It's just 2 steps... 10 | 11 | **Inject the 'init' task group** 12 | 13 | The 'init' task will look at all the meta-tags set up in the next step and inject lifecycle task hooks into the task groups. The hooks are responsible for starting the next task group after the current one finishes. 14 | 15 | ```hcl 16 | variable "nomad_addr" { 17 | type = string 18 | } 19 | 20 | job "example-job" { 21 | 22 | meta = { 23 | "nomad-pipeline.enabled" = "true" 24 | } 25 | 26 | group "▶️" { 27 | count = 1 28 | 29 | task "init" { 30 | driver = "docker" 31 | 32 | config { 33 | image = "ghcr.io/hyperbadger/nomad-pipeline:main" 34 | args = ["agent", "init"] 35 | } 36 | 37 | env { 38 | NOMAD_ADDR = var.nomad_addr 39 | } 40 | } 41 | } 42 | 43 | ... 44 | } 45 | ``` 46 | 47 | **Annotate task groups with meta-tags** 48 | 49 | ```hcl 50 | job "example-job" { 51 | 52 | ... 53 | 54 | group "1-first-task-group" { 55 | count = 0 # <-- Important! nomad-pipeline will control the count 56 | 57 | meta = { 58 | "nomad-pipeline.root" = "true" # <-- Indicates the starting task group 59 | "nomad-pipeline.next" = "2-second-task-group" 60 | } 61 | 62 | ... 63 | } 64 | 65 | group "2-second-task-group" { 66 | count = 0 67 | 68 | ... 69 | } 70 | 71 | ... 72 | } 73 | 74 | ``` 75 | 76 | ## How to run examples? 77 | 78 | **Requirements** 79 | 80 | - Docker (with default `bridge` network) 81 | - Nomad 82 | - jq 83 | 84 | **Steps** 85 | 86 | 1. Find your Docker `bridge` network gateway IP - `export DOCKER_GATEWAY_IP=$(docker network inspect bridge | jq -r ".[].IPAM.Config[].Gateway")` 87 | 1. Start Nomad in dev mode - `nomad agent -dev -bind "${DOCKER_GATEWAY_IP}"` 88 | 1. Ensure Nomad has started by visiting the URL printed by `echo "http://${DOCKER_GATEWAY_IP}:4646"` 89 | 1. Set `NOMAD_ADDR` for the Nomad CLI to access Nomad - `export NOMAD_ADDR="http://${DOCKER_GATEWAY_IP}:4646"` 90 | 1. Ensure Nomad CLI works - `nomad server members` 91 | 1. Run any job in the examples/ directory - `nomad job run examples/happy-job.hcl` 92 | 93 | ## Other features 94 | 95 | **Run tasks in parallel** 96 | 97 | ***Using dependencies*** 98 | 99 | To support running tasks in parallel and having a final task that runs at the end of all parallel tasks (e.g. the fan-out fan-in pattern), you can use the `nomad-pipeline.dependencies` tag. 100 | 101 | ```mermaid 102 | graph TD; 103 | A-->B; 104 | A-->C; 105 | B-->D; 106 | C-->E; 107 | D-->E; 108 | ``` 109 | 110 | In the above case, the E task group should look like the following; this ensures that both C and D run before E, even if they finish at different times. 111 | 112 | ```hcl 113 | group "E" { 114 | count = 0 115 | 116 | meta = { 117 | "nomad-pipeline.dependencies" = "C, D" 118 | } 119 | 120 | ... 121 | } 122 | ``` 123 | 124 | See [`dependencies.hcl`](examples/dependencies.hcl) for a more complete example. 125 | 126 | ***Using count*** 127 | 128 | Another way to implement the fan-out fan-in pattern is to have multiple instances of a task group that can all pick up some work. Without nomad-pipeline, this is quite easy: you just set the [`count` stanza](https://www.nomadproject.io/docs/job-specification/group#count) on the task group. However, when using nomad-pipeline, the control of count is not in your hands.
So if you want to set a count greater than 1, you can set the `nomad-pipeline.count` tag. 129 | 130 | > 💡 *Tip: The [`count` stanza](https://www.nomadproject.io/docs/job-specification/group#count) doesn't support variable interpolation since the config value is an integer and not a string - currently Nomad only supports variable interpolation for string config values. This means that `count` can't be set from a `NOMAD_META_` variable, which is required for setting the `count` dynamically in a parameterized job. Using the `nomad-pipeline.count` tag allows you to work around this. All `nomad-pipeline.*` tags interpolate variables, so you can use something like `"nomad-pipeline.count" = "${NOMAD_META_count}"`* 131 | 132 | See [`examples/fan-out-fan-in.hcl`](examples/fan-out-fan-in.hcl) for a more complete example. 133 | 134 | **Dynamic tasks** 135 | 136 | Dynamic tasks allow you to have a task that outputs more tasks 🤯. These tasks are then run as part of the job. This opens up the possibility of creating some powerful pipelines. An example use case is creating periodic splits of a longer task: if you have a task that processes 5 hours of some data, you could split it into five 1-hour tasks and run them in parallel. This can be achieved by having an initial task that outputs the 5 split tasks. 137 | 138 | To use dynamic tasks, set the `nomad-pipeline.dynamic-tasks` tag to a path/glob of where the task JSON files will be written. This path should be relative to [`NOMAD_ALLOC_DIR`](https://www.nomadproject.io/docs/runtime/environment#alloc). 139 | 140 | In the following example, the `1-generate-tasks` group runs first and outputs the `2-echo-hey` task group, which is then launched after `1-generate-tasks` finishes. 141 | 142 | ```hcl 143 | group "1-generate-tasks" { 144 | count = 0 145 | 146 | meta = { 147 | "nomad-pipeline.root" = "true" 148 | "nomad-pipeline.dynamic-tasks" = "tasks/*" 149 | } 150 | 151 | task "generate-tasks" { 152 | driver = "raw_exec" 153 | 154 | config { 155 | command = "/bin/echo" 156 | args = ["generated tasks"] 157 | } 158 | 159 | template { 160 | data = <<-EOT 161 | [{ 162 | "Name": "2-echo-hey", 163 | "Count": 0, 164 | "Meta": { 165 | "nomad-pipeline.root": "true" 166 | }, 167 | "Tasks": [{ 168 | "Name": "echo", 169 | "Driver": "raw_exec", 170 | "Config": { "command": "/bin/echo", "args": [ "hey" ] } 171 | }] 172 | }] 173 | EOT 174 | 175 | destination = "${NOMAD_ALLOC_DIR}/tasks/echo_hey.json" 176 | } 177 | } 178 | 179 | ... 180 | } 181 | ``` 182 | 183 | See [`dynamic-job.hcl`](examples/dynamic-job.hcl) for a more complete example. 184 | 185 | **Job Level Leader** 186 | 187 | Nomad currently allows you to set a [`leader`](https://www.nomadproject.io/docs/job-specification/task#leader) at the task level. This allows you to gracefully shut down all other tasks in the group when the leader task exits. 188 | 189 | Using the `nomad-pipeline.leader` tag, you can get the same functionality at the job level. You can set the tag on a task group, and when that task group completes, all other task groups will be gracefully shut down. 190 | 191 | ```hcl 192 | group "leader" { 193 | count = 0 194 | 195 | meta = { 196 | "nomad-pipeline.leader" = "true" 197 | } 198 | 199 | ... 200 | } 201 | ``` 202 | 203 | See [`leader-task-group.hcl`](examples/leader-task-group.hcl) for a more complete example.
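For context, here is a condensed sketch of that example (abridged: the inline `sleep` stands in for the templated script in the full file). When the `leader` group's task exits after 5 seconds, the `some-long-running-process` group - which would otherwise run forever - is gracefully shut down with it:

```hcl
group "leader" {
  count = 0

  meta = {
    "nomad-pipeline.root"   = "true"
    "nomad-pipeline.leader" = "true"
  }

  task "some-process" {
    driver = "raw_exec"

    config {
      command = "/bin/bash"
      args    = ["-c", "sleep 5"]
    }
  }
}

group "some-long-running-process" {
  count = 0

  meta = {
    "nomad-pipeline.root" = "true"
  }

  # Runs indefinitely; it only stops because the leader group above completes.
  task "forever-run" {
    driver = "raw_exec"

    config {
      command = "/bin/tail"
      args    = ["-f", "/dev/null"]
    }
  }
}
```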
204 | 205 | **URL Friendly Nomad Environment Variables** 206 | 207 | There are many useful [Nomad environment variables](https://www.nomadproject.io/docs/runtime/interpolation#interpreted_env_vars) that can be used at runtime and in config fields that support variable interpolation. However, in some cases, these environment variables are not URL-friendly - in the case of parameterized jobs, the dispatched job's ID (`NOMAD_JOB_ID`) and name (`NOMAD_JOB_NAME`) will have a `/` in them. URL-friendly versions of these variables are required when using them in the [`service` stanza](https://www.nomadproject.io/docs/job-specification/service#name). To allow for this, URL-friendly versions of `NOMAD_JOB_ID` and `NOMAD_JOB_NAME` can be found under `NOMAD_META_JOB_ID_SLUG` and `NOMAD_META_JOB_NAME_SLUG` respectively - the inspiration for `_SLUG` came from [GitLab predefined variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html). These meta variables are injected at the job level by the init task of nomad-pipeline, making them available to all the task groups that come after it. 208 | 209 | Although this feature was added specifically for use with the [`service` stanza](https://www.nomadproject.io/docs/job-specification/service#name), it could prove useful for other config fields. Note to developer: nomad-pipeline might not be the right vehicle for this feature; however, the init task was a convenient place to put this functionality. 210 | -------------------------------------------------------------------------------- /cmd/agent.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | log "github.com/sirupsen/logrus" 5 | "github.com/spf13/cobra" 6 | 7 | "github.com/hyperbadger/nomad-pipeline/pkg/controller" 8 | ) 9 | 10 | var agentCmd = &cobra.Command{ 11 | Use: "agent", 12 | Run: func(cmd *cobra.Command, args []string) { 13 | cmd.Help() 14 | }, 15 | } 16 | 17 | var agentInitCmd = &cobra.Command{ 18 | Use: "init", 19 | Short: "Initialize job with nomad-pipeline hooks", 20 | Args: cobra.NoArgs, 21 | Run: func(cmd *cobra.Command, args []string) { 22 | pc := controller.NewPipelineController(cPath) 23 | 24 | update := pc.Init() 25 | if update { 26 | err := pc.UpdateJob() 27 | if err != nil { 28 | log.Fatalf("error updating job: %v", err) 29 | } 30 | } 31 | }, 32 | } 33 | 34 | var agentWaitCmd = &cobra.Command{ 35 | Use: "wait", 36 | Short: "Wait for previous task group(s)", 37 | Args: cobra.ArbitraryArgs, 38 | Run: func(cmd *cobra.Command, args []string) { 39 | pc := controller.NewPipelineController(cPath) 40 | 41 | pc.Wait(args) 42 | }, 43 | } 44 | 45 | var agentNextCmd = &cobra.Command{ 46 | Use: "next", 47 | Short: "Trigger next task group(s)", 48 | Args: cobra.ArbitraryArgs, 49 | Run: func(cmd *cobra.Command, args []string) { 50 | pc := controller.NewPipelineController(cPath) 51 | 52 | update := pc.Next(args, dynamicTasks) 53 | if update { 54 | err := pc.UpdateJob() 55 | if err != nil { 56 | log.Fatalf("error updating job: %v", err) 57 | } 58 | } 59 | }, 60 | } 61 | 62 | var dynamicTasks string 63 | 64 | func init() { 65 | agentNextCmd.Flags().StringVar(&dynamicTasks, "dynamic-tasks", "", "glob of task files relative to alloc dir") 66 | 67 | agentCmd.AddCommand(agentInitCmd) 68 | agentCmd.AddCommand(agentWaitCmd) 69 | agentCmd.AddCommand(agentNextCmd) 70 | 71 | rootCmd.AddCommand(agentCmd) 72 | } 73 | -------------------------------------------------------------------------------- /cmd/root.go:
-------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var rootCmd = &cobra.Command{ 11 | Use: "nomad-pipeline", 12 | Short: "Run pipeline-style workloads in Nomad", 13 | Run: func(cmd *cobra.Command, args []string) { 14 | cmd.Help() 15 | }, 16 | } 17 | 18 | var cPath string 19 | 20 | func init() { 21 | rootCmd.PersistentFlags().StringVar(&cPath, "config", "config.yaml", "path to config") 22 | } 23 | 24 | func Execute() { 25 | if err := rootCmd.Execute(); err != nil { 26 | fmt.Println(err) 27 | os.Exit(1) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /cmd/server.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "github.com/spf13/cobra" 5 | 6 | "github.com/hyperbadger/nomad-pipeline/pkg/api" 7 | "go.uber.org/zap" 8 | ) 9 | 10 | var serverCmd = &cobra.Command{ 11 | Use: "server", 12 | Run: func(cmd *cobra.Command, args []string) { 13 | _logger, _ := zap.NewProduction() 14 | defer _logger.Sync() 15 | 16 | logger := _logger.Sugar() 17 | 18 | ps, err := api.NewPipelineServer(logger) 19 | if err != nil { 20 | logger.Fatalf("error creating pipeline server: %v", err) // Fatalf is Sprintf-style; %w only works with fmt.Errorf 21 | } 22 | 23 | srv := ps.NewHTTPServer(addr) 24 | 25 | if err := srv.ListenAndServe(); err != nil { 26 | logger.Fatalf("server errored: %v", err) 27 | } 28 | }, 29 | } 30 | 31 | var addr string 32 | 33 | func init() { 34 | serverCmd.Flags().StringVar(&addr, "addr", "127.0.0.1:4656", "address server will listen on") 35 | 36 | rootCmd.AddCommand(serverCmd) 37 | } 38 | -------------------------------------------------------------------------------- /examples/dependencies.hcl: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/hyperbadger/nomad-pipeline:main" 9 | } 10 | 11 | variable "nomad_addr" { 12 | type = string 13 | default = "http://host.docker.internal:4646" 14 | } 15 | 16 | variable "docker_extra_hosts" { 17 | type = list(string) 18 | default = ["host.docker.internal:host-gateway"] 19 | } 20 | 21 | job "dependencies" { 22 | name = "dependencies" 23 | datacenters = var.datacenters 24 | type = "batch" 25 | 26 | meta = { 27 | "nomad-pipeline.enabled" = "true" 28 | } 29 | 30 | group "▶️" { 31 | task "init" { 32 | driver = "docker" 33 | 34 | config { 35 | image = var.image 36 | args = ["agent", "init"] 37 | 38 | extra_hosts = var.docker_extra_hosts 39 | auth_soft_fail = true 40 | } 41 | 42 | env { 43 | NOMAD_ADDR = var.nomad_addr 44 | NOMAD_PIPELINE_DEBUG = "true" 45 | } 46 | } 47 | } 48 | 49 | group "1a-task" { 50 | count = 0 51 | 52 | meta = { 53 | "nomad-pipeline.root" = "true" 54 | "nomad-pipeline.next" = "2-dependent" 55 | } 56 | 57 | task "normal" { 58 | driver = "raw_exec" 59 | 60 | config { 61 | command = "/bin/bash" 62 | args = ["local/main.sh"] 63 | } 64 | 65 | template { 66 | data = <<-EOT 67 | #!/bin/bash 68 | 69 | echo "do something" 70 | sleep 5 71 | 72 | EOT 73 | 74 | destination = "local/main.sh" 75 | } 76 | } 77 | } 78 | 79 | group "1b-task" { 80 | count = 0 81 | 82 | meta = { 83 | "nomad-pipeline.root" = "true" 84 | "nomad-pipeline.next" = "2-dependent" 85 | } 86 | 87 | task "normal" { 88 | driver = "raw_exec" 89 | 90 | config { 91 | command = "/bin/bash" 92 | args = ["local/main.sh"] 93 | }
94 | 95 | template { 96 | data = <<-EOT 97 | #!/bin/bash 98 | 99 | echo "do something" 100 | sleep 10 101 | 102 | EOT 103 | 104 | destination = "local/main.sh" 105 | } 106 | } 107 | } 108 | 109 | group "1c-task" { 110 | count = 0 111 | 112 | meta = { 113 | "nomad-pipeline.root" = "true" 114 | "nomad-pipeline.next" = "2-dependent" 115 | } 116 | 117 | task "normal" { 118 | driver = "raw_exec" 119 | 120 | config { 121 | command = "/bin/bash" 122 | args = ["local/main.sh"] 123 | } 124 | 125 | template { 126 | data = <<-EOT 127 | #!/bin/bash 128 | 129 | echo "do something" 130 | sleep 60 131 | 132 | EOT 133 | 134 | destination = "local/main.sh" 135 | } 136 | } 137 | } 138 | 139 | group "2-dependent" { 140 | count = 0 141 | 142 | meta = { 143 | "nomad-pipeline.dependencies" = "1a-task, 1b-task, 1c-task" 144 | } 145 | 146 | task "dependent" { 147 | driver = "raw_exec" 148 | 149 | config { 150 | command = "/bin/bash" 151 | args = ["local/main.sh"] 152 | } 153 | 154 | template { 155 | data = <<-EOT 156 | #!/bin/bash 157 | echo "successfully waited for dependency" 158 | EOT 159 | 160 | destination = "local/main.sh" 161 | } 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /examples/dynamic-job.hcl: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/hyperbadger/nomad-pipeline:main" 9 | } 10 | 11 | variable "nomad_addr" { 12 | type = string 13 | default = "http://host.docker.internal:4646" 14 | } 15 | 16 | variable "docker_extra_hosts" { 17 | type = list(string) 18 | default = ["host.docker.internal:host-gateway"] 19 | } 20 | 21 | job "dynamic" { 22 | name = "dynamic-job" 23 | datacenters = var.datacenters 24 | type = "batch" 25 | 26 | meta = { 27 | "nomad-pipeline.enabled" = "true" 28 | } 29 | 30 | group "▶️" { 31 | task "init" { 32 | driver = "docker" 33 | 34 | config { 35 | image = var.image 36 | args = ["agent", "init"] 37 | 38 | extra_hosts = var.docker_extra_hosts 39 | auth_soft_fail = true 40 | } 41 | 42 | env { 43 | NOMAD_ADDR = var.nomad_addr 44 | NOMAD_PIPELINE_DEBUG = "true" 45 | } 46 | } 47 | } 48 | 49 | group "1-generate-tasks" { 50 | count = 0 51 | 52 | meta = { 53 | "nomad-pipeline.root" = "true" 54 | "nomad-pipeline.dynamic-tasks" = "tasks/*" 55 | } 56 | 57 | task "generate-tasks" { 58 | driver = "raw_exec" 59 | 60 | config { 61 | command = "/bin/bash" 62 | args = ["${NOMAD_TASK_DIR}/generate-tasks.sh"] 63 | } 64 | 65 | template { 66 | data = <<-EOT 67 | [ 68 | { 69 | "Name": "${TASK_NAME}", 70 | "Count": 0, 71 | "Meta": { 72 | "nomad-pipeline.root": "true", 73 | "nomad-pipeline.next": "3-last" 74 | }, 75 | "Tasks": [ 76 | { 77 | "Name": "echo", 78 | "Driver": "raw_exec", 79 | "Config": { 80 | "command": "/bin/echo", 81 | "args": [ "hey" ] 82 | } 83 | } 84 | ] 85 | } 86 | ] 87 | EOT 88 | 89 | destination = "${NOMAD_TASK_DIR}/task.template.json" 90 | } 91 | 92 | template { 93 | data = <<-EOT 94 | [ 95 | { 96 | "Name": "3-last", 97 | "Count": 0, 98 | "Meta": { 99 | "nomad-pipeline.root": "true", 100 | "nomad-pipeline.dependencies": "2a-echo,2b-echo" 101 | }, 102 | "Tasks": [ 103 | { 104 | "Name": "echo", 105 | "Driver": "raw_exec", 106 | "Config": { 107 | "command": "/bin/echo", 108 | "args": [ "hey" ] 109 | } 110 | } 111 | ] 112 | } 113 | ] 114 | EOT 115 | 116 | destination = "${NOMAD_TASK_DIR}/last-task.template.json" 117 | } 118 | 119 | template { 
120 | data = <<-EOT 121 | #!/bin/bash 122 | mkdir -p "${NOMAD_ALLOC_DIR}/tasks" 123 | cat ${NOMAD_TASK_DIR}/task.template.json | TASK_NAME="2a-echo" envsubst > ${NOMAD_ALLOC_DIR}/tasks/2a-echo.json 124 | cat ${NOMAD_TASK_DIR}/task.template.json | TASK_NAME="2b-echo" envsubst > ${NOMAD_ALLOC_DIR}/tasks/2b-echo.json 125 | cp ${NOMAD_TASK_DIR}/last-task.template.json ${NOMAD_ALLOC_DIR}/tasks/3-last.json 126 | sleep 10 127 | EOT 128 | 129 | destination = "${NOMAD_TASK_DIR}/generate-tasks.sh" 130 | } 131 | } 132 | } 133 | } -------------------------------------------------------------------------------- /examples/fan-out-fan-in.hcl: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/hyperbadger/nomad-pipeline:main" 9 | } 10 | 11 | variable "nomad_addr" { 12 | type = string 13 | default = "http://host.docker.internal:4646" 14 | } 15 | 16 | variable "docker_extra_hosts" { 17 | type = list(string) 18 | default = ["host.docker.internal:host-gateway"] 19 | } 20 | 21 | job "fan-out-fan-in" { 22 | name = "fan-out-fan-in" 23 | datacenters = var.datacenters 24 | type = "batch" 25 | 26 | meta = { 27 | "nomad-pipeline.enabled" = "true" 28 | } 29 | 30 | group "▶️" { 31 | task "init" { 32 | driver = "docker" 33 | 34 | config { 35 | image = var.image 36 | args = ["agent", "init"] 37 | 38 | extra_hosts = var.docker_extra_hosts 39 | auth_soft_fail = true 40 | } 41 | 42 | env { 43 | NOMAD_ADDR = var.nomad_addr 44 | NOMAD_PIPELINE_DEBUG = "true" 45 | } 46 | } 47 | } 48 | 49 | group "1-submit-tasks" { 50 | count = 0 51 | 52 | meta = { 53 | "nomad-pipeline.root" = "true" 54 | "nomad-pipeline.next" = "2-do-work" 55 | } 56 | 57 | task "submit" { 58 | driver = "raw_exec" 59 | 60 | config { 61 | command = "/bin/bash" 62 | args = ["local/main.sh"] 63 | } 64 | 65 | template { 66 | data = <<-EOT 67 | #!/bin/bash 68 | 69 | sleep 5 70 | echo "alot of work" > queue 71 | 72 | EOT 73 | 74 | destination = "local/main.sh" 75 | } 76 | } 77 | } 78 | 79 | group "2-do-work" { 80 | count = 0 81 | 82 | meta = { 83 | "nomad-pipeline.count" = "5" 84 | "nomad-pipeline.next" = "3-process-output" 85 | } 86 | 87 | scaling { 88 | enabled = true 89 | max = 10 90 | } 91 | 92 | task "work" { 93 | driver = "raw_exec" 94 | 95 | config { 96 | command = "/bin/bash" 97 | args = ["local/main.sh"] 98 | } 99 | 100 | template { 101 | data = <<-EOT 102 | #!/bin/bash 103 | 104 | echo "pick things off queue and do work" 105 | # sleep 10 106 | sleep $((5 + RANDOM % 20)); 107 | 108 | EOT 109 | 110 | destination = "local/main.sh" 111 | } 112 | } 113 | } 114 | 115 | group "3-process-output" { 116 | count = 0 117 | 118 | meta = { 119 | "nomad-pipeline.dependencies" = "2-do-work" 120 | } 121 | 122 | task "process" { 123 | driver = "raw_exec" 124 | 125 | config { 126 | command = "/bin/bash" 127 | args = ["local/main.sh"] 128 | } 129 | 130 | template { 131 | data = <<-EOT 132 | #!/bin/bash 133 | echo "process output of work" 134 | EOT 135 | 136 | destination = "local/main.sh" 137 | } 138 | } 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /examples/happy-job.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperbadger/nomad-pipeline/a7e234ea8ff38090b6bd92e2b12d1066c9121e72/examples/happy-job.gif 
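The `examples/fan-out-fan-in.hcl` job above hard-codes `"nomad-pipeline.count" = "5"`. As the README tip notes, `nomad-pipeline.*` tags interpolate runtime variables, so a parameterized variant of this job could take the count at dispatch time instead. A minimal sketch, assuming a hypothetical `count` meta parameter (this variant is not part of the repo):

```hcl
job "fan-out-fan-in" {
  type = "batch"

  # Require a `count` meta value when the job is dispatched.
  parameterized {
    meta_required = ["count"]
  }

  group "2-do-work" {
    count = 0 # nomad-pipeline controls the actual count

    meta = {
      # Interpolated from the dispatch-time meta value.
      "nomad-pipeline.count" = "${NOMAD_META_count}"
      "nomad-pipeline.next"  = "3-process-output"
    }

    scaling {
      enabled = true
      max     = 10
    }

    # ... tasks as in examples/fan-out-fan-in.hcl ...
  }
}
```

Dispatching it would then look something like `nomad job dispatch -meta count=5 fan-out-fan-in`.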
-------------------------------------------------------------------------------- /examples/happy-job.hcl: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/hyperbadger/nomad-pipeline:main" 9 | } 10 | 11 | variable "nomad_addr" { 12 | type = string 13 | default = "http://host.docker.internal:4646" 14 | } 15 | 16 | variable "docker_extra_hosts" { 17 | type = list(string) 18 | default = ["host.docker.internal:host-gateway"] 19 | } 20 | 21 | job "happy" { 22 | name = "happy-job" 23 | datacenters = var.datacenters 24 | type = "batch" 25 | 26 | meta = { 27 | "nomad-pipeline.enabled" = "true" 28 | } 29 | 30 | group "▶️" { 31 | count = 1 32 | 33 | task "init" { 34 | driver = "docker" 35 | 36 | config { 37 | image = var.image 38 | args = ["agent", "init"] 39 | 40 | extra_hosts = var.docker_extra_hosts 41 | auth_soft_fail = true 42 | } 43 | 44 | env { 45 | NOMAD_ADDR = var.nomad_addr 46 | NOMAD_PIPELINE_DEBUG = "true" 47 | } 48 | } 49 | } 50 | 51 | group "1-normal-task" { 52 | count = 0 53 | 54 | meta = { 55 | "nomad-pipeline.root" = "true" 56 | "nomad-pipeline.next" = "2-multi-task-group" 57 | } 58 | 59 | task "normal" { 60 | driver = "raw_exec" 61 | 62 | config { 63 | command = "/bin/bash" 64 | args = ["local/main.sh"] 65 | } 66 | 67 | template { 68 | data = <<-EOT 69 | #!/bin/bash 70 | 71 | echo "do something" 72 | sleep 5 73 | 74 | EOT 75 | 76 | destination = "local/main.sh" 77 | } 78 | } 79 | } 80 | 81 | group "2-multi-task-group" { 82 | count = 0 83 | 84 | meta = { 85 | "nomad-pipeline.next" = "3a-parallel,3b-parallel-i" 86 | } 87 | 88 | task "first_task" { 89 | driver = "raw_exec" 90 | config { 91 | command = "/bin/echo" 92 | args = ["first_task"] 93 | } 94 | } 95 | 96 | task "second_task" { 97 | driver = "raw_exec" 98 | config { 99 | command = "/bin/echo" 100 | args = ["second_task"] 101 | } 102 | } 103 | } 104 | 105 | group "3a-parallel" { 106 | count = 0 107 | 108 | meta = { 109 | "nomad-pipeline.next" = "4-dependent" 110 | } 111 | 112 | task "parallel" { 113 | driver = "raw_exec" 114 | 115 | config { 116 | command = "/bin/bash" 117 | args = ["local/main.sh"] 118 | } 119 | 120 | template { 121 | data = <<-EOT 122 | #!/bin/bash 123 | 124 | sleep 10 125 | 126 | EOT 127 | 128 | destination = "local/main.sh" 129 | } 130 | } 131 | } 132 | 133 | group "3b-parallel-i" { 134 | count = 0 135 | 136 | meta = { 137 | "nomad-pipeline.next" = "3b-parallel-ii" 138 | } 139 | 140 | task "parallel" { 141 | driver = "raw_exec" 142 | 143 | config { 144 | command = "/bin/bash" 145 | args = ["local/main.sh"] 146 | } 147 | 148 | template { 149 | data = <<-EOT 150 | #!/bin/bash 151 | 152 | sleep 15 153 | 154 | EOT 155 | 156 | destination = "local/main.sh" 157 | } 158 | } 159 | } 160 | 161 | group "3b-parallel-ii" { 162 | count = 0 163 | 164 | meta = { 165 | "nomad-pipeline.next" = "4-dependent" 166 | } 167 | 168 | task "parallel" { 169 | driver = "raw_exec" 170 | 171 | config { 172 | command = "/bin/bash" 173 | args = ["local/main.sh"] 174 | } 175 | 176 | template { 177 | data = <<-EOT 178 | #!/bin/bash 179 | 180 | sleep 10 181 | 182 | EOT 183 | 184 | destination = "local/main.sh" 185 | } 186 | } 187 | } 188 | 189 | group "4-dependent" { 190 | count = 0 191 | 192 | meta = { 193 | # BUG: when whole job is restarted, it will not wait for this task group, 194 | # 4-dependent will run as soon as 3b-parallel-i finishes 195 | 
"nomad-pipeline.dependencies" = "3b-parallel-ii" 196 | } 197 | 198 | task "dependent" { 199 | driver = "raw_exec" 200 | 201 | config { 202 | command = "/bin/bash" 203 | args = ["local/main.sh"] 204 | } 205 | 206 | template { 207 | data = <<-EOT 208 | #!/bin/bash 209 | echo "successfully waited for dependency" 210 | EOT 211 | 212 | destination = "local/main.sh" 213 | } 214 | } 215 | } 216 | } -------------------------------------------------------------------------------- /examples/leader-task-group.hcl: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "image" { 7 | type = string 8 | default = "ghcr.io/hyperbadger/nomad-pipeline:main" 9 | } 10 | 11 | variable "nomad_addr" { 12 | type = string 13 | default = "http://host.docker.internal:4646" 14 | } 15 | 16 | variable "docker_extra_hosts" { 17 | type = list(string) 18 | default = ["host.docker.internal:host-gateway"] 19 | } 20 | 21 | job "leader-task-group" { 22 | name = "leader-task-group" 23 | datacenters = var.datacenters 24 | type = "batch" 25 | 26 | meta = { 27 | "nomad-pipeline.enabled" = "true" 28 | } 29 | 30 | group "▶️" { 31 | task "init" { 32 | driver = "docker" 33 | 34 | config { 35 | image = var.image 36 | args = ["agent", "init"] 37 | 38 | extra_hosts = var.docker_extra_hosts 39 | auth_soft_fail = true 40 | } 41 | 42 | env { 43 | NOMAD_ADDR = var.nomad_addr 44 | NOMAD_PIPELINE_DEBUG = "true" 45 | } 46 | } 47 | } 48 | 49 | group "leader" { 50 | count = 0 51 | 52 | meta = { 53 | "nomad-pipeline.root" = "true" 54 | "nomad-pipeline.leader" = "true" 55 | } 56 | 57 | task "some-process" { 58 | driver = "raw_exec" 59 | 60 | config { 61 | command = "/bin/bash" 62 | args = ["local/main.sh"] 63 | } 64 | 65 | template { 66 | data = <<-EOT 67 | #!/bin/bash 68 | 69 | sleep 5 70 | 71 | EOT 72 | 73 | destination = "local/main.sh" 74 | } 75 | } 76 | } 77 | 78 | group "some-long-running-process" { 79 | count = 0 80 | 81 | meta = { 82 | "nomad-pipeline.root" = "true" 83 | } 84 | 85 | task "forever-run" { 86 | driver = "raw_exec" 87 | 88 | config { 89 | command = "/bin/tail" 90 | args = ["-f", "/dev/null"] 91 | } 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/hyperbadger/nomad-pipeline 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/gin-contrib/zap v0.1.0 7 | github.com/gin-gonic/gin v1.8.1 8 | github.com/hashicorp/nomad/api v0.0.0-20220617091522-08811312cc87 9 | github.com/sirupsen/logrus v1.9.0 10 | github.com/spf13/cobra v1.5.0 11 | go.uber.org/zap v1.23.0 12 | gopkg.in/yaml.v3 v3.0.1 13 | ) 14 | 15 | require ( 16 | github.com/gin-contrib/sse v0.1.0 // indirect 17 | github.com/go-playground/locales v0.14.0 // indirect 18 | github.com/go-playground/universal-translator v0.18.0 // indirect 19 | github.com/go-playground/validator/v10 v10.10.0 // indirect 20 | github.com/goccy/go-json v0.9.7 // indirect 21 | github.com/gorilla/websocket v1.5.0 // indirect 22 | github.com/hashicorp/cronexpr v1.1.1 // indirect 23 | github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 24 | github.com/hashicorp/go-rootcerts v1.0.2 // indirect 25 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 26 | github.com/json-iterator/go v1.1.12 // indirect 27 | github.com/leodido/go-urn v1.2.1 // indirect 28 | github.com/mattn/go-isatty v0.0.14 // indirect 29 | 
github.com/mitchellh/go-homedir v1.1.0 // indirect 30 | github.com/mitchellh/mapstructure v1.5.0 // indirect 31 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect 32 | github.com/modern-go/reflect2 v1.0.2 // indirect 33 | github.com/pelletier/go-toml/v2 v2.0.1 // indirect 34 | github.com/spf13/pflag v1.0.5 // indirect 35 | github.com/ugorji/go/codec v1.2.7 // indirect 36 | go.opentelemetry.io/otel v1.10.0 // indirect 37 | go.opentelemetry.io/otel/trace v1.10.0 // indirect 38 | go.uber.org/atomic v1.7.0 // indirect 39 | go.uber.org/multierr v1.6.0 // indirect 40 | golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect 41 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect 42 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect 43 | golang.org/x/text v0.3.6 // indirect 44 | google.golang.org/protobuf v1.28.0 // indirect 45 | gopkg.in/yaml.v2 v2.4.0 // indirect 46 | ) 47 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= 2 | github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= 3 | github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 4 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 7 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 8 | github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk= 9 | github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= 10 | github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= 11 | github.com/gin-contrib/zap v0.1.0 h1:RMSFFJo34XZogV62OgOzvrlaMNmXrNxmJ3bFmMwl6Cc= 12 | github.com/gin-contrib/zap v0.1.0/go.mod h1:hvnZaPs478H1PGvRP8w89ZZbyJUiyip4ddiI/53WG3o= 13 | github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= 14 | github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= 15 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 16 | github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 17 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 18 | github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= 19 | github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= 20 | github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= 21 | github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= 22 | github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= 23 | github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= 24 | github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0= 25 | github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= 26 | github.com/goccy/go-json v0.9.7 
h1:IcB+Aqpx/iMHu5Yooh7jEzJk1JZ7Pjtmys2ukPr7EeM= 27 | github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= 28 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 29 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 30 | github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= 31 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 32 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 33 | github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= 34 | github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 35 | github.com/hashicorp/cronexpr v1.1.1 h1:NJZDd87hGXjoZBdvyCF9mX4DCq5Wy7+A/w+A7q0wn6c= 36 | github.com/hashicorp/cronexpr v1.1.1/go.mod h1:P4wA0KBl9C5q2hABiMO7cp6jcIg96CDh1Efb3g1PWA4= 37 | github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= 38 | github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= 39 | github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= 40 | github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 41 | github.com/hashicorp/nomad/api v0.0.0-20220617091522-08811312cc87 h1:OIzs4HfBl9gRRTldyOtRLhEJPcz/Sa9Rg0pw+MCLJzU= 42 | github.com/hashicorp/nomad/api v0.0.0-20220617091522-08811312cc87/go.mod h1:b/AoT79m3PEpb6tKCFKva/M+q1rKJNUk5mdu1S8DymM= 43 | github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 44 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 45 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 46 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 47 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 48 | github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= 49 | github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= 50 | github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= 51 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 52 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 53 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 54 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 55 | github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= 56 | github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= 57 | github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= 58 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 59 | github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= 60 | github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 61 | github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= 62 | github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= 63 | github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 64 | 
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= 65 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 66 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 67 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 68 | github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= 69 | github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= 70 | github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= 71 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 72 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 73 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 74 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 75 | github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= 76 | github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= 77 | github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= 78 | github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 79 | github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= 80 | github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 81 | github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= 82 | github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= 83 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 84 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 85 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 86 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= 87 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 88 | github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 89 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 90 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 91 | github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= 92 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 93 | github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= 94 | github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= 95 | github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= 96 | github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= 97 | go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= 98 | go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= 99 | go.opentelemetry.io/otel/trace v1.10.0 h1:npQMbR8o7mum8uF95yFbOEJffhs1sbCOfDh8zAJiH5E= 100 | go.opentelemetry.io/otel/trace v1.10.0/go.mod h1:Sij3YYczqAdz+EhmGhE6TpTxUO5/F/AzrK+kxfGqySM= 101 | go.uber.org/atomic v1.7.0 
h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= 102 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 103 | go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= 104 | go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= 105 | go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= 106 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 107 | go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= 108 | go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= 109 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 110 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 111 | golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI= 112 | golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= 113 | golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 114 | golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 115 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 116 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 117 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 118 | golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= 119 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= 120 | golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= 121 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 122 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 123 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 124 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 125 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 126 | golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 127 | golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 128 | golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 129 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 130 | golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 131 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= 132 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 133 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 134 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 135 | golang.org/x/text v0.3.3/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 136 | golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= 137 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 138 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 139 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 140 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 141 | golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 142 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 143 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 144 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 145 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 146 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 147 | google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= 148 | google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= 149 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 150 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 151 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 152 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 153 | gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= 154 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 155 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 156 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 157 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 158 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 159 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 160 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | log "github.com/sirupsen/logrus" 7 | 8 | "github.com/hyperbadger/nomad-pipeline/cmd" 9 | ) 10 | 11 | func init() { 12 | if len(os.Getenv("NOMAD_PIPELINE_LOG_JSON")) > 0 { 13 | log.SetFormatter(&log.JSONFormatter{}) 14 | } 15 | 16 | if len(os.Getenv("NOMAD_PIPELINE_DEBUG")) > 0 { 17 | log.SetLevel(log.DebugLevel) 18 | } 19 | } 20 | 21 | func main() { 22 | cmd.Execute() 23 | } 24 | -------------------------------------------------------------------------------- /pkg/api/error.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/gin-gonic/gin" 7 | "go.uber.org/zap" 8 | ) 9 | 10 | type Error struct { 11 | Code int `json:"code"` 12 | Status string `json:"status"` 13 | Type string `json:"type"` 14 | Message string `json:"message"` 15 | 
Details string `json:"details"` 16 | } 17 | 18 | const ( 19 | ErrorTypeNomadUpstream = "nomad_upstream" 20 | ) 21 | 22 | type ErrorOption func(*Error) 23 | 24 | func NewError(opts ...ErrorOption) *Error { 25 | err := &Error{ 26 | Code: http.StatusInternalServerError, 27 | Status: http.StatusText(http.StatusInternalServerError), 28 | } 29 | 30 | for _, opt := range opts { 31 | opt(err) 32 | } 33 | 34 | return err 35 | } 36 | 37 | func (err *Error) Apply(c *gin.Context, logger *zap.SugaredLogger) { 38 | c.JSON(err.Code, gin.H{"error": err}) 39 | 40 | logger.Errorw( 41 | err.Message, 42 | "code", err.Code, 43 | "status", err.Status, 44 | "type", err.Type, 45 | "details", err.Details, 46 | ) 47 | } 48 | 49 | func WithCode(code int) ErrorOption { 50 | return func(err *Error) { 51 | err.Code = code 52 | err.Status = http.StatusText(code) 53 | } 54 | } 55 | 56 | func WithType(errType string) ErrorOption { 57 | return func(err *Error) { 58 | err.Type = errType 59 | } 60 | } 61 | 62 | func WithMessage(msg string) ErrorOption { 63 | return func(err *Error) { 64 | err.Message = msg 65 | } 66 | } 67 | 68 | func WithError(err error) ErrorOption { 69 | return func(_err *Error) { 70 | _err.Details = err.Error() 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /pkg/api/job.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | nomad "github.com/hashicorp/nomad/api" 5 | "github.com/hyperbadger/nomad-pipeline/pkg/controller" 6 | ) 7 | 8 | type NomadJob struct { 9 | stub *nomad.JobListStub 10 | full *nomad.Job 11 | } 12 | 13 | func sumTGSummary(tgs nomad.TaskGroupSummary) int { 14 | sum := tgs.Queued + 15 | tgs.Complete + 16 | tgs.Failed + 17 | tgs.Running + 18 | tgs.Starting + 19 | tgs.Lost + 20 | tgs.Unknown 21 | 22 | return sum 23 | } 24 | 25 | type Job struct { 26 | ID string `json:"id"` 27 | Name string `json:"name"` 28 | Status string `json:"status"` 29 | } 30 | 31 | func (ps *PipelineServer) newJobFromNomadJob(njob NomadJob) (*Job, *Error) { 32 | jobsAPI := ps.nomad.Jobs() 33 | 34 | status := njob.stub.Status 35 | 36 | if status == "dead" { 37 | ftgs := make([]string, 0) 38 | 39 | for tg, tgs := range njob.stub.JobSummary.Summary { 40 | runs := sumTGSummary(tgs) 41 | 42 | if runs == 1 { 43 | if tgs.Failed == 1 || tgs.Lost == 1 || tgs.Unknown == 1 { 44 | ftgs = append(ftgs, tg) 45 | } 46 | } else if runs > 1 { 47 | allocs, _, err := jobsAPI.Allocations(njob.stub.ID, true, &nomad.QueryOptions{}) 48 | if err != nil { 49 | httpErr := NewError( 50 | WithType(ErrorTypeNomadUpstream), 51 | WithMessage("error listing job allocs"), 52 | WithError(err), 53 | ) 54 | return nil, httpErr 55 | } 56 | 57 | if !controller.TgDone(allocs, []string{tg}, true) { 58 | ftgs = append(ftgs, tg) 59 | } 60 | } else { 61 | ps.logger.Warnw("dead job without any runs", "job", njob.stub.ID, "taskgroup", tg) 62 | } 63 | } 64 | 65 | if len(ftgs) > 0 { // any failed task group marks the whole job failed 66 | status = "failed" 67 | } else { 68 | status = "success" 69 | } 70 | } 71 | 72 | job := Job{ 73 | ID: njob.stub.ID, 74 | Name: njob.stub.Name, 75 | Status: status, 76 | } 77 | 78 | return &job, nil 79 | } 80 | 81 | type getJobsFilter func(*nomad.Job) bool 82 | 83 | func isPipeline(job *nomad.Job) bool { 84 | if _, ok := job.Meta[controller.TagEnabled]; ok { 85 | return true 86 | } 87 | 88 | return false 89 | } 90 | 91 | func isChild(ofParent ...string) getJobsFilter { 92 | return func(job *nomad.Job) bool { 93 | var parentID string 94 | 95 | if id, ok :=
job.Meta[controller.TagParentPipeline]; ok { 96 | parentID = id 97 | } 98 | 99 | if job.ParentID != nil { 100 | parentID = *job.ParentID 101 | } 102 | 103 | if len(ofParent) > 0 { 104 | return parentID == ofParent[0] 105 | } 106 | 107 | if len(parentID) > 0 { 108 | return true 109 | } 110 | 111 | return false 112 | } 113 | } 114 | 115 | func (ps *PipelineServer) getJobs(njobs []NomadJob, filters ...getJobsFilter) ([]NomadJob, *Error) { 116 | jobsAPI := ps.nomad.Jobs() 117 | 118 | fjobs := make([]NomadJob, 0) 119 | 120 | for _, njob := range njobs { 121 | job, _, err := jobsAPI.Info(njob.stub.ID, &nomad.QueryOptions{}) 122 | if err != nil { 123 | httpErr := NewError( 124 | WithType(ErrorTypeNomadUpstream), 125 | WithMessage("error getting job"), 126 | WithError(err), 127 | ) 128 | return nil, httpErr 129 | } 130 | 131 | truthy := 0 132 | for _, filter := range filters { 133 | if filter(job) { 134 | truthy += 1 135 | } 136 | } 137 | 138 | if len(filters) == truthy { 139 | fjobs = append(fjobs, NomadJob{stub: njob.stub, full: job}) 140 | } 141 | } 142 | 143 | return fjobs, nil 144 | } 145 | 146 | type listJobsFilter func(*nomad.JobListStub) bool 147 | 148 | func isParam(job *nomad.JobListStub) bool { 149 | return job.ParameterizedJob 150 | } 151 | 152 | func notParam(job *nomad.JobListStub) bool { 153 | return !job.ParameterizedJob 154 | } 155 | 156 | func (ps *PipelineServer) listJobs(filters ...listJobsFilter) ([]NomadJob, *Error) { 157 | jobsAPI := ps.nomad.Jobs() 158 | 159 | allJobs, _, err := jobsAPI.List(&nomad.QueryOptions{}) 160 | if err != nil { 161 | httpErr := NewError( 162 | WithType(ErrorTypeNomadUpstream), 163 | WithMessage("error listing jobs"), 164 | WithError(err), 165 | ) 166 | return nil, httpErr 167 | } 168 | 169 | fjobs := make([]NomadJob, 0) 170 | 171 | for _, job := range allJobs { 172 | truthy := 0 173 | for _, filter := range filters { 174 | if filter(job) { 175 | truthy += 1 176 | } 177 | } 178 | 179 | if len(filters) == truthy { 180 | fjobs = append(fjobs, NomadJob{stub: job}) 181 | } 182 | } 183 | 184 | return fjobs, nil 185 | } 186 | -------------------------------------------------------------------------------- /pkg/api/pipeline.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | type Pipeline struct { 4 | ID string `json:"id"` 5 | Name string `json:"name"` 6 | } 7 | 8 | func (ps *PipelineServer) newPipelineFromNomadJob(njob NomadJob) *Pipeline { 9 | pipeline := Pipeline{ 10 | ID: *njob.full.ID, 11 | Name: *njob.full.Name, 12 | } 13 | 14 | return &pipeline 15 | } 16 | -------------------------------------------------------------------------------- /pkg/api/router.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "time" 8 | 9 | ginzap "github.com/gin-contrib/zap" 10 | "github.com/gin-gonic/gin" 11 | nomad "github.com/hashicorp/nomad/api" 12 | "go.uber.org/zap" 13 | ) 14 | 15 | type PipelineServer struct { 16 | nomad *nomad.Client 17 | logger *zap.SugaredLogger 18 | } 19 | 20 | func NewPipelineServer(logger *zap.SugaredLogger) (*PipelineServer, error) { 21 | nClient, err := nomad.NewClient(&nomad.Config{ 22 | Address: os.Getenv("NOMAD_ADDR"), 23 | }) 24 | if err != nil { 25 | return nil, fmt.Errorf("error creating client: %w", err) 26 | } 27 | 28 | ps := PipelineServer{ 29 | nomad: nClient, 30 | logger: logger, 31 | } 32 | 33 | return &ps, nil 34 | } 35 | 36 | func (ps *PipelineServer) NewHTTPServer(addr 
string) *http.Server { 37 | gin.SetMode(gin.ReleaseMode) 38 | 39 | r := gin.New() 40 | 41 | desugar := ps.logger.Desugar() 42 | 43 | // logging 44 | r.Use(ginzap.Ginzap(desugar, time.RFC3339, true)) 45 | r.Use(ginzap.RecoveryWithZap(desugar, true)) 46 | 47 | r.GET("/health", ps.health) 48 | r.GET("/jobs", ps.listAllJobs) 49 | r.GET("/pipelines", ps.listPipelines) 50 | r.GET("/pipelines/:pipelineID/jobs", ps.listPipelineJobs) 51 | 52 | srv := http.Server{ 53 | Addr: addr, 54 | Handler: r, 55 | } 56 | 57 | return &srv 58 | } 59 | 60 | func (ps *PipelineServer) health(c *gin.Context) { 61 | c.JSON(http.StatusOK, gin.H{"healthy": true}) 62 | } 63 | 64 | func (ps *PipelineServer) listAllJobs(c *gin.Context) { 65 | jobs, httpErr := ps.listJobs(notParam) 66 | if httpErr != nil { 67 | httpErr.Apply(c, ps.logger) 68 | return 69 | } 70 | 71 | njobs, httpErr := ps.getJobs(jobs, isPipeline) 72 | if httpErr != nil { 73 | httpErr.Apply(c, ps.logger) 74 | return 75 | } 76 | 77 | allJobs := make([]*Job, 0) 78 | 79 | for _, njob := range njobs { 80 | job, httpErr := ps.newJobFromNomadJob(njob) 81 | if httpErr != nil { 82 | httpErr.Apply(c, ps.logger) 83 | return 84 | } 85 | allJobs = append(allJobs, job) 86 | } 87 | 88 | c.JSON(http.StatusOK, allJobs) 89 | } 90 | 91 | func (ps *PipelineServer) listPipelines(c *gin.Context) { 92 | paramJobs, httpErr := ps.listJobs(isParam) 93 | if httpErr != nil { 94 | httpErr.Apply(c, ps.logger) 95 | return 96 | } 97 | 98 | njobs, httpErr := ps.getJobs(paramJobs, isPipeline) 99 | if httpErr != nil { 100 | httpErr.Apply(c, ps.logger) 101 | return 102 | } 103 | 104 | pipelines := make([]*Pipeline, 0) 105 | 106 | for _, job := range njobs { 107 | pipelines = append(pipelines, ps.newPipelineFromNomadJob(job)) 108 | } 109 | 110 | c.JSON(http.StatusOK, pipelines) 111 | } 112 | 113 | func (ps *PipelineServer) listPipelineJobs(c *gin.Context) { 114 | pipelineID := c.Params.ByName("pipelineID") 115 | 116 | jobs, httpErr := ps.listJobs(notParam) 117 | if httpErr != nil { 118 | httpErr.Apply(c, ps.logger) 119 | return 120 | } 121 | 122 | njobs, httpErr := ps.getJobs(jobs, isPipeline, isChild(pipelineID)) 123 | if httpErr != nil { 124 | httpErr.Apply(c, ps.logger) 125 | return 126 | } 127 | 128 | allJobs := make([]*Job, 0) 129 | 130 | for _, njob := range njobs { 131 | job, httpErr := ps.newJobFromNomadJob(njob) 132 | if httpErr != nil { 133 | httpErr.Apply(c, ps.logger) 134 | return 135 | } 136 | allJobs = append(allJobs, job) 137 | } 138 | 139 | c.JSON(http.StatusOK, allJobs) 140 | } 141 | -------------------------------------------------------------------------------- /pkg/controller/pipeline.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "errors" 7 | "fmt" 8 | "os" 9 | "path/filepath" 10 | "regexp" 11 | "sort" 12 | "strconv" 13 | "strings" 14 | 15 | nomad "github.com/hashicorp/nomad/api" 16 | log "github.com/sirupsen/logrus" 17 | yaml "gopkg.in/yaml.v3" 18 | ) 19 | 20 | const ( 21 | TagPrefix = "nomad-pipeline" 22 | TagEnabled = TagPrefix + ".enabled" 23 | TagCount = TagPrefix + ".count" 24 | TagDependencies = TagPrefix + ".dependencies" 25 | TagDynamicMemoryMB = TagPrefix + ".dynamic-memory-mb" 26 | TagDynamicTasks = TagPrefix + ".dynamic-tasks" 27 | TagLeader = TagPrefix + ".leader" 28 | TagNext = TagPrefix + ".next" 29 | TagRoot = TagPrefix + ".root" 30 | 31 | // internal tags, not meant to be set by user 32 | TagInternalPrefix = TagPrefix + ".internal" 33 | TagParentTask = 
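// stamped onto dynamically added task groups to record which group spawned them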
TagInternalPrefix + ".parent-task" 34 | TagParentPipeline = TagInternalPrefix + ".parent-pipeline" 35 | ) 36 | 37 | func i2p(i int) *int { 38 | return &i 39 | } 40 | 41 | func dedupStr(dup []string) []string { 42 | seen := make(map[string]bool) 43 | dedup := make([]string, 0) 44 | 45 | for _, item := range dup { 46 | if _, ok := seen[item]; !ok { 47 | seen[item] = true 48 | dedup = append(dedup, item) 49 | } 50 | } 51 | 52 | return dedup 53 | } 54 | 55 | func dedupAllocs(dup []*nomad.AllocationListStub) []*nomad.AllocationListStub { 56 | seen := make(map[string]bool) 57 | dedup := make([]*nomad.AllocationListStub, 0) 58 | 59 | for _, item := range dup { 60 | if _, ok := seen[item.Name]; !ok { 61 | seen[item.Name] = true 62 | dedup = append(dedup, item) 63 | } 64 | } 65 | 66 | return dedup 67 | } 68 | 69 | func equalStr(a, b []string) bool { 70 | if len(a) != len(b) { 71 | return false 72 | } 73 | 74 | for i, v := range a { 75 | if v != b[i] { 76 | return false 77 | } 78 | } 79 | 80 | return true 81 | } 82 | 83 | func copyMapInterface(og map[string]interface{}) map[string]interface{} { 84 | nm := make(map[string]interface{}) 85 | for k, v := range og { 86 | nm[k] = v 87 | } 88 | return nm 89 | } 90 | 91 | func copyMapString(og map[string]string) map[string]string { 92 | nm := make(map[string]string) 93 | for k, v := range og { 94 | nm[k] = v 95 | } 96 | return nm 97 | } 98 | 99 | func clearECh(ch <-chan *nomad.Events) { 100 | for len(ch) > 0 { 101 | <-ch 102 | } 103 | } 104 | 105 | func lookupTask(tg *nomad.TaskGroup, tName string) *nomad.Task { 106 | for _, t := range tg.Tasks { 107 | if t.Name == tName { 108 | return t 109 | } 110 | } 111 | 112 | return nil 113 | } 114 | 115 | func split(toSplit string) []string { 116 | parts := strings.Split(toSplit, ",") 117 | tParts := make([]string, len(parts)) 118 | for i, part := range parts { 119 | tPart := strings.TrimSpace(part) 120 | tParts[i] = tPart 121 | } 122 | return tParts 123 | } 124 | 125 | func tgAllocated(allocs []*nomad.AllocationListStub, groups []string) (allocated bool) { 126 | for _, alloc := range allocs { 127 | for _, group := range groups { 128 | if alloc.TaskGroup == group { 129 | allocated = true 130 | } 131 | } 132 | } 133 | 134 | return 135 | } 136 | 137 | func successState(state *nomad.TaskState) bool { 138 | if state.Failed { 139 | return false 140 | } 141 | 142 | tEvents := make([]*nomad.TaskEvent, 0) 143 | for _, event := range state.Events { 144 | if event.Type == nomad.TaskTerminated { 145 | tEvents = append(tEvents, event) 146 | } 147 | } 148 | 149 | if len(tEvents) == 0 { 150 | log.Warnf("job not marked as failed and no events") 151 | return false 152 | } 153 | 154 | // sort by time to get latest "Terminated" event 155 | sort.Slice(tEvents, func(i, j int) bool { return tEvents[i].Time < tEvents[j].Time }) 156 | 157 | // check exit code of the latest "Terminated" 158 | if codeStr, ok := tEvents[len(tEvents)-1].Details["exit_code"]; ok { 159 | code, err := strconv.Atoi(codeStr) 160 | if err != nil { 161 | log.Errorf("error converting exit code (%v) to integer: %v", codeStr, err) 162 | return false 163 | } 164 | 165 | if code > 0 { 166 | log.Warnf("exit code (%v) and task not marked as failed, likely the job was stopped by signal", code) 167 | } 168 | 169 | return code == 0 170 | } 171 | 172 | return true 173 | } 174 | 175 | func TgDone(allocs []*nomad.AllocationListStub, groups []string, success bool) bool { 176 | if len(groups) == 0 || len(allocs) == 0 { 177 | return false 178 | } 179 | 180 | // sort allocations from 
newest to oldest job version 181 | sort.Slice(allocs, func(i, j int) bool { return allocs[i].JobVersion > allocs[j].JobVersion }) 182 | // deduping will use the latest job version and remove older ones 183 | // hence the prior sorting 184 | allocs = dedupAllocs(allocs) 185 | 186 | // keeps track of how many allocations a task group expects to be complete 187 | groupCount := make(map[string]int, 0) 188 | for _, alloc := range allocs { 189 | groupCount[alloc.TaskGroup] += 1 190 | } 191 | 192 | dGroupCount := make(map[string]int, 0) 193 | for _, alloc := range allocs { 194 | for _, group := range groups { 195 | if alloc.TaskGroup == group { 196 | tasks := 0 197 | dTasks := 0 198 | for task, state := range alloc.TaskStates { 199 | if task != "wait" && task != "next" { 200 | tasks++ 201 | 202 | if state.State == "dead" && !state.FinishedAt.IsZero() { 203 | if success && successState(state) { 204 | dTasks++ 205 | } else if !success { 206 | dTasks++ 207 | } 208 | } 209 | } 210 | } 211 | 212 | if tasks == dTasks { 213 | dGroupCount[group] += 1 214 | } 215 | } 216 | } 217 | } 218 | 219 | dGroups := make([]string, 0) 220 | for group, count := range dGroupCount { 221 | log.Debugf("%v => %v completions", group, count) 222 | 223 | if count >= groupCount[group] { 224 | dGroups = append(dGroups, group) 225 | } 226 | } 227 | 228 | if len(dGroups) == 0 { 229 | return false 230 | } 231 | 232 | sort.Strings(groups) 233 | sort.Strings(dGroups) 234 | 235 | dGroups = dedupStr(dGroups) 236 | 237 | return equalStr(groups, dGroups) 238 | } 239 | 240 | func generateEnvVarSlugs() map[string]string { 241 | envVars := []string{"JOB_ID", "JOB_NAME"} 242 | 243 | exp := "[^A-Za-z0-9]+" 244 | reg, err := regexp.Compile(exp) 245 | if err != nil { 246 | log.Fatalf("error compiling regular expression (%s): %v", exp, err) 247 | } 248 | 249 | slugs := make(map[string]string, 0) 250 | for _, envVar := range envVars { 251 | orig := os.Getenv(fmt.Sprintf("NOMAD_%s", envVar)) 252 | slugKey := fmt.Sprintf("%s_SLUG", envVar) 253 | slug := reg.ReplaceAllString(orig, "-") 254 | slug = strings.Trim(slug, "-") 255 | slugs[slugKey] = slug 256 | } 257 | 258 | return slugs 259 | } 260 | 261 | func lookupMetaTagInt(meta map[string]string, tag string) (int, error) { 262 | var value int 263 | var err error 264 | 265 | if valueStr, ok := meta[tag]; ok { 266 | valuee := os.ExpandEnv(valueStr) 267 | value, err = strconv.Atoi(valuee) 268 | if err != nil { 269 | return value, fmt.Errorf("can't convert tag (%v) of value (%v) to an int", tag, valuee) 270 | } 271 | } 272 | return value, nil 273 | } 274 | 275 | func lookupMetaTagStr(meta map[string]string, tag string) string { 276 | var value string 277 | 278 | if valueStr, ok := meta[tag]; ok { 279 | value = os.ExpandEnv(valueStr) 280 | } 281 | return value 282 | } 283 | 284 | func lookupMetaTagBool(meta map[string]string, tag string) (bool, error) { 285 | var value bool 286 | var err error 287 | 288 | if valueStr, ok := meta[tag]; ok { 289 | valuee := os.ExpandEnv(valueStr) 290 | value, err = strconv.ParseBool(valuee) 291 | if err != nil { 292 | return value, fmt.Errorf("can't convert tag (%v) of value (%v) to a bool", tag, valuee) 293 | } 294 | } 295 | return value, nil 296 | } 297 | 298 | func loadConfig(cPath string) *Config { 299 | cBytes, err := os.ReadFile(cPath) 300 | 301 | if errors.Is(err, os.ErrNotExist) { 302 | log.Warnf("config file doesn't exist (path: %v)", cPath) 303 | return nil 304 | } 305 | 306 | if err != nil { 307 | log.Warnf("error loading config (path: %v): %v", cPath, err) 308 
| return nil 309 | } 310 | 311 | c := Config{} 312 | err = yaml.Unmarshal(cBytes, &c) 313 | if err != nil { 314 | log.Fatalf("error reading config yaml: %v", err) 315 | } 316 | 317 | return &c 318 | } 319 | 320 | type Task struct { 321 | Name string 322 | Next []string 323 | Dependencies []string 324 | } 325 | 326 | type Tasks []Task 327 | 328 | func (ts Tasks) LookupTask(name string) *Task { 329 | for _, t := range ts { 330 | if t.Name == name { 331 | return &t 332 | } 333 | } 334 | return nil 335 | } 336 | 337 | type TaskGroups []nomad.TaskGroup 338 | 339 | type Config struct { 340 | } 341 | 342 | type PipelineController struct { 343 | JobID string 344 | GroupName string 345 | TaskName string 346 | AllocID string 347 | Job *nomad.Job 348 | Nomad *nomad.Client 349 | JobsAPI *nomad.Jobs 350 | AllocsAPI *nomad.Allocations 351 | Config *Config 352 | Image string 353 | } 354 | 355 | func NewPipelineController(cPath string) *PipelineController { 356 | pc := PipelineController{ 357 | JobID: os.Getenv("NOMAD_JOB_ID"), 358 | GroupName: os.Getenv("NOMAD_GROUP_NAME"), 359 | TaskName: os.Getenv("NOMAD_TASK_NAME"), 360 | AllocID: os.Getenv("NOMAD_ALLOC_ID"), 361 | Config: loadConfig(cPath), 362 | } 363 | 364 | nClient, err := nomad.NewClient(&nomad.Config{ 365 | Address: os.Getenv("NOMAD_ADDR"), 366 | Namespace: os.Getenv("NOMAD_NAMESPACE"), 367 | Region: os.Getenv("NOMAD_REGION"), 368 | }) 369 | if err != nil { 370 | log.Fatalf("error creating client: %v", err) 371 | } 372 | 373 | pc.Nomad = nClient 374 | pc.JobsAPI = nClient.Jobs() 375 | pc.AllocsAPI = nClient.Allocations() 376 | 377 | log.Infof("getting job: %q", pc.JobID) 378 | job, _, err := pc.JobsAPI.Info(pc.JobID, &nomad.QueryOptions{}) 379 | if err != nil { 380 | log.Fatalf("error getting job: %v", err) 381 | } 382 | 383 | pc.Job = job 384 | 385 | return &pc 386 | } 387 | 388 | func (pc *PipelineController) UpdateJob() error { 389 | log.Debugf("updating job with job modify index: %v", *pc.Job.JobModifyIndex) 390 | r, _, err := pc.JobsAPI.RegisterOpts( 391 | pc.Job, 392 | &nomad.RegisterOptions{ 393 | EnforceIndex: true, 394 | ModifyIndex: *pc.Job.JobModifyIndex, 395 | }, 396 | &nomad.WriteOptions{}, 397 | ) 398 | if err != nil { 399 | return err 400 | } 401 | 402 | log.Debugf("updated job, new job modify index: %v", r.JobModifyIndex) 403 | pc.Job.JobModifyIndex = &r.JobModifyIndex 404 | 405 | return nil 406 | } 407 | 408 | func (pc *PipelineController) ProcessTaskGroups(filters ...map[string]string) ([]string, error) { 409 | filter := make(map[string]string) 410 | for _, _filter := range filters { 411 | for k, v := range _filter { 412 | filter[k] = v 413 | } 414 | } 415 | 416 | rTasks := make([]string, 0) 417 | tasks := make(Tasks, 0, len(pc.Job.TaskGroups)) 418 | 419 | procTG := pc.Job.LookupTaskGroup(pc.GroupName) 420 | procTask := lookupTask(procTG, pc.TaskName) 421 | 422 | for _, tGroup := range pc.Job.TaskGroups { 423 | // skip init group 424 | if *tGroup.Name == pc.GroupName { 425 | continue 426 | } 427 | 428 | matched := true // process only groups that match every filter tag 429 | for k, v := range filter { 430 | if tGroup.Meta[k] != v { 431 | matched = false 432 | } 433 | } 434 | if !matched { continue } 435 | 436 | task := Task{ 437 | Name: *tGroup.Name, 438 | } 439 | 440 | if next := lookupMetaTagStr(tGroup.Meta, TagNext); len(next) > 0 { 441 | task.Next = split(next) 442 | } 443 | 444 | if dependencies := lookupMetaTagStr(tGroup.Meta, TagDependencies); len(dependencies) > 0 { 445 | task.Dependencies = split(dependencies) 446 | } 447 | 448 | tasks = append(tasks, task) 449 | 450 | root, err := 
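// groups tagged as root are the pipeline entry points that Init triggers first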
lookupMetaTagBool(tGroup.Meta, TagRoot) 451 | if err != nil { 452 | return nil, fmt.Errorf("error parsing root tag: %v", err) 453 | } 454 | if root { 455 | rTasks = append(rTasks, *tGroup.Name) 456 | } 457 | 458 | // not sure if this should be here 459 | for _, t := range tGroup.Tasks { 460 | mem, err := lookupMetaTagInt(t.Meta, TagDynamicMemoryMB) 461 | if err != nil { 462 | return nil, fmt.Errorf("error parsing dynamic memory tag: %v", err) 463 | } 464 | if mem > 0 { 465 | t.Resources.MemoryMB = i2p(mem) 466 | log.Debugf("setting dynamic memory for task (%v) in task group (%v) to (%v)", t.Name, *tGroup.Name, mem) 467 | } 468 | } 469 | } 470 | 471 | for _, task := range tasks { 472 | tGroup := pc.Job.LookupTaskGroup(task.Name) 473 | if tGroup == nil { 474 | return nil, fmt.Errorf("task not found in job: %v", task.Name) 475 | } 476 | 477 | for _, nTask := range task.Next { 478 | if nTGroup := pc.Job.LookupTaskGroup(nTask); nTGroup == nil { 479 | return nil, fmt.Errorf("next task specified in task (%v) not found in job: %v", task.Name, nTask) 480 | } 481 | } 482 | 483 | for _, dTask := range task.Dependencies { 484 | if dTGroup := pc.Job.LookupTaskGroup(dTask); dTGroup == nil { 485 | return nil, fmt.Errorf("dependent task specified in task (%v) not found in job: %v", task.Name, dTask) 486 | } 487 | } 488 | 489 | if *tGroup.Count > 0 { 490 | return nil, fmt.Errorf("dag controlled task must have a zero count: %v", task.Name) 491 | } 492 | 493 | env := copyMapString(procTask.Env) 494 | 495 | dTask := nomad.NewTask("wait", "docker") 496 | 497 | dTask.Lifecycle = &nomad.TaskLifecycle{ 498 | Hook: nomad.TaskLifecycleHookPrestart, 499 | } 500 | 501 | dTaskCfg := copyMapInterface(procTask.Config) 502 | dTaskCfg["args"] = append([]string{"agent", "wait"}, task.Dependencies...) 503 | dTask.Config = dTaskCfg 504 | 505 | dTask.Env = env 506 | 507 | if len(task.Dependencies) > 0 { 508 | tGroup.AddTask(dTask) 509 | } 510 | 511 | nTask := nomad.NewTask("next", "docker") 512 | 513 | nTask.Lifecycle = &nomad.TaskLifecycle{ 514 | Hook: nomad.TaskLifecycleHookPoststop, 515 | } 516 | 517 | nArgs := append([]string{"agent", "next"}, task.Next...) 518 | 519 | if dynTasks := lookupMetaTagStr(tGroup.Meta, TagDynamicTasks); len(dynTasks) > 0 { 520 | nArgs = append([]string{"agent", "next", "--dynamic-tasks", dynTasks}, task.Next...) 
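// forward the glob to the poststop "next" agent so it can expand dynamic task groups before triggering successors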
521 | } 522 | 523 | nTaskCfg := copyMapInterface(procTask.Config) 524 | nTaskCfg["args"] = nArgs 525 | nTask.Config = nTaskCfg 526 | 527 | nTask.Env = env 528 | 529 | tGroup.AddTask(nTask) 530 | } 531 | 532 | return rTasks, nil 533 | } 534 | 535 | func (pc *PipelineController) Init() bool { 536 | envVarSlugs := generateEnvVarSlugs() 537 | 538 | for k, v := range envVarSlugs { 539 | pc.Job.SetMeta(k, v) 540 | } 541 | 542 | rTasks, err := pc.ProcessTaskGroups() 543 | if err != nil { 544 | log.Fatalf("error processing task groups: %v", err) 545 | } 546 | 547 | if len(rTasks) == 0 { 548 | log.Fatalf("couldn't find a root task group, need to set the root meta tag (%v)", TagRoot) 549 | } 550 | 551 | return pc.Next(rTasks, "") 552 | } 553 | 554 | func (pc *PipelineController) Wait(groups []string) { 555 | log.Infof("waiting for following groups: %v", groups) 556 | 557 | jAllocs, meta, err := pc.JobsAPI.Allocations(pc.JobID, true, &nomad.QueryOptions{}) 558 | if err != nil { 559 | log.Fatalf("error getting job allocations: %v", err) 560 | } 561 | 562 | if TgDone(jAllocs, groups, true) { 563 | log.Info("all dependent task groups finished successfully") 564 | return 565 | } 566 | 567 | allocStubStore := make(map[string]*nomad.AllocationListStub) 568 | 569 | // initialize alloc store with current state 570 | for _, alloc := range jAllocs { 571 | allocStubStore[alloc.ID] = alloc 572 | } 573 | 574 | eClient := pc.Nomad.EventStream() 575 | 576 | topics := map[nomad.Topic][]string{ 577 | nomad.TopicAllocation: {pc.JobID}, 578 | } 579 | 580 | idx := meta.LastIndex 581 | log.Debugf("event start index: %v", idx) 582 | 583 | var eCh <-chan *nomad.Events 584 | sub := make(chan bool, 1) 585 | cancel := func() {} 586 | 587 | // initial subscription 588 | sub <- true 589 | 590 | eErrs := 0 591 | for { 592 | select { 593 | case <-sub: 594 | log.Debug("subscribing to event stream") 595 | ctx := context.Background() 596 | ctx, cancel = context.WithCancel(ctx) 597 | defer cancel() 598 | eCh, err = eClient.Stream(ctx, topics, idx, &nomad.QueryOptions{}) 599 | if err != nil { 600 | log.Fatalf("error subscribing to event stream: %v", err) 601 | } 602 | case es := <-eCh: 603 | if eErrs > 5 { 604 | log.Fatalf("too many errors in event stream") 605 | } 606 | if es.Err != nil && strings.Contains(es.Err.Error(), "invalid character 's' looking for beginning of value") { 607 | log.Warn("server disconnected, resubscribing") 608 | cancel() 609 | clearECh(eCh) 610 | sub <- true 611 | continue 612 | } 613 | if es.Err != nil { 614 | log.Errorf("error in event stream: %v", es.Err) 615 | eErrs++ 616 | continue 617 | } 618 | 619 | for _, e := range es.Events { 620 | log.Debugf("==> idx: %v, topic: %v, type: %v", e.Index, e.Topic, e.Type) 621 | 622 | idx = e.Index 623 | 624 | if e.Type != "AllocationUpdated" { 625 | continue 626 | } 627 | 628 | alloc, err := e.Allocation() 629 | if err != nil { 630 | log.Errorf("error getting allocation from event stream: %v", err) 631 | eErrs++ 632 | continue 633 | } 634 | if alloc == nil { 635 | log.Errorf("allocation in event stream shouldn't be nil") 636 | eErrs++ 637 | continue 638 | } 639 | 640 | log.Debugf(" |-> task group: %v, client status: %v", alloc.TaskGroup, alloc.ClientStatus) 641 | for t, ts := range alloc.TaskStates { 642 | log.Debugf(" |-> task: %v, state: %v, restarts: %v, failed: %v", t, ts.State, ts.Restarts, ts.Failed) 643 | } 644 | 645 | // workaround for alloc.Stub() to work 646 | alloc.Job = pc.Job 647 | 648 | allocStub := alloc.Stub() 649 | allocStubStore[alloc.ID] = 
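// newer events overwrite any stale stub held for the same allocation ID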
allocStub 650 | 651 | log.Debugf("alloc store size: %v", len(allocStubStore)) 652 | 653 | allocList := make([]*nomad.AllocationListStub, 0, len(allocStubStore)) 654 | for _, v := range allocStubStore { 655 | allocList = append(allocList, v) 656 | } 657 | 658 | if TgDone(allocList, groups, true) { 659 | log.Info("all dependent task groups finished successfully") 660 | return 661 | } 662 | } 663 | } 664 | } 665 | } 666 | 667 | func (pc *PipelineController) Next(groups []string, dynTasks string) bool { 668 | log.Infof("triggering the following groups: %v", groups) 669 | 670 | jAllocs, _, err := pc.JobsAPI.Allocations(pc.JobID, true, nil) 671 | if err != nil { 672 | log.Fatalf("error getting job allocations: %v", err) 673 | } 674 | 675 | cAlloc, _, err := pc.AllocsAPI.Info(pc.AllocID, nil) 676 | if err != nil { 677 | log.Fatalf("error getting current allocation: %v", err) 678 | } 679 | 680 | cGroup := pc.Job.LookupTaskGroup(pc.GroupName) 681 | if cGroup == nil { 682 | log.Fatalf("could not find current group (%v), this shouldn't happen!", pc.GroupName) 683 | } 684 | 685 | leader, err := lookupMetaTagBool(cGroup.Meta, TagLeader) 686 | if err != nil { 687 | log.Warnf("error parsing leader tag, defaulting to false: %v", err) 688 | } 689 | if leader { 690 | for _, tg := range pc.Job.TaskGroups { 691 | tg.Count = i2p(0) 692 | } 693 | return true 694 | } 695 | 696 | cTasks := []string{} 697 | 698 | for _, t := range cGroup.Tasks { 699 | if t.Name != "init" && t.Name != "next" { 700 | cTasks = append(cTasks, t.Name) 701 | } 702 | } 703 | 704 | for _, t := range cTasks { 705 | if !successState(cAlloc.TaskStates[t]) { 706 | log.Warnf("task %v didn't run successfully, not triggering next group", t) 707 | return false 708 | } 709 | } 710 | 711 | if len(dynTasks) > 0 { 712 | glob := filepath.Join(os.Getenv("NOMAD_ALLOC_DIR"), dynTasks) 713 | tgsFiles, err := filepath.Glob(glob) 714 | if err != nil { 715 | log.Fatalf("error finding dynamic tasks files using provided glob (%v): %v", dynTasks, err) 716 | } 717 | 718 | log.Infof("found following dynamic tasks files: %v", tgsFiles) 719 | 720 | tgs := make(TaskGroups, 0) 721 | for _, tgsFile := range tgsFiles { 722 | tgsBytes, err := os.ReadFile(tgsFile) 723 | if err != nil { 724 | log.Fatalf("error reading dynamic tasks file at path (%v): %v", tgsFile, err) 725 | } 726 | 727 | var _tgs TaskGroups 728 | err = json.Unmarshal(tgsBytes, &_tgs) 729 | if err != nil { 730 | log.Fatalf("error parsing dynamic tasks json at path (%v): %v", tgsFile, err) 731 | } 732 | 733 | tgs = append(tgs, _tgs...) 734 | } 735 | 736 | log.Debugf("dynamic tasks to add: %v", tgs) 737 | 738 | for _, _tg := range tgs { 739 | tg := _tg 740 | 741 | tg.SetMeta(TagParentTask, pc.GroupName) 742 | pc.Job.AddTaskGroup(&tg) 743 | } 744 | 745 | filter := map[string]string{ 746 | TagParentTask: pc.GroupName, 747 | } 748 | 749 | rTasks, err := pc.ProcessTaskGroups(filter) 750 | if err != nil { 751 | log.Fatalf("error processing task groups: %v", err) 752 | } 753 | 754 | if len(rTasks) == 0 { 755 | log.Fatalf("no root task group found, at least one task in dynamic tasks must have the root meta tag (%v)", TagRoot) 756 | } 757 | 758 | groups = append(groups, rTasks...) 
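// dynamic root groups are triggered together with the statically declared next groups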
759 | } 760 | 761 | for _, group := range groups { 762 | tg := pc.Job.LookupTaskGroup(group) 763 | if tg == nil { 764 | log.Warnf("could not find next group %v", group) 765 | continue 766 | } 767 | if tgAllocated(jAllocs, []string{group}) && !TgDone(jAllocs, []string{group}, false) { 768 | log.Warnf("next group already has allocations, skipping trigger: %v", group) 769 | continue 770 | } 771 | 772 | tg.Count = i2p(1) 773 | 774 | count, err := lookupMetaTagInt(tg.Meta, TagCount) 775 | if err != nil { 776 | log.Warnf("error parsing count tag, defaulting to 1: %v", err) 777 | count = 1 778 | } 779 | if count > 0 { 780 | tg.Count = i2p(count) 781 | } 782 | } 783 | 784 | if pc.TaskName == "init" || TgDone(jAllocs, []string{pc.GroupName}, true) { 785 | cGroup.Count = i2p(0) 786 | } 787 | 788 | return true 789 | } 790 | --------------------------------------------------------------------------------
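Two short sketches of how the pieces above compose. Both are illustrative only: the import paths come from this repo's go.mod, but the job names, sample error, and main wrappers are made up for the example.

Building an HTTP error with the functional options from pkg/api/error.go:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/hyperbadger/nomad-pipeline/pkg/api"
)

func main() {
	// mirrors how the handlers wrap an upstream Nomad failure;
	// NewError supplies 500 defaults, each option overrides one field
	httpErr := api.NewError(
		api.WithCode(http.StatusBadGateway),
		api.WithType(api.ErrorTypeNomadUpstream),
		api.WithMessage("error listing jobs"),
		api.WithError(errors.New("connection refused")),
	)

	// prints: 502 Bad Gateway: error listing jobs (connection refused)
	fmt.Printf("%d %s: %s (%s)\n", httpErr.Code, httpErr.Status, httpErr.Message, httpErr.Details)
}

And a sketch of which controller meta tags go where when assembling a pipeline job programmatically. Pipelines are normally written as HCL job specs like those under examples/; the "etl", "extract", and "load" names here are placeholders.

package main

import (
	nomad "github.com/hashicorp/nomad/api"

	"github.com/hyperbadger/nomad-pipeline/pkg/controller"
)

func main() {
	job := nomad.NewBatchJob("etl", "etl", "global", 50)
	job.SetMeta(controller.TagEnabled, "true")

	// every DAG-controlled group starts at count 0; Init flips the root
	// group to 1 and each group's poststop "next" task triggers the rest
	extract := nomad.NewTaskGroup("extract", 0)
	extract.SetMeta(controller.TagRoot, "true")
	extract.SetMeta(controller.TagNext, "load")

	load := nomad.NewTaskGroup("load", 0)
	load.SetMeta(controller.TagDependencies, "extract")

	job.AddTaskGroup(extract)
	job.AddTaskGroup(load)
	// in real use the job would be registered with a nomad.Client here
	_ = job
}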