├── .envrc ├── .gitignore ├── HCL2 ├── add_local_file │ ├── README.md │ ├── doc │ │ └── emoji-delimiters.png │ ├── input.file │ ├── raw_file_b64.nomad │ ├── raw_file_delims.nomad │ ├── raw_file_json.nomad │ └── use_file.nomad ├── always_change │ ├── README.md │ ├── before.nomad │ ├── uuid.nomad │ └── variable.nomad ├── dynamic │ ├── README.md │ └── example.nomad ├── object_to_template │ ├── README.md │ └── example.nomad └── variable_jobs │ ├── README.md │ ├── decode-external-file │ ├── README.MD │ ├── env.json │ ├── job1.nomad │ └── job2.nomad │ ├── env-vars │ ├── README.MD │ ├── env.vars │ ├── job1.nomad │ └── job2.nomad │ ├── job.nomad │ ├── job.vars │ └── multiple-var-files │ ├── README.MD │ ├── job1.nomad │ ├── job1.vars │ ├── job2.nomad │ ├── job2.vars │ ├── job3.nomad │ ├── job3.vars │ └── shared.vars ├── README.md ├── alloc_folder ├── mount_alloc.nomad └── sidecar.nomad ├── applications ├── artifactory_oss │ ├── README.md │ └── registry.nomad ├── cluster-broccoli │ └── example.nomad ├── docker_registry │ ├── README.md │ └── registry.nomad ├── docker_registry_v2 │ ├── README.md │ ├── htpasswd │ ├── make_password.sh │ └── registry.nomad ├── docker_registry_v3 │ ├── README.md │ ├── make_password.sh │ └── registry.nomad ├── mariadb │ └── mariadb.nomad ├── membrane-soa │ ├── README.md │ ├── soap-proxy-v1-linux.nomad │ ├── soap-proxy-v1-windows.nomad │ └── soap-proxy.nomad ├── minio │ ├── README.md │ ├── minio.nomad │ └── secure-variables │ │ ├── README.md │ │ ├── minio-data │ │ └── .gitkeep │ │ ├── minio.nomad │ │ ├── start.sh │ │ ├── stop.sh │ │ └── volume.hcl ├── postgres │ ├── README.md │ └── postgres.nomad ├── prometheus │ ├── README.md │ ├── fabio-service.nomad │ ├── grafana │ │ ├── README.md │ │ └── nomad_jobs.json │ ├── node-exporter.nomad │ └── prometheus.nomad ├── vms │ ├── freedos │ │ ├── .gitignore │ │ ├── README.md │ │ ├── freedos.img.tgz │ │ ├── freedos.img.tgz.SHASUM │ │ └── freedos.nomad │ └── tinycore │ │ ├── README.md │ │ ├── tc_ssh.nomad │ │ └── tinycore.qcow2.tgz └── wordpress │ ├── README.md │ ├── distributed │ ├── README.md │ ├── build-site.nomad │ ├── nginx.nomad │ ├── reset.sh │ ├── wordpress-db.nomad │ └── wordpress.nomad │ └── simple │ ├── README.md │ └── wordpress.nomad ├── artifact_sleepyecho ├── README.md ├── SleepyEcho.sh ├── artifact_sleepyecho.nomad └── vault_sleepyecho.nomad ├── batch ├── batch_gc │ └── example.nomad ├── dispatch │ ├── sleepy.nomad │ ├── sleepy1.nomad │ ├── sleepy10.nomad │ ├── sleepy2.nomad │ ├── sleepy3.nomad │ ├── sleepy4.nomad │ ├── sleepy5.nomad │ ├── sleepy6.nomad │ ├── sleepy7.nomad │ ├── sleepy8.nomad │ └── sleepy9.nomad ├── dont_restart_fail │ ├── README.md │ └── example.nomad ├── lost_batch │ ├── README.md │ ├── batch.nomad │ └── periodic.nomad ├── lots_of_batches │ ├── README.md │ └── payload.nomad.template ├── periodic │ ├── prohibit-overlap.nomad │ └── template.nomad └── spread_batch │ ├── example.nomad │ └── example2.nomad ├── batch_overload ├── example.nomad └── periodic.nomad ├── blocked_eval ├── README.md └── example.nomad ├── check.sh ├── cni ├── README.md ├── diy_brige │ ├── README.md │ ├── diybridge.conflist │ ├── example.nomad │ └── repro.nomad └── example.nomad ├── complex_meta ├── template_env.nomad └── template_meta.nomad ├── connect ├── consul.nomad ├── discuss │ ├── blocky.yaml │ └── job.nomad ├── dns-via-mesh │ ├── README.md │ ├── consul-dns.nomad │ ├── consul-dns2.nomad │ └── go-resolv-test │ │ ├── .gitignore │ │ ├── build.sh │ │ └── main.go ├── ingress_gateways │ └── ingress_gateway.nomad ├── native │ └── 
cn-demo.nomad ├── nginx_ingress │ ├── countdash.nomad │ └── ingress.nomad └── sidecar │ ├── countdash.nomad │ └── countdash2.nomad ├── consul-template ├── coordination │ ├── README.md │ └── sample.nomad ├── missing_vault_value │ └── sample.nomad └── my_first_kv │ ├── README.md │ └── example.nomad ├── consul ├── add_check │ ├── README.md │ ├── e1.nomad │ ├── e2.nomad │ ├── e3.nomad │ └── images │ │ ├── e2.png │ │ └── e3.png └── use_consul_for_kv_path │ ├── README.md │ └── template.nomad ├── countdash ├── connect │ └── countdash.nomad └── simple │ └── countdash.nomad ├── csi ├── aws │ ├── ebs │ │ ├── README.md │ │ ├── busybox.nomad │ │ ├── mysql-server.nomad │ │ ├── plugin-ebs-controller.nomad │ │ ├── plugin-ebs-nodes.nomad │ │ └── volume.hcl │ └── efs │ │ ├── README.md │ │ ├── busybox.nomad │ │ ├── node.nomad │ │ └── volume.hcl ├── gcp │ └── gce-pd │ │ ├── README.md │ │ ├── config.nomad │ │ ├── controller.nomad │ │ ├── cv-nomad.hcl │ │ ├── disk.hcl │ │ ├── job.nomad │ │ └── nodes.nomad ├── hetzner │ └── volume │ │ ├── README.md │ │ ├── config.nomad │ │ ├── job.nomad │ │ ├── node.nomad │ │ └── volume.hcl └── hostpath │ ├── block │ ├── README.md │ ├── csi-hostpath-driver.nomad │ ├── job.nomad │ └── test.sh │ ├── file │ ├── README.md │ ├── csi-hostpath-driver.nomad │ ├── job.nomad │ └── test.sh │ └── volume.hcl ├── deployments └── failing_deployment │ └── example.nomad ├── docker ├── auth_from_template │ ├── README.md │ └── auth.nomad ├── datadog │ ├── container_network.nomad │ ├── ex3.nomad │ ├── example2.nomad │ └── http_echo ├── docker+host_volume │ ├── README.md │ ├── task_deps.nomad │ └── unsafe.nomad ├── docker_dynamic_hostname │ ├── README.md │ ├── finished.nomad │ ├── res_file │ └── view.sh ├── docker_entrypoint │ ├── Dockerfile │ └── example.nomad ├── docker_image_not_found │ ├── README.md │ ├── reschedule.nomad │ └── restart.nomad ├── docker_interpolated_image_name │ ├── README.md │ ├── example.nomad │ └── hostname.nomad ├── docker_logging │ └── example.nomad ├── docker_mac_address │ └── example.nomad ├── docker_network │ ├── example1.nomad │ └── example2.nomad ├── docker_nfs │ ├── README.md │ └── example.nomad ├── docker_template │ └── example.nomad ├── docker_twice_in_alloc │ └── example.nomad ├── docker_windows_abs_mount │ ├── Dockerfile │ ├── README.md │ ├── SleepyEcho.ps1 │ └── repro.nomad ├── env_var_args │ ├── Dockerfile │ ├── README.md │ ├── cmd.sh │ ├── cmd_alt.sh │ ├── entrypoint.sh │ ├── start.nomad │ └── test.nomad ├── get_fact_from_consul │ ├── README.md │ ├── args.nomad │ └── image.nomad ├── host-volumes-and-users │ ├── README.md │ └── scratch.nomad ├── labels │ ├── README.md │ ├── heredoc.nomad │ ├── interpolation.nomad │ └── literal.nomad └── mount_alloc │ ├── README.md │ └── example.nomad ├── drain └── example.nomad ├── dummy └── example.nomad ├── echo_stack ├── README.md ├── fabio-system.nomad ├── login-service.nomad └── profile-service.nomad ├── env └── escaped_env_vars │ ├── Dockerfile │ ├── README.md │ ├── entrypoint.sh │ └── example.nomad ├── environment ├── README.md └── example.nomad ├── exec-zip ├── README.md ├── example.nomad └── folder.tgz ├── exec └── host-volumes-and-users │ ├── README.md │ └── scratch.nomad ├── fabio-ssl └── fabio-ssl.nomad ├── fabio ├── README.md ├── fabio-docker.nomad ├── fabio-service.nomad └── fabio-system.nomad ├── failing_jobs ├── README.md ├── failing_sidecar │ ├── README.md │ └── example.nomad └── impossible_constratint │ ├── README.md │ └── example.nomad ├── giant └── example.nomad ├── guide └── TUTORIAL_TEMPLATE.mdx ├── 
host_volume ├── README.md ├── mariadb │ └── mariadb.nomad ├── prometheus │ ├── README.md │ ├── grafana │ │ ├── README.md │ │ └── nomad_jobs.json │ └── prometheus.nomad └── read_only │ └── read_only.nomad ├── http_echo ├── arm-service.nomad ├── bar-service.nomad ├── car-service-broken-check.nomad ├── foo-service.deployment.nomad ├── foo-service.nomad ├── foo-test.nomad └── template │ ├── echo_template.nomad │ ├── ets.nomad │ ├── ets2.nomad │ └── ets3.nomad ├── httpd_site ├── README.md ├── httpd.nomad ├── make_site.sh ├── site-content.tgz └── site-content │ ├── about.html │ ├── css │ └── style.css │ └── index.html ├── ipv6 └── SimpleHTTPServer │ └── sample.nomad ├── java ├── JavaDriverTest │ ├── java-driver-test.nomad │ └── test2.nomad ├── README.md ├── SampleWebApp.war ├── apache_camel │ ├── camel-standalone-helloworld-1.0-SNAPSHOT.jar │ └── java_files.nomad └── jar-test │ ├── README.md │ ├── bin │ └── Count.class │ ├── jar-test.nomad │ ├── jar │ └── Count.jar │ └── src │ └── Count.java ├── job_examples ├── base-batch.nomad └── meta │ ├── README.md │ └── meta-batch.nomad ├── json-jobs ├── example.nomad └── job.json ├── load_balancers └── traefik │ ├── README.md │ ├── traefik.nomad │ ├── webapp.nomad │ └── webapp2.nomad ├── meta ├── README.md └── example.nomad ├── microservice └── example.nomad ├── minecraft ├── minecraft.nomad ├── minecraft_exec.nomad └── plugin.nomad ├── monitoring └── sensu │ ├── fabio-docker.nomad │ └── sensu.nomad ├── nginx-fabio-clone ├── README.md ├── bar-service.nomad ├── e.ct ├── e.out ├── example.nomad ├── foo-service.nomad ├── tj.ct └── tj.out ├── oom └── example.nomad ├── output.html ├── parameterized ├── README.md ├── docker_hello_world │ └── hello-world.nomad ├── template.nomad └── to_specific_client │ ├── example.nomad │ └── workaround │ ├── README.md │ ├── example.nomad │ ├── rolling_run.sh │ └── watch.py ├── ports ├── README.md └── example.nomad ├── preserve_state ├── bar-service.jsonjob ├── example.jsonjob ├── fabio.jsonjob ├── foo-service.jsonjob ├── hashi-ui.jsonjob ├── jam.sh ├── nomad_debug └── preserve.sh ├── qemu ├── README.md ├── hass │ └── hass.nomad ├── imagebuilder │ ├── Core-current.iso │ ├── Dockerfile │ ├── NOTES.md │ └── core-image.qcow2 ├── job.json ├── tc.qcow2 ├── tc_ssh.nomad ├── tc_ssh2.nomad ├── tc_ssh_arm.nomad └── tinycore.qcow2 ├── raw_exec ├── env.nomad ├── mkdir │ ├── README.md │ ├── mkdir-bash.nomad │ └── mkdir.nomad ├── ps.nomad ├── quoted_args │ ├── quoted_args.nomad │ └── quoted_args_2.nomad └── user │ └── example.nomad ├── reproductions └── cpu_rescheduling │ ├── README.md │ └── repro.nomad ├── reschedule └── ex.nomad ├── restart └── restart.nomad ├── rolling_upgrade ├── README.md ├── cv-new.nomad ├── cv.nomad ├── example-new.nomad └── example.nomad ├── sentinel ├── README.md ├── alwaysFalse.sentinel ├── example.nomad ├── exampleGroupMissingNodeClass.nomad ├── exampleGroupNodeClass.nomad ├── exampleJobNodeClass.nomad ├── exampleNoNodeClass.nomad ├── payload.json └── requireNodeClass.sentinel ├── server-variables ├── README.md ├── build-site.nomad ├── nginx.nomad ├── reset.sh ├── wordpress-db.nomad └── wordpress.nomad ├── sleepy ├── README.md ├── sleepy_bash │ └── sleepy.nomad └── sleepy_python │ ├── README.md │ ├── batch_sleepy_python.nomad │ └── sleepy_python.nomad ├── spread ├── example.nomad ├── scheduler.json └── scheduler_b.json ├── stress ├── README.md └── cpu_throttled_time │ ├── README.md │ └── stress.nomad ├── super_big ├── README.md ├── super_big.nomad └── super_big2.nomad ├── system_jobs ├── sleepy │ ├── README.md │ 
├── sleepy_bash │ │ └── sleepy.nomad │ └── sleepy_python │ │ ├── README.md │ │ ├── batch_sleepy_python.nomad │ │ └── sleepy_python.nomad ├── system_deployment │ ├── deploy_jdk.nomad │ ├── fabio-system.nomad │ ├── fabio-system.nomad2 │ ├── foo-system.nomad │ └── foo-system.nomad2 └── system_filter │ ├── filtered.nomad │ └── host_vol.nomad ├── task_deps ├── consul-lock │ └── myapp.nomad ├── disk_check │ ├── README.md │ └── disk.nomad ├── init_artifact │ ├── README.md │ ├── batch-init-artifact.nomad │ └── service-init-artifact.nomad ├── interjob │ ├── README.md │ ├── myapp.nomad │ └── myservice.nomad ├── k8sdoc │ ├── README.md │ ├── init.nomad │ ├── k8sdoc1.nomad │ ├── myapp.nomad │ └── myservice.nomad └── sidecar │ └── example.nomad ├── template ├── batch │ ├── README.md │ ├── context.nomad │ ├── parameter.nomad │ ├── services.nomad │ └── template.nomad ├── from_consul │ ├── README.md │ ├── artifact.nomad │ ├── init.nomad │ └── issue.nomad ├── learning │ └── README.md ├── rerender │ └── example.nomad ├── secure_variables │ ├── README.md │ ├── example.nomad │ ├── img │ │ └── template.html.screenshot.png │ ├── interpolated_job │ │ ├── README.md │ │ ├── interpolated_job.hcl │ │ └── makeJobVars.sh │ ├── makeJobVars.sh │ ├── makeVars.sh │ ├── multiregion │ │ ├── start.sh │ │ ├── stop.sh │ │ ├── template.nomad │ │ ├── test.out │ │ └── test.tmpl │ ├── template copy.tmpl │ ├── template-playground.nomad │ ├── template.html │ ├── template.tmpl │ ├── variable_view.nomad │ └── write │ │ ├── t0.out │ │ ├── t0.tmpl │ │ ├── t1.out │ │ ├── t1.tmpl │ │ ├── t2.out │ │ └── t2.tmpl ├── services │ ├── README.md │ └── byTag.nomad ├── template-system │ ├── README.md │ ├── composed_keys.nomad │ ├── services-on-nomad-client.nomad │ └── template.nomad ├── template_handoff │ ├── README.md │ ├── handoff.nomad │ └── handoff_restart.nomad ├── template_into_docker │ └── example.nomad ├── template_playground │ ├── composed_keys.nomad │ ├── template-exec.nomad │ ├── template-hcl2.nomad │ └── template.nomad └── use_whitespace │ └── byTag.nomad ├── test.sh ├── vault ├── deleted_policy │ ├── README.md │ ├── break_it.sh │ ├── nomad-cluster-role.broken.json │ ├── nomad-cluster-role.json │ ├── nomad-server-policy.hcl │ ├── setup.sh │ ├── temp1.nomad │ └── workload.nomad ├── pki │ ├── README.md │ ├── sleepy_bash_pki.nomad │ └── test.nomad └── sleepy_vault_bash │ ├── sleepy_bash.nomad │ └── test.nomad ├── vault_reload_triggered_by_consul ├── README.md ├── SleepyEcho.sh └── sample.nomad ├── victoriametrics └── vm.nomad ├── win_rawexec_restart ├── SleepyEcho.ps1 └── artifact_sleepyecho.nomad └── windows_docker ├── docker-iis.nomad └── windows-test.nomad /.envrc: -------------------------------------------------------------------------------- 1 | echo "Processing .direnv..." 2 | function template { 3 | echo "Creating a skeleton tutorial in $1." 4 | mkdir -p $1 5 | cp $(pwd)/guide/TUTORIAL_TEMPLATE.mdx $1/README.md 6 | } 7 | echo "Done." 
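# Example usage of the `template` helper defined above (the path is
# illustrative):
#
#   template HCL2/my_new_example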

-------------------------------------------------------------------------------- /.gitignore: --------------------------------------------------------------------------------
.DS_Store
-------------------------------------------------------------------------------- /HCL2/add_local_file/doc/emoji-delimiters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/HCL2/add_local_file/doc/emoji-delimiters.png
-------------------------------------------------------------------------------- /HCL2/add_local_file/input.file: --------------------------------------------------------------------------------
This is the input file content

Particularly evil stuff:

Single quotes: 'hello'
Double quotes: "howdy"
Go-template: {{ "hello" }}
Backticks: `this is a raw-string in go, but raw strings can't be in rawstrings`
JSON:
{
  "object": {
    "foo": true,
    "bar": 5,
    "baz": [1,2,3]
  }
}
-------------------------------------------------------------------------------- /HCL2/add_local_file/raw_file_b64.nomad: --------------------------------------------------------------------------------
variable "input_file" {
  type        = string
  description = "Local path to the file to inject into the job."
}

job "raw_file_b64.nomad" {
  datacenters = ["dc1"]

  group "services" {
    task "alpine" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }

      # Round-trip the file through base64: HCL2's base64encode() runs at
      # job-submission time, and the template runtime's base64Decode undoes
      # it at render time, so the file body survives template processing.
      template {
        destination = "local/file.out"
        data        = "{{base64Decode \"${base64encode(file(var.input_file))}\"}}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/add_local_file/raw_file_delims.nomad: --------------------------------------------------------------------------------
variable "input_file" {
  type        = string
  description = "Local path to the file to inject into the job."
}

job "raw_file_delims.nomad" {
  datacenters = ["dc1"]

  group "services" {
    task "alpine" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }

      # Emoji delimiters: the input file will never contain them, so the
      # template engine finds nothing to interpolate.
      template {
        destination     = "local/file.out"
        data            = file(var.input_file)
        left_delimiter  = "🚫"
        right_delimiter = "🚫"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/add_local_file/raw_file_json.nomad: --------------------------------------------------------------------------------
variable "input_file" {
  type        = string
  description = "Local path to the file to inject into the job."
}

job "raw_file_json.nomad" {
  datacenters = ["dc1"]

  group "services" {
    task "alpine" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }

      # Same round-trip trick as raw_file_b64.nomad, but through JSON:
      # jsonencode() escapes the file at submission time and jsonDecode
      # restores it at render time.
      template {
        destination = "local/file.out"
        data        = "{{jsonDecode \"${jsonencode(file(var.input_file))}\"}}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/add_local_file/use_file.nomad: --------------------------------------------------------------------------------
variable "input_file" {
  type        = string
  description = "Local path to the file to inject into the job."
}

job "use_file.nomad" {
  datacenters = ["dc1"]

  group "services" {
    task "alpine" {
      driver = "docker"

      config {
        image   = "alpine"
        command = "sh"
        args = [
          "-c",
          "cat local/file.out; while true; do sleep 30; done",
        ]
      }

      template {
        destination = "local/file.out"
        data        = file(var.input_file)
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/always_change/before.nomad: --------------------------------------------------------------------------------
job "before.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  group "before" {
    task "hello-world" {
      driver = "docker"

      config {
        image = "hello-world:latest"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/always_change/uuid.nomad: --------------------------------------------------------------------------------
job "uuid.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  meta {
    run_uuid = "${uuidv4()}"
  }

  group "uuid" {
    task "hello-world" {
      driver = "docker"

      config {
        image = "hello-world:latest"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/always_change/variable.nomad: --------------------------------------------------------------------------------
job "variable.nomad" {
  datacenters = ["dc1"]
  type        = "batch"

  meta {
    run_index = "${floor(var.run_index)}"
  }

  group "variable" {
    task "hello-world" {
      driver = "docker"

      config {
        image = "hello-world:latest"
      }
    }
  }
}

variable "run_index" {
  type        = number
  description = "An integer that, when changed from the current value, causes the job to restart."
  validation {
    condition     = var.run_index == floor(var.run_index)
    error_message = "The run_index must be an integer."
  }
}
-------------------------------------------------------------------------------- /HCL2/dynamic/README.md: --------------------------------------------------------------------------------
# HCL2 dynamic blocks

This job specification leverages the `dynamic` HCL2 blocks and HCL2 variables to
create a multi-task job specification.
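A minimal sketch of the pattern (the variable and commands here are
illustrative, not the contents of `example.nomad`; see that file for the real
job):

```hcl
variable "commands" {
  type = map(string)
  default = {
    date   = "date"
    uptime = "uptime"
  }
}

job "dynamic" {
  datacenters = ["dc1"]
  type        = "batch"

  group "group" {
    # Generates one task per map entry; the task name comes from each key.
    dynamic "task" {
      for_each = var.commands
      labels   = [task.key]

      content {
        driver = "raw_exec"

        config {
          command = task.value
        }
      }
    }
  }
}
```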
-------------------------------------------------------------------------------- /HCL2/object_to_template/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/HCL2/object_to_template/README.md -------------------------------------------------------------------------------- /HCL2/object_to_template/example.nomad: -------------------------------------------------------------------------------- 1 | variable "datacenters" { 2 | type = list(string) 3 | default = ["dc1"] 4 | } 5 | 6 | variable "ports" { 7 | type = list(object({ 8 | name = string 9 | internal = number 10 | external = number 11 | })) 12 | default = [ 13 | { 14 | name = "db" 15 | internal = 8300 16 | external = 8300 17 | }, 18 | { 19 | name = "db2" 20 | internal = 8301 21 | external = 8301 22 | } 23 | ] 24 | } 25 | 26 | job "example" { 27 | datacenters = var.datacenters 28 | type = "batch" 29 | 30 | group "group" { 31 | task "task" { 32 | driver = "exec" 33 | 34 | config { 35 | command = "bash" 36 | args = ["-c", "cat template.out"] 37 | } 38 | 39 | template { 40 | destination = "template.out" 41 | data = <{{.internal}}{{println}}{{end}} 44 | EOT 45 | } 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /HCL2/variable_jobs/decode-external-file/README.MD: -------------------------------------------------------------------------------- 1 | # Decode the contents of an external file into a `local` variable 2 | 3 | The HCL2 `file` function when paired with the `jsondecode` or `yamldecode` function enables you to externalize shared configuration elements for Nomad jobs to a JSON or YAML file. 4 | 5 | This example contains two jobs that read the `env.json` file to and use values from it to configure the Nomad job during submission from the CLI. 6 | 7 | 8 | ## Run the examples 9 | 10 | ```bash 11 | nomad job run -var="config=env.json" job1.nomad 12 | ``` 13 | 14 | Nomad will start a Redis 3 container 15 | 16 | ```bash 17 | nomad job run -var="config=env.json" job2.nomad 18 | ``` 19 | 20 | Nomad will start a Redis 4 container 21 | 22 | ## Stop the examples 23 | 24 | ```bash 25 | nomad job stop job1 26 | nomad job stop job2 27 | ``` 28 | -------------------------------------------------------------------------------- /HCL2/variable_jobs/decode-external-file/env.json: -------------------------------------------------------------------------------- 1 | { 2 | "datacenters": [ 3 | "dc1" 4 | ], 5 | "docker_image_job1": "redis:3", 6 | "docker_image_job2": "redis:4" 7 | } 8 | -------------------------------------------------------------------------------- /HCL2/variable_jobs/decode-external-file/job1.nomad: -------------------------------------------------------------------------------- 1 | #---------------------------------------------------------------------------- 2 | # This value can be supplied as a flag to nomad job run. 3 | # `nomad job run -var config_file=«path to config» job1.nomad` 4 | # or as an environment variable 5 | # `export NOMAD_VAR_config_file=«path to config»` 6 | # `nomad job run job1.nomad` 7 | #---------------------------------------------------------------------------- 8 | variable "config_file" { 9 | type = string 10 | description = "Path to JSON formatted shared job configuration." 
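  # The README pairs `file` with `jsondecode`; a hypothetical YAML variant
  # of the locals block below would use yamldecode instead:
  #   config = yamldecode(file(var.config_file))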
}

locals {
  config = jsondecode(file(var.config_file))
}

job "job1" {
  datacenters = local.config.datacenters

  group "job1" {
    task "job1" {
      driver = "docker"

      config {
        image = local.config.docker_image_job1
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/decode-external-file/job2.nomad: --------------------------------------------------------------------------------
#----------------------------------------------------------------------------
# This value can be supplied as a flag to nomad job run.
#   `nomad job run -var config_file=«path to config» job2.nomad`
# or as an environment variable
#   `export NOMAD_VAR_config_file=«path to config»`
#   `nomad job run job2.nomad`
#----------------------------------------------------------------------------
variable "config_file" {
  type        = string
  description = "Path to JSON formatted shared job configuration."
}

locals {
  config = jsondecode(file(var.config_file))
}

job "job2" {
  datacenters = local.config.datacenters

  group "job2" {
    task "job2" {
      driver = "docker"

      config {
        image = local.config.docker_image_job2
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/env-vars/README.MD: --------------------------------------------------------------------------------
# Provide HCL2 variable values using environment variables

This example contains two jobs that read HCL2 variable values from the
environment and populate the Nomad jobs with them during submission from the
CLI. This can be a very powerful feature when paired with [`direnv`],
[`envconsul`], and other tools that can manipulate environment variables.

## Run the sample

### Read in the environment variables

```bash
source ./env.vars
```

```bash
nomad job run job1.nomad
```

Nomad will start a Redis 3 container.

```bash
nomad job run job2.nomad
```

Nomad will start a Redis 4 container.

## Stop the example

```bash
nomad job stop job1
nomad job stop job2
unset NOMAD_VAR_datacenters \
      NOMAD_VAR_docker_image_job1 \
      NOMAD_VAR_docker_image_job2
```

[`envconsul`]: https://github.com/hashicorp/envconsul
[`direnv`]: https://direnv.net/
-------------------------------------------------------------------------------- /HCL2/variable_jobs/env-vars/env.vars: --------------------------------------------------------------------------------
export NOMAD_VAR_datacenters='["dc1"]'
export NOMAD_VAR_docker_image_job1="redis:3"
export NOMAD_VAR_docker_image_job2="redis:4"
-------------------------------------------------------------------------------- /HCL2/variable_jobs/env-vars/job1.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters in which to run the job."
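  # Populated from the NOMAD_VAR_datacenters environment variable (see env.vars).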
}

variable "docker_image_job1" {
  type        = string
  description = "Image for job1 to run"
}

job "job1" {
  datacenters = var.datacenters

  group "job1" {
    task "job1" {
      driver = "docker"

      config {
        image = var.docker_image_job1
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/env-vars/job2.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters in which to run the job."
}

variable "docker_image_job2" {
  type        = string
  description = "Image for job2 to run"
}

job "job2" {
  datacenters = var.datacenters

  group "job2" {
    task "job2" {
      driver = "docker"

      config {
        image = var.docker_image_job2
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/job.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters to run the job in. Defaults to `[\"dc1\"]`"
  default     = ["dc1"]
}

variable "docker_image" {
  type        = string
  description = "Docker image for the job to run"
}

variable "image_version" {
  type        = string
  description = "Version of the docker image to run"
}

job "job1" {
  datacenters = var.datacenters

  group "job1" {
    task "job1" {
      driver = "docker"

      config {
        image = "${var.docker_image}:${var.image_version}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/job.vars: --------------------------------------------------------------------------------
image_version = "99"
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job1.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters in which to run the job."
}

variable "docker_image" {
  type        = string
  description = "Shared docker image"
}

variable "image_version_job1" {
  type        = string
  description = "Docker image version to run for job1"
}

job "job1" {
  datacenters = var.datacenters

  group "job1" {
    task "job1" {
      driver = "docker"

      config {
        image = "${var.docker_image}:${var.image_version_job1}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job1.vars: --------------------------------------------------------------------------------
image_version_job1 = "3"
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job2.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters in which to run the job."
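  # Assumed invocation, based on this folder's layout:
  #   nomad job run -var-file=shared.vars -var-file=job2.vars job2.nomad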
}

variable "docker_image" {
  type        = string
  description = "Shared docker image"
}

variable "image_version_job2" {
  type        = string
  description = "Docker image version to run for job2"
}

job "job2" {
  datacenters = var.datacenters

  group "job2" {
    task "job2" {
      driver = "docker"

      config {
        image = "${var.docker_image}:${var.image_version_job2}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job2.vars: --------------------------------------------------------------------------------
image_version_job2 = "4"
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job3.nomad: --------------------------------------------------------------------------------
variable "datacenters" {
  type        = list(string)
  description = "List of Nomad datacenters in which to run the job."
}

variable "docker_image" {
  type        = string
  description = "Shared docker image"
}

variable "image_version_job3" {
  type        = string
  description = "Docker image version to run for job3"
}

job "job3" {
  datacenters = var.datacenters

  group "job3" {
    task "job3" {
      driver = "docker"

      config {
        image = "${var.docker_image}:${var.image_version_job3}"
      }
    }
  }
}
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/job3.vars: --------------------------------------------------------------------------------
docker_image       = "hello-world"
image_version_job3 = "latest"
-------------------------------------------------------------------------------- /HCL2/variable_jobs/multiple-var-files/shared.vars: --------------------------------------------------------------------------------
datacenters  = ["dc1"]
docker_image = "redis"
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
# Nomad Example Jobs

This repository holds jobs and job skeletons that I have used to create
reproducers or minimum viable cases. I also use them as simple workloads when
creating guides.

Some specifically useful bits:

- **csi** - Example jobs that use CSI to connect to external resources such as
  block devices.

- **fabio** - Several different fabio configurations that can be used to spin up
  consul-aware load balancing in your Nomad cluster.

- **sleepy** - Jobs that do a thing and then sleep (perhaps redoing the thing
  when they wake up).

- **template_playground** - A batch job that can be used to practice iterative
  template development.
20 | -------------------------------------------------------------------------------- /alloc_folder/mount_alloc.nomad: -------------------------------------------------------------------------------- 1 | job "alloc_folder" { 2 | datacenters = ["dc1"] 3 | 4 | group "group" { 5 | task "docker" { 6 | driver = "docker" 7 | 8 | config { 9 | image = "busybox:latest" 10 | command = "sh" 11 | args = ["-c", "while true; do echo $(date) | tee -a /my_data/output.txt; sleep 2; done"] 12 | volumes = ["alloc/data:/my_data"] 13 | 14 | } 15 | 16 | resources { 17 | cpu = 100 18 | memory = 100 19 | } 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /alloc_folder/sidecar.nomad: -------------------------------------------------------------------------------- 1 | job "alloc_folder" { 2 | datacenters = ["dc1"] 3 | 4 | group "group" { 5 | task "docker" { 6 | driver = "docker" 7 | 8 | config { 9 | image = "busybox:latest" 10 | command = "sh" 11 | args = ["-c", "while true; do echo $(date) | tee -a /alloc/output.txt; sleep 2; done"] 12 | } 13 | 14 | resources { 15 | cpu = 100 16 | memory = 100 17 | } 18 | } 19 | 20 | task "exec" { 21 | driver = "exec" 22 | 23 | config { 24 | command = "tail" 25 | args = ["-f", "/alloc/output.txt"] 26 | } 27 | 28 | resources { 29 | cpu = 100 30 | memory = 100 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /applications/artifactory_oss/registry.nomad: -------------------------------------------------------------------------------- 1 | job "registry" { 2 | datacenters = ["dc1"] 3 | priority = 80 4 | 5 | group "docker" { 6 | network { 7 | port "registry" { 8 | to = 5000 9 | static = 5000 10 | } 11 | } 12 | 13 | service { 14 | name = "registry" 15 | port = "registry" 16 | 17 | check { 18 | type = "tcp" 19 | port = "registry" 20 | interval = "10s" 21 | timeout = "2s" 22 | } 23 | } 24 | 25 | volume "artifactory-registry" { 26 | type = "host" 27 | source = "artifactory-registry" 28 | read_only = false 29 | } 30 | 31 | task "container" { 32 | driver = "docker" 33 | 34 | volume_mount { 35 | volume = "artifactory-registry" 36 | destination = "/var/lib/registry" 37 | } 38 | 39 | config { 40 | image = "docker.bintray.io/jfrog/artifactory-oss:latest" 41 | ports = ["registry"] 42 | } 43 | 44 | resources { 45 | cpu = 500 46 | memory = 256 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /applications/cluster-broccoli/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | task "redis" { 12 | driver = "docker" 13 | 14 | config { 15 | image = "redis:7" 16 | ports = ["db"] 17 | auth_soft_fail = true 18 | } 19 | 20 | resources { 21 | cpu = 500 22 | memory = 256 23 | } 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /applications/docker_registry/registry.nomad: -------------------------------------------------------------------------------- 1 | job "registry" { 2 | datacenters = ["dc1"] 3 | priority = 80 4 | 5 | group "docker" { 6 | network { 7 | port "registry" { 8 | to = 5000 9 | static = 5000 10 | } 11 | } 12 | 13 | service { 14 | name = "registry" 15 | port = "registry" 16 | 17 | check { 18 | type = "tcp" 19 | port = "registry" 20 | interval = "10s" 21 | timeout = "2s" 22 | } 23 | 
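        # Simple TCP liveness check: Consul probes the static registry port.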
} 24 | 25 | volume "docker-registry" { 26 | type = "host" 27 | source = "docker-registry" 28 | read_only = false 29 | } 30 | 31 | task "container" { 32 | driver = "docker" 33 | 34 | volume_mount { 35 | volume = "docker-registry" 36 | destination = "/var/lib/registry" 37 | } 38 | 39 | config { 40 | image = "registry" 41 | ports = ["registry"] 42 | } 43 | 44 | resources { 45 | cpu = 500 46 | memory = 256 47 | } 48 | } 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /applications/docker_registry_v2/htpasswd: -------------------------------------------------------------------------------- 1 | user:$2y$05$kyEyguS/Sisz7SMjqKQZ1eQDCM7pSFiItkL9yiVIDOVyQfj8XTCAS 2 | -------------------------------------------------------------------------------- /applications/docker_registry_v2/make_password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker run --rm -it -v $(pwd):/out --entrypoint="htpasswd" xmartlabs/htpasswd -Bbc /out/$1 $2 $3 4 | -------------------------------------------------------------------------------- /applications/docker_registry_v3/make_password.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cmd="htpasswd -Bbn $1 $2" 4 | if ! [ -x "$(command -v htpasswd)" ]; then 5 | if ! [ -x "$(command -v docker)" ]; then 6 | echo 'Notice: this script requires htpasswd or docker.' >&2 7 | exit 1 8 | fi 9 | 10 | echo 'Notice: htpasswd is not installed. Using docker to run it.' >&2 11 | fetchedDocker=true 12 | cmd="docker run --rm -it -v $(pwd):/out --entrypoint="htpasswd" xmartlabs/htpasswd -Bbn $1 $2" 13 | fi 14 | 15 | user=$1 16 | password=$(eval $cmd | tr -d "\n"| tr ":" " " | awk '{print $2}') 17 | 18 | varPath="nomad/jobs/registry/docker/container" 19 | nomad var get $varPath | nomad var put - "$user"="$password" 20 | -------------------------------------------------------------------------------- /applications/minio/README.md: -------------------------------------------------------------------------------- 1 | # Minio S3-compatible Storage 2 | 3 | This job uses Nomad Host Volumes to provide an internal s3 compatible storage 4 | environment which can be used to host private artifacts for a Nomad clusters. 5 | 6 | ## Prerequisites 7 | 8 | - **Consul** - This job leverages Consul service registrations for locating the 9 | MinIO instance. 10 | 11 | ## Necessary configuration 12 | 13 | ### Create the host volume in the configuration 14 | 15 | Create a folder on one of your Nomad clients to host your registry files. This 16 | example uses `/opt/volumes/minio-data` 17 | 18 | ```shell-session 19 | $ mkdir -p /opt/volumes/minio-data 20 | ``` 21 | 22 | Add the host_volume information to the client stanza in the Nomad configuration. 23 | 24 | ```hcl 25 | client { 26 | # ... 27 | host_volume "minio-data" { 28 | path = "/opt/volumes/minio-data" 29 | read_only = false 30 | } 31 | } 32 | ``` 33 | 34 | Restart Nomad to read the new configuration. 35 | 36 | ```shell-session 37 | $ systemctl restart nomad 38 | ``` 39 | -------------------------------------------------------------------------------- /applications/minio/secure-variables/README.md: -------------------------------------------------------------------------------- 1 | # Minio S3-compatible Storage 2 | 3 | This job uses Nomad Host Volumes to provide an internal s3 compatible storage 4 | environment which can be used to host private artifacts for a Nomad clusters. 

## Prerequisites

- **Nomad 1.4** - This job leverages Nomad service registrations for locating the
  MinIO instance and uses Nomad Variables.

## Necessary configuration

### Create the host volume in the configuration

Create a folder on one of your Nomad clients to host your registry files. This
example uses `/opt/volumes/minio-data`.

```shell-session
$ mkdir -p /opt/volumes/minio-data
```

Add the host_volume information to the client stanza in the Nomad configuration.

```hcl
client {
  # ...
  host_volume "minio-data" {
    path      = "/opt/volumes/minio-data"
    read_only = false
  }
}
```

Restart Nomad to read the new configuration.

```shell-session
$ systemctl restart nomad
```
-------------------------------------------------------------------------------- /applications/minio/secure-variables/minio-data/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/applications/minio/secure-variables/minio-data/.gitkeep
-------------------------------------------------------------------------------- /applications/minio/secure-variables/start.sh: --------------------------------------------------------------------------------
#! /usr/bin/env bash

mkdir -p minio-data
# Patch the placeholder in volume.hcl with the current working directory.
sed "s|«/absolute/path/to»|$(pwd)|g" volume.hcl > .volume_patch.hcl
nohup nomad agent -dev -config=.volume_patch.hcl -acl-enabled >nomad.log 2>&1 &

echo -n $! > .nomad.pid
echo "Nomad PID is $(cat .nomad.pid)"
disown

# wait for leadership
sleep 3

# Bootstrap the ACL system with a well-known token so the rest of the
# script (and the reader) can use it.
echo '{"BootstrapSecret": "2b778dd9-f5f1-6f29-b4b4-9a5fa948757a"}' | nomad operator api /v1/acl/bootstrap
echo ''

export NOMAD_TOKEN=2b778dd9-f5f1-6f29-b4b4-9a5fa948757a
echo -n ${NOMAD_TOKEN} > .nomad.token

# Seed the MinIO root credentials as Nomad Variables for the job to read.
nomad var put nomad/jobs/minio/storage/minio \
    root_user="AKIAIOSFODNN7EXAMPLE" \
    root_password="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

nomad job run -detach minio.nomad

echo 'export NOMAD_TOKEN=2b778dd9-f5f1-6f29-b4b4-9a5fa948757a'
-------------------------------------------------------------------------------- /applications/minio/secure-variables/stop.sh: --------------------------------------------------------------------------------
#! /usr/bin/env bash

PID=$(cat .nomad.pid)
echo "Stopping Nomad (pid: ${PID})"
# Stop the dev agent started by start.sh, then clean up its artifacts.
kill ${PID}
rm -rf .nomad.pid
rm -rf .nomad.token
rm -rf .volume_patch.hcl
rm -rf nomad.log
rm -rf minio-data
echo "Done."
-------------------------------------------------------------------------------- /applications/minio/secure-variables/volume.hcl: --------------------------------------------------------------------------------
# The host volume configuration for the minio task.
The start.sh 2 | # script will make a derived copy of this file with the place- 3 | # holder--«/absolute/path/to»--replaced with the output of `pwd` 4 | 5 | client { 6 | host_volume "minio-data" { 7 | path = "«/absolute/path/to»/minio-data" 8 | read_only = false 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /applications/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Prometheus 2 | 3 | 4 | On the client, you will need a rule to allow the docker containers to talk to the local 5 | consul agents. 6 | 7 | ``` 8 | firewall-cmd --permanent --zone=public --add-rich-rule='rule family=ipv4 source address=172.17.0.0/16 accept' && firewall-cmd --reload 9 | ``` 10 | 11 | 12 | ## Connecting to the instances 13 | 14 | 15 | -------------------------------------------------------------------------------- /applications/prometheus/grafana/README.md: -------------------------------------------------------------------------------- 1 | Thanks to [Nextty](https://grafana.com/orgs/derekamz) for two great grafana dashboards to start with: 2 | 3 | * Nomad Jobs - https://grafana.com/dashboards/6281 4 | * Nomad Cluster - 5 | -------------------------------------------------------------------------------- /applications/vms/freedos/.gitignore: -------------------------------------------------------------------------------- 1 | *.img 2 | 3 | 4 | # Created by https://www.toptal.com/developers/gitignore/api/macos 5 | # Edit at https://www.toptal.com/developers/gitignore?templates=macos 6 | 7 | ### macOS ### 8 | # General 9 | .DS_Store 10 | .AppleDouble 11 | .LSOverride 12 | 13 | # Icon must end with two \r 14 | Icon 15 | 16 | 17 | # Thumbnails 18 | ._* 19 | 20 | # Files that might appear in the root of a volume 21 | .DocumentRevisions-V100 22 | .fseventsd 23 | .Spotlight-V100 24 | .TemporaryItems 25 | .Trashes 26 | .VolumeIcon.icns 27 | .com.apple.timemachine.donotpresent 28 | 29 | # Directories potentially created on remote AFP share 30 | .AppleDB 31 | .AppleDesktop 32 | Network Trash Folder 33 | Temporary Items 34 | .apdisk 35 | 36 | # End of https://www.toptal.com/developers/gitignore/api/macos 37 | -------------------------------------------------------------------------------- /applications/vms/freedos/README.md: -------------------------------------------------------------------------------- 1 | ## FreeDOS VM 2 | 3 | This job fetches a small remote VM image and starts it in your Nomad cluster. It 4 | also contains a task that starts a web-browser based VNC viewer. 5 | 6 | TODO: This job requires network namespaces for QEMU, which currently does not 7 | work in a released version of Nomad. 
8 | -------------------------------------------------------------------------------- /applications/vms/freedos/freedos.img.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/applications/vms/freedos/freedos.img.tgz -------------------------------------------------------------------------------- /applications/vms/freedos/freedos.img.tgz.SHASUM: -------------------------------------------------------------------------------- 1 | 8d2817126bf46ba2b4fca0b0c49eed2cc208c6f6448651e82c6d973fcba36569 freedos.img.tgz 2 | -------------------------------------------------------------------------------- /applications/vms/tinycore/README.md: -------------------------------------------------------------------------------- 1 | # TinyCore QEMU example 2 | 3 | This sample will start a TinyCore Linux VM configured with the SSH daemon 4 | enabled. It performs port forwarding using the QEMU commands so that Nomad can 5 | dynamically assign a HTTP and SSH port for the VM. 6 | 7 | You will need to serve the `tinycore.qcow2` image someplace so that it can be 8 | retrieved using the artifact stanza. 9 | 10 | -------------------------------------------------------------------------------- /applications/vms/tinycore/tinycore.qcow2.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/applications/vms/tinycore/tinycore.qcow2.tgz -------------------------------------------------------------------------------- /applications/wordpress/distributed/reset.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/applications/wordpress/distributed/reset.sh -------------------------------------------------------------------------------- /artifact_sleepyecho/README.md: -------------------------------------------------------------------------------- 1 | ## artifact_sleepyecho 2 | 3 | Purpose: 4 | 5 | This sample was designed to pull a shell script from an AWS S3 bucket and 6 | run it locally. Some additional smarts were included in the shell script 7 | to enable it to simulate more conditions. 8 | 9 | The job as committed is somewhat uninteresting, but can be changed up to 10 | add Vault Support, Template Stanza testing, Consul KV output. This should 11 | be considered a building block to be used for more robust reproducers. 12 | 13 | -------------------------------------------------------------------------------- /artifact_sleepyecho/SleepyEcho.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | if [ -z "$1" ] 4 | then 5 | SLEEP_SECS="2" 6 | else 7 | SLEEP_SECS="$1" 8 | fi 9 | 10 | if [ -z "${EXTRAS}" ] 11 | then 12 | extras_part="" 13 | else 14 | extras_part="EXTRAS: [${EXTRAS}]" 15 | fi 16 | 17 | echo "$(date) -- Starting SleepyEcho. Sleep interval is ${SLEEP_SECS} sec. ${extras_part}" 18 | 19 | if [ ! -f "/alloc/data/time.txt" ] 20 | then 21 | echo "$(date) -- Writing date to /alloc/data/time.txt" 22 | echo -n "$(date)" > /alloc/data/time.txt 23 | else 24 | echo "$(date) -- Found time.txt file in /alloc/data -- $(cat /alloc/data/time.txt)" 25 | fi 26 | 27 | while true 28 | do 29 | echo "$(date) -- Alive... going back to sleep for ${SLEEP_SECS}. 
${extras_part}" 30 | sleep ${SLEEP_SECS} 31 | done 32 | -------------------------------------------------------------------------------- /artifact_sleepyecho/artifact_sleepyecho.nomad: -------------------------------------------------------------------------------- 1 | job "repro" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | group "group" { 5 | count = 1 6 | 7 | # constraint { 8 | # attribute = "${attr.kernel.name}" 9 | # value = "darwin" 10 | # } 11 | 12 | task "echo-task" { 13 | driver = "exec" 14 | 15 | config { 16 | command = "local/bin/SleepyEcho.sh" 17 | args = ["2"] 18 | } 19 | 20 | artifact { 21 | source = "https://angrycub-hc.s3.amazonaws.com/public/SleepyEcho.sh" 22 | destination = "local/bin" 23 | } 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /artifact_sleepyecho/vault_sleepyecho.nomad: -------------------------------------------------------------------------------- 1 | job "repro" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | group "group" { 5 | count = 1 6 | 7 | task "echo-task" { 8 | driver = "exec" 9 | env { 10 | EXTRAS = "${VAULT_TOKEN}" 11 | } 12 | config { 13 | command = "local/bin/SleepyEcho.sh" 14 | args = ["2"] 15 | } 16 | vault { 17 | policies = ["nomad-client"] 18 | change_mode = "signal" 19 | change_signal = "SIGUSR1" 20 | } 21 | artifact { 22 | source = "https://angrycub-hc.s3.amazonaws.com/public/SleepyEcho.sh" 23 | destination = "local/bin" 24 | } 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /batch/batch_gc/example.nomad: -------------------------------------------------------------------------------- 1 | variable "body" { 2 | type = string 3 | default = "Template Rendered" 4 | } 5 | 6 | job "example" { 7 | datacenters = ["dc1"] 8 | type = "batch" 9 | 10 | group "group" { 11 | task "output" { 12 | driver = "docker" 13 | 14 | config { 15 | image = "busybox" 16 | auth_soft_fail = true 17 | command = "cat" 18 | args = ["/local/template.out"] 19 | } 20 | 21 | template { 22 | destination = "${NOMAD_TASK_DIR}/template.out" 23 | data = var.body 24 | } 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /batch/dispatch/sleepy.nomad: -------------------------------------------------------------------------------- 1 | job sleepy { 2 | datacenters = ["dc1"] 3 | 4 | group "group" { 5 | task "sleepy.sh" { 6 | driver = "exec" 7 | 8 | config { 9 | command = "${NOMAD_TASK_DIR}/sleepy.sh" 10 | } 11 | 12 | template { 13 | destination = "local/sleepy.sh" 14 | data = < /tmp/payload.txt"] 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /batch/spread_batch/example2.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | 5 | meta { 6 | "version" = "2" 7 | } 8 | 9 | group "nodes" { 10 | count = 6 11 | 12 | constraint { 13 | distinct_hosts = true 14 | } 15 | 16 | task "payload" { 17 | driver = "exec" 18 | 19 | config { 20 | command = "/bin/bash" 21 | args = ["-c", "echo $VAULT_ADDR > test.txt"] 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /batch_overload/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "sleepers" { 5 | count = 2000 6 | task 
"wait" { 7 | driver = "raw_exec" 8 | config { 9 | command = "bash" 10 | args = [ 11 | "-c", 12 | "echo Starting; sleep=`shuf -i5-10 -n1`; echo Sleeping $sleep seconds.; sleep $sleep; echo Done; exit 0" 13 | ] 14 | } 15 | resources { 16 | # This will cause us to have to create blocking allocs. 17 | memory = 200 18 | } 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /batch_overload/periodic.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | periodic { 5 | cron = "*/15 * * * * *" 6 | prohibit_overlap = true 7 | } 8 | group "sleepers" { 9 | count = 5 10 | task "wait" { 11 | driver = "raw_exec" 12 | config { 13 | command = "bash" 14 | args = [ 15 | "-c", 16 | "echo Starting; sleep=`shuf -i5-10 -n1`; echo Sleeping $sleep seconds.; sleep $sleep; echo Done; exit 0" 17 | ] 18 | } 19 | resources { 20 | # This will cause us to have to create blocking allocs. 21 | memory = 200 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /blocked_eval/README.md: -------------------------------------------------------------------------------- 1 | # Blocked jobs 2 | 3 | This job can be used to experiment with job behaviors when a job is waiting for 4 | a client that is able to serve the request. This is simulated using a constraint 5 | on a client metadata item. 6 | 7 | It will block until a client comes up with `meta.waituntil = "charlie"`. 8 | -------------------------------------------------------------------------------- /blocked_eval/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | constraint { 5 | attribute = "${meta.waituntil}" 6 | operator = "=" 7 | value = "charlie" 8 | } 9 | 10 | group "cache" { 11 | network { 12 | port "db" { 13 | to = 6379 14 | } 15 | } 16 | 17 | task "redis" { 18 | driver = "docker" 19 | 20 | config { 21 | image = "redis:7" 22 | ports = ["db"] 23 | auth_soft_fail = true 24 | } 25 | 26 | resources { 27 | cpu = 500 28 | memory = 256 29 | } 30 | } 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /cni/README.md: -------------------------------------------------------------------------------- 1 | # Nomad CNI examples 2 | 3 | This folder contains Nomad job specifications and configuration files that show 4 | how Nomad can use [Container Network Interface (CNI)](https://cni.dev) plugins 5 | and network configurations for running workloads. 6 | 7 | ## Examples 8 | 9 | - [`diy_bridge`](diy_bridge) - Create your own bridge network similar to the one Nomad makes 10 | for `network_mode = "bridge"` jobs. 11 | -------------------------------------------------------------------------------- /cni/diy_brige/README.md: -------------------------------------------------------------------------------- 1 | # DIY CNI bridge network 2 | 3 | ## About 4 | 5 | This example uses a CNI configuration based on Nomad's internal CNI template 6 | used to implement the `network_mode = "bridge"` behavior. 7 | 8 | ## Requirements 9 | 10 | This demonstration requires a Linux Nomad client. 11 | 12 | ## Running 13 | 14 | ### Validate CNI plugins are installed 15 | 16 | Generally you will install the CNI plugins as part of setting up a Nomad client, 17 | so this step may already be complete. 
However, for development clients that 18 | aren't using Nomad's `bridge` network mode, these might not have been installed. 19 | 20 | Nomad clients look for CNI plugins in the path given in the client's [`cni_path`], 21 | `/opt/cni/bin` by default. Check your client configuration to see if this value 22 | has been overridden. 23 | 24 | Check these folders for the CNI plugins. Verify that you have all the following binaries somewhere in the folders listed in your `cni_path`. 25 | 26 | - `bridge` 27 | - `firewall` 28 | - `host-local` 29 | - `loopback` 30 | -------------------------------------------------------------------------------- /cni/diy_brige/diybridge.conflist: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.4.0", 3 | "name": "diybridge", 4 | "plugins": [ 5 | { 6 | "type": "loopback" 7 | }, 8 | { 9 | "type": "bridge", 10 | "bridge": "diybridge", 11 | "ipMasq": true, 12 | "isGateway": true, 13 | "forceAddress": true, 14 | "hairpinMode": true, 15 | "ipam": { 16 | "type": "host-local", 17 | "ranges": [ 18 | [ 19 | { 20 | "subnet": "192.168.1.0/24" 21 | } 22 | ] 23 | ], 24 | "routes": [ 25 | { "dst": "0.0.0.0/0" } 26 | ] 27 | } 28 | }, 29 | { 30 | "type": "firewall", 31 | "backend": "iptables", 32 | "iptablesAdminChainName": "DIY-BRIDGE" 33 | }, 34 | { 35 | "type": "portmap", 36 | "capabilities": {"portMappings": true}, 37 | "snat": true 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /cni/diy_brige/example.nomad: -------------------------------------------------------------------------------- 1 | variable "dcs" { 2 | description = "Datacenters to run job in." 3 | type = list(string) 4 | default = ["dc1"] 5 | } 6 | 7 | job "example" { 8 | datacenters = ["dc1"] 9 | 10 | group "test" { 11 | network { 12 | mode = "cni/diybridge" 13 | } 14 | 15 | task "alpine" { 16 | driver = "docker" 17 | 18 | config { 19 | image = "busybox:latest" 20 | command = "sleep" 21 | args = ["infinity"] 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /cni/diy_brige/repro.nomad: -------------------------------------------------------------------------------- 1 | variable "dcs" { 2 | type = list(string) 3 | default = ["dc1"] 4 | description = "Nomad datacenters in which to run the job." 
5 | } 6 | 7 | job "example" { 8 | datacenters = var.dcs 9 | 10 | group "g1" { 11 | 12 | network { 13 | mode = "bridge" 14 | port "foo" { 15 | to = 1337 16 | } 17 | } 18 | 19 | task "nc-alpine" { 20 | driver = "docker" 21 | config { 22 | image = "alpine" 23 | args = ["nc", "-lk", "-p", "${NOMAD_PORT_foo}", "-e", "cat"] 24 | } 25 | 26 | resources { 27 | cpu = 100 28 | memory = 64 29 | } 30 | } 31 | } 32 | } 33 | 34 | -------------------------------------------------------------------------------- /cni/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "test" { 5 | network { 6 | mode = "cni/mynet3" 7 | } 8 | 9 | task "alpine" { 10 | driver = "docker" 11 | 12 | config { 13 | image = "alpine:latest" 14 | command = "sh" 15 | args = ["-c", "while true; do sleep 300; done"] 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /complex_meta/template_env.nomad: -------------------------------------------------------------------------------- 1 | job "template" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | 5 | group "group" { 6 | task "meta-output" { 7 | driver = "raw_exec" 8 | 9 | config { 10 | command = "bash" 11 | args = ["-c", "echo $RULES | jq ."] 12 | } 13 | 14 | template { 15 | destination = "secrets/rules.env" 16 | env = true 17 | data = < volume.hcl 27 | 28 | nomad volume register volume.hcl 29 | 30 | echo "querying volume $UUID..." 31 | nomad volume status $UUID 32 | -------------------------------------------------------------------------------- /csi/hostpath/file/csi-hostpath-driver.nomad: -------------------------------------------------------------------------------- 1 | job "csi-hostpath-driver" { 2 | datacenters = ["dc1"] 3 | 4 | group "csi" { 5 | task "driver" { 6 | driver = "docker" 7 | 8 | config { 9 | image = "quay.io/k8scsi/hostpathplugin:v1.2.0" 10 | 11 | args = [ 12 | "--drivername=csi-hostpath", 13 | "--v=5", 14 | "--endpoint=unix://csi/csi.sock", 15 | "--nodeid=foo", 16 | ] 17 | 18 | // all known CSI plugins will require privileged=true 19 | // because they need to add mountpoints. In the ACLs 20 | // design we may make csi_plugin implicitly add the 21 | // appropriate privileges.
22 | privileged = true 23 | } 24 | 25 | csi_plugin { 26 | id = "csi-hostpath" 27 | type = "monolith" 28 | mount_dir = "/csi" 29 | } 30 | } 31 | } 32 | } 33 | 34 | -------------------------------------------------------------------------------- /csi/hostpath/file/job.nomad: -------------------------------------------------------------------------------- 1 | job "alpine" { 2 | datacenters = ["dc1"] 3 | 4 | group "alloc" { 5 | restart { 6 | attempts = 10 7 | interval = "5m" 8 | delay = "25s" 9 | mode = "delay" 10 | } 11 | 12 | volume "jobVolume" { 13 | type = "csi" 14 | read_only = false 15 | source = "test-volume0" 16 | } 17 | 18 | task "docker" { 19 | driver = "docker" 20 | 21 | volume_mount { 22 | volume = "jobVolume" 23 | destination = "/srv" 24 | read_only = false 25 | } 26 | 27 | config { 28 | image = "alpine" 29 | command = "sh" 30 | args = ["-c","while true; do sleep 10; done"] 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /csi/hostpath/file/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # create the volume in the "external provider" 4 | 5 | PLUGIN_ID=$1 6 | VOLUME_NAME=$2 7 | 8 | # non-dev mode 9 | # CSI_ENDPOINT="/var/nomad/client/csi/monolith/$PLUGIN_ID/csi.sock" 10 | 11 | # dev mode path is going to be in a tempdir 12 | PLUGIN_DOCKER_ID=$(docker ps | grep hostpath | awk -F' +' '{print $1}') 13 | CSI_ENDPOINT=$(docker inspect $PLUGIN_DOCKER_ID | jq -r '.[0].Mounts[] | select(.Destination == "/csi") | .Source')/csi.sock 14 | 15 | echo "creating volume..." 16 | UUID=$(sudo csc --endpoint $CSI_ENDPOINT controller create-volume $VOLUME_NAME --cap 1,2,ext4 | grep -o '".*"' | tr -d '"') 17 | 18 | echo "registering volume $UUID..." 19 | 20 | echo $(printf 'id = "%s" 21 | name = "%s" 22 | type = "csi" 23 | external_id = "%s" 24 | plugin_id = "%s" 25 | access_mode = "single-node-writer" 26 | attachment_mode = "file-system"' $VOLUME_NAME $VOLUME_NAME $UUID $PLUGIN_ID) > volume.hcl 27 | 28 | nomad volume register volume.hcl 29 | 30 | echo "querying volume $UUID..." 
31 | nomad volume status $UUID 32 | -------------------------------------------------------------------------------- /deployments/failing_deployment/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | service { 12 | name = "redis-cache" 13 | tags = ["global", "cache"] 14 | port = "db" 15 | check { 16 | name = "alive" 17 | type = "tcp" 18 | interval = "10s" 19 | timeout = "2s" 20 | } 21 | } 22 | 23 | task "redis" { 24 | driver = "docker" 25 | 26 | config { 27 | image = "redis:7" 28 | ports = ["db"] 29 | } 30 | 31 | resources { 32 | cpu = 500 33 | memory = 256 34 | } 35 | } 36 | } 37 | } -------------------------------------------------------------------------------- /docker/auth_from_template/auth.nomad: -------------------------------------------------------------------------------- 1 | job "auth" { 2 | 3 | type = "service" 4 | datacenters = ["dc1"] 5 | 6 | group "docker" { 7 | 8 | task "redis" { 9 | driver = "docker" 10 | 11 | template { 12 | destination = "secrets/secret.env" 13 | env = true 14 | change_mode = "noop" 15 | data = < "$res_file" 10 | 11 | for ALLOC_INFO in $(getJobAllocIds example) 12 | do 13 | NODENAME=${ALLOC_INFO##*|} 14 | ALLOC_ID=${ALLOC_INFO%%|*} 15 | DOCKERNAME=$(nomad alloc exec ${ALLOC_ID} cat /etc/hostname) 16 | printf "%s\t%s\t%s\n" $ALLOC_ID $NODENAME $DOCKERNAME >> "$res_file" 17 | done 18 | 19 | column -t -s"$(printf "\t")" $res_file 20 | rm -rf "$res_file" 21 | -------------------------------------------------------------------------------- /docker/docker_entrypoint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | ENTRYPOINT ["ping"] 3 | CMD ["www.google.com"] 4 | 5 | -------------------------------------------------------------------------------- /docker/docker_image_not_found/README.md: -------------------------------------------------------------------------------- 1 | # Docker Image Not Found 2 | 3 | This folder contains examples that demonstrate what happens when a requested Docker image cannot be found.
4 | 5 | * **restart.nomad** - contains a restart stanza that will cause the task to restart infinitely on the same client 6 | * **reschedule.nomad** - will utilize the defaults and reschedule onto other nodes in Nomad 0.8+ 7 | 8 | -------------------------------------------------------------------------------- /docker/docker_image_not_found/reschedule.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | group "group" { 4 | task "broken" { 5 | driver = "docker" 6 | config { 7 | image = "this_is_not_an_image:latest" 8 | } 9 | } 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /docker/docker_image_not_found/restart.nomad: -------------------------------------------------------------------------------- 1 | job "restart" { 2 | datacenters = ["dc1"] 3 | meta { 4 | "serial_num" = "2" 5 | } 6 | group "group" { 7 | restart { 8 | attempts = 2 9 | delay = "30s" 10 | interval = "1m" 11 | mode = "delay" 12 | } 13 | task "broken" { 14 | driver = "docker" 15 | config { 16 | image = "this_is_not_an_image:latest" 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /docker/docker_interpolated_image_name/README.md: -------------------------------------------------------------------------------- 1 | # Using interpolated Docker image versions 2 | 3 | Prerequisites: 4 | 5 | - Nomad 6 | - Docker 7 | - Consul 8 | 9 | Rough Notes: 10 | 11 | - The Docker image path is interpolated 12 | - The Nomad `template` block can be used to create environment variables and has access to Consul values 13 | - You can use the `keyOrDefault` template function to fetch a value from Consul KV 14 | - You can set and update the value using the `consul kv put` command. 15 | - Depending on template `change_mode`, this might restart the job. 16 | - Image caching is at play, so immutable tags help this scenario 17 | 18 | ```shell-session 19 | consul kv put service/redis/version 3.2 20 | ``` 21 | -------------------------------------------------------------------------------- /docker/docker_interpolated_image_name/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | service { 12 | tags = ["redis", "cache"] 13 | port = "db" 14 | 15 | check { 16 | name = "alive" 17 | type = "tcp" 18 | interval = "10s" 19 | timeout = "2s" 20 | } 21 | } 22 | task "redis" { 23 | template { 24 | data = <

Welcome to the Bar Service. You are on ${NOMAD_IP_http}.", 30 | ] 31 | } 32 | 33 | artifact { 34 | source = "https://github.com/hashicorp/http-echo/releases/download/v0.2.3/http-echo_0.2.3_linux_amd64.tar.gz" 35 | 36 | options { 37 | checksum = "sha256:e30b29b72ad5ec1f6dfc8dee0c2fcd162f47127f2251b99e47b9ae8af1d7b917" 38 | } 39 | } 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /httpd_site/README.md: -------------------------------------------------------------------------------- 1 | # httpd site 2 | 3 | This job will download a website tarball into the allocation, spin up 4 | the Apache webserver Docker image (2.4-alpine), and mount the site content 5 | into the container. 6 | 7 | -------------------------------------------------------------------------------- /httpd_site/httpd.nomad: -------------------------------------------------------------------------------- 1 | job "httpd_site" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | update { 5 | stagger = "5s" 6 | max_parallel = 1 7 | } 8 | group "httpd" { 9 | count = 1 10 | network { 11 | port "http" { 12 | to = 80 13 | } 14 | } 15 | 16 | task "httpd-docker" { 17 | artifact { 18 | source = "https://raw.githubusercontent.com/angrycub/nomad_example_jobs/master/httpd_site/site-content.tgz" 19 | destination = "tarball" 20 | } 21 | driver = "docker" 22 | config { 23 | image = "httpd:2.4-alpine" 24 | volumes = [ 25 | "tarball:/usr/local/apache2/htdocs" 26 | ] 27 | ports = ["http"] 28 | } 29 | resources { 30 | cpu = 200 31 | memory = 32 32 | } 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /httpd_site/make_site.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "📦 Creating site tarball..." 4 | cd site-content && 5 | tar -zcvf ../site-content.tgz * && 6 | cd .. 7 | -------------------------------------------------------------------------------- /httpd_site/site-content.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/httpd_site/site-content.tgz -------------------------------------------------------------------------------- /httpd_site/site-content/about.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html> 3 | <head> 4 | <link rel="stylesheet" type="text/css" href="css/style.css"> 5 | <title>About the job</title> 6 | </head> 7 | <body> 8 | <h1>About the job</h1>

9 | <p>This repository that contains this job can be found on GitHub at 10 | <a href="https://github.com/angrycub/nomad_example_jobs/tree/master/httpd_site">angrycub/nomad_example_jobs/httpd_site</a>. The specific site code is in the site-content folder.</p> 11 | 12 | <p> 13 | <a href="index.html">Return to Home.</a> 14 | </p> 15 | </body> 16 | </html> -------------------------------------------------------------------------------- /httpd_site/site-content/css/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: "Helvetica Neue","Helvetica","Arial", sans-serif; 3 | } 4 | h1 { 5 | color: white; 6 | text-shadow: 1px 1px 2px black, 0 0 25px blue, 0 0 5px darkblue; 7 | width: auto; 8 | border-bottom: 1px solid #333; 9 | } 10 | code { 11 | background: #EEE; 12 | border: 1px solid #CCC; 13 | border-radius: 5px; 14 | padding: 3px; 15 | } -------------------------------------------------------------------------------- /httpd_site/site-content/index.html: -------------------------------------------------------------------------------- 1 | <!DOCTYPE html> 2 | <html> 3 | <head> 4 | <link rel="stylesheet" type="text/css" href="css/style.css"> 5 | <title>Welcome to the site</title> 6 | </head> 7 | <body> 8 |

<h1>Howdy!</h1> 9 | <p>This is an example site to demonstrate fetching a resource as a tarball into 10 | a Nomad job and mounting it to a Docker Container.</p> 11 | <p>
There's an <a href="about.html">About</a> page too, for fun.</p> 12 | </body> 13 | </html> -------------------------------------------------------------------------------- /ipv6/SimpleHTTPServer/sample.nomad: -------------------------------------------------------------------------------- 1 | # This job will create a SimpleHTTPServer that is IPv6 enabled. This will allow 2 | # a user to browse around in an alloc dir. Not spectacularly useful, but is a 3 | # reasonable facsimile of a real workload. 4 | job "http6" { 5 | datacenters = ["dc1"] 6 | group "group" { 7 | count = 1 8 | 9 | task "server" { 10 | template { 11 | data = < Monitoring evaluation "b2d818af" 9 | Evaluation triggered by job "jar-test.nomad" 10 | ==> Monitoring evaluation "b2d818af" 11 | Evaluation within deployment: "a2ba8e63" 12 | Allocation "6027314e" created: node "14ab9290", group "java" 13 | Evaluation status changed: "pending" -> "complete" 14 | ==> Evaluation "b2d818af" finished with status "complete" 15 | ``` 16 | 17 | ```shell-session 18 | $ nomad alloc logs 6027314e 19 | Counted 1515 chars. 20 | ``` 21 | 22 | ## Building the source 23 | 24 | ```shell-session 25 | $ javac --source=7 --target=7 -d bin src/Count.java 26 | $ jar cf jar/Count.jar -C bin . 27 | ``` 28 | 29 | Upload the jarfile wherever you like and update the `source` in the artifact stanza. 30 | -------------------------------------------------------------------------------- /java/jar-test/bin/Count.class: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/java/jar-test/bin/Count.class -------------------------------------------------------------------------------- /java/jar-test/jar/Count.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/java/jar-test/jar/Count.jar -------------------------------------------------------------------------------- /job_examples/base-batch.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | // because the sample payload terminates, running it as a 4 | // `batch` job lets it exit cleanly without needing a sleep loop 5 | type = "batch" 6 | group "group" { 7 | task "task" { 8 | driver = "exec" 9 | config { 10 | command = "env" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /job_examples/meta/README.md: -------------------------------------------------------------------------------- 1 | ## The `meta` Stanza 2 | 3 | The meta stanza can be used to provide unstructured key-value data to a Nomad job as an automatically-exported environment variable. This variable can be used as provided or can be used for more complex expressions via the Nomad `template` stanza. 4 | 5 | Documentation for the meta stanza can be found [here](https://www.nomadproject.io/docs/job-specification/meta) in the official Nomad documentation.
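As a quick sketch of that export behavior, each meta key surfaces in the task environment with a `NOMAD_META_` prefix (the job and key names below are invented for illustration):

```hcl
job "meta-env-sketch" {
  datacenters = ["dc1"]
  type        = "batch"

  meta {
    greeting = "hello"
  }

  group "group" {
    task "task" {
      driver = "exec"
      config {
        command = "sh"
        # Nomad exports each meta key as NOMAD_META_<key>, in both the
        # original case and all upper case.
        args = ["-c", "echo $NOMAD_META_greeting"]
      }
    }
  }
}
```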
6 | -------------------------------------------------------------------------------- /job_examples/meta/meta-batch.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | // because the sample payload terminates, running it as a 4 | // `batch` job allows for that without having to sleep loop 5 | type = "batch" 6 | 7 | meta { 8 | "meta_key_1" = "meta_value_1" 9 | } 10 | 11 | group "group" { 12 | task "task" { 13 | driver = "exec" 14 | config { 15 | command = "env" 16 | } 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /json-jobs/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | task "redis" { 12 | driver = "docker" 13 | 14 | config { 15 | image = "redis:7" 16 | 17 | ports = ["db"] 18 | } 19 | 20 | resources { 21 | cpu = 500 22 | memory = 256 23 | } 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /load_balancers/traefik/README.md: -------------------------------------------------------------------------------- 1 | ## Load Balancing with Traefik 2 | 3 | This material is from the HashiCorp [Learn tutorial][] 4 | 5 | 6 | [learn tutorial]: https://learn.hashicorp.com/nomad/load-balancing/traefik 7 | -------------------------------------------------------------------------------- /load_balancers/traefik/webapp.nomad: -------------------------------------------------------------------------------- 1 | job "demo-webapp" { 2 | datacenters = ["dc1"] 3 | 4 | group "demo" { 5 | count = 3 6 | 7 | network { 8 | port "http" {} 9 | } 10 | 11 | service { 12 | name = "demo-webapp" 13 | port = "http" 14 | tags = [ 15 | "charlie.enable=true", 16 | "charlie.http.routers.http.rule=Path(`/myapp`)", 17 | ] 18 | 19 | check { 20 | type = "http" 21 | path = "/" 22 | interval = "2s" 23 | timeout = "2s" 24 | } 25 | } 26 | 27 | task "server" { 28 | driver = "docker" 29 | 30 | config { 31 | image = "hashicorp/demo-webapp-lb-guide" 32 | } 33 | 34 | env { 35 | PORT = "${NOMAD_PORT_http}" 36 | NODE_IP = "${NOMAD_IP_http}" 37 | } 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /load_balancers/traefik/webapp2.nomad: -------------------------------------------------------------------------------- 1 | job "demo-webapp" { 2 | datacenters = ["dc1"] 3 | 4 | group "demo" { 5 | count = 3 6 | 7 | task "server" { 8 | env { 9 | PORT = "${NOMAD_PORT_http}" 10 | NODE_IP = "${NOMAD_IP_http}" 11 | } 12 | 13 | driver = "docker" 14 | 15 | config { 16 | image = "hashicorp/demo-webapp-lb-guide" 17 | } 18 | 19 | resources { 20 | network { 21 | mbits = 10 22 | port "http"{} 23 | } 24 | } 25 | 26 | service { 27 | name = "demo-webapp" 28 | port = "http" 29 | 30 | tags = [ 31 | "traefik.enable=true", 32 | "traefik.http.routers.http.rule=Path(`/myapp`)", 33 | ] 34 | 35 | check { 36 | type = "http" 37 | path = "/" 38 | interval = "2s" 39 | timeout = "2s" 40 | } 41 | } 42 | } 43 | } 44 | } 45 | 46 | -------------------------------------------------------------------------------- /meta/README.md: -------------------------------------------------------------------------------- 1 | ## Meta Interpolation 2 | 3 | This example attempts to perform interpolation in the meta stanza which as of Nomad 0.10 does not work. 
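If you need the composed value inside the task anyway, one workaround is to assemble it in a `template` block, since templates do evaluate the `env` function at render time. A minimal sketch (the destination path is illustrative):

```hcl
template {
  destination = "local/interp.env"
  env         = true
  data        = <<EOH
TEST_INTERPOLATION={{ env "NOMAD_DC" }}-{{ env "NOMAD_JOB_NAME" }}
EOH
}
```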
4 | 5 | You can run this example in your cluster and then run: 6 | 7 | ```console 8 | $ nomad alloc exec «allocation id» /bin/sh -c env 9 | ``` 10 | 11 | The goal is for the two variables to contain the same data; however: 12 | 13 | ```text 14 | ENV_TEST_INTERPOLATION=dc1-meta-stanza-test-job 15 | TEST_INTERPOLATION={{ env NOMAD_DC }}-{{ env NOMAD_JOB_NAME }} 16 | ``` 17 | 18 | -------------------------------------------------------------------------------- /meta/example.nomad: -------------------------------------------------------------------------------- 1 | job "meta-stanza-test-job" { 2 | datacenters = ["dc1"] 3 | 4 | meta { 5 | TEST_NUMBER = 1 6 | TEST_STRING = "string" 7 | # Interpolation here fails for both node attributes (e.g. ${node.datacenter}) and runtime environment variables 8 | TEST_INTERPOLATION = "{{ env NOMAD_DC }}-{{ env NOMAD_JOB_NAME }}" 9 | } 10 | 11 | group "meta-stanza-test-group" { 12 | network { 13 | port "http" { 14 | to = 5000 15 | } 16 | } 17 | 18 | task "meta-stanza-test-task" { 19 | driver = "docker" 20 | 21 | config { 22 | image = "registry:latest" 23 | ports = ["http"] 24 | } 25 | 26 | env { 27 | TEST_NUMBER = "${NOMAD_META_TEST_NUMBER}" 28 | TEST_STRING = "${NOMAD_META_TEST_STRING}" 29 | TEST_INTERPOLATION = "${NOMAD_META_TEST_INTERPOLATION}" 30 | ENV_TEST_INTERPOLATION = "${NOMAD_DC}-${NOMAD_JOB_NAME}" 31 | } 32 | } 33 | } 34 | } -------------------------------------------------------------------------------- /minecraft/plugin.nomad: -------------------------------------------------------------------------------- 1 | job "csi-plugin" { 2 | datacenters = ["dc1"] 3 | 4 | group "csi" { 5 | task "plugin" { 6 | driver = "docker" 7 | 8 | config { 9 | image = "quay.io/k8scsi/hostpathplugin:v1.2.0" 10 | privileged = true 11 | args = [ 12 | "--drivername=csi-hostpath", 13 | "--v=5", 14 | "--endpoint=unix://csi/csi.sock", 15 | "--nodeid=foo", 16 | ] 17 | } 18 | 19 | csi_plugin { 20 | id = "hostpath-plugin0" 21 | type = "monolith" 22 | mount_dir = "/csi" 23 | } 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /monitoring/sensu/fabio-docker.nomad: -------------------------------------------------------------------------------- 1 | job "fabio" { 2 | datacenters = ["dc1"] 3 | type = "system" 4 | 5 | update { 6 | stagger = "5s" 7 | max_parallel = 1 8 | } 9 | 10 | group "fabio" { 11 | network { 12 | port "proxy" { 13 | static = 9999 14 | to = 9999 15 | } 16 | 17 | port "ui" { 18 | static = 9998 19 | to = 9998 20 | } 21 | } 22 | 23 | task "fabio-docker" { 24 | driver = "docker" 25 | 26 | config { 27 | image = "fabiolb/fabio:latest" 28 | network_mode = "host" 29 | ports = ["proxy","ui"] 30 | } 31 | 32 | env { 33 | # FABIO_registry_consul_addr="${attr.unique.network.ip-address}:8500" 34 | } 35 | 36 | resources { 37 | cpu = 200 38 | memory = 32 39 | } 40 | } 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /nginx-fabio-clone/README.md: -------------------------------------------------------------------------------- 1 | # Creating an nginx configuration from fabio-style tagging 2 | 3 | ### Files 4 | * foo-service.nomad - the foo job.
Exercises the path stripping 5 | * bar-service.nomad - the bar job 6 | * tj.out = john's template rendered works, but fugly 7 | * tj.ct = john's template 8 | * e.ct example consul template trying to be fancy af 9 | * e.out rendered template 10 | 11 | ### Render template 12 | 13 | ``` 14 | consul-template --template="e.ct:e.out" --once 15 | ``` 16 | -------------------------------------------------------------------------------- /nginx-fabio-clone/tj.ct: -------------------------------------------------------------------------------- 1 | {{range services}} {{$name := .Name}} {{$service := service .Name}}{{if ne $name "nginx-wdg-lb-aus"}}{{if ne $name "nginx-wdg-lb"}} 2 | upstream {{$name}} { 3 | {{range $service}} server {{.Address}}:{{.Port}} max_fails=3 fail_timeout=60 weight=1; 4 | {{end}}} {{end}}{{end}}{{end}} 5 | 6 | server { 7 | listen 80; 8 | 9 | location / { 10 | root /usr/share/nginx/html/; 11 | index index.html; 12 | } 13 | 14 | location /status { 15 | stub_status; 16 | } 17 | 18 | {{range $services := services}} {{$name := .Name}}{{range $s_index, $service := service $name}}{{if eq $s_index 0}}{{range $tags := .Tags}}{{$portmap := . | regexMatch "urlprefix-:"}}{{if not $portmap}}{{if . | regexMatch "urlprefix-"}} 19 | location {{$tags | regexReplaceAll "urlprefix-" "" | regexReplaceAll "strip=.*$" ""}} { 20 | rewrite {{ $tags | regexReplaceAll "urlprefix-" "" | regexReplaceAll "\\s*strip\\s*=.*\\s*$" "" }}/(.*)$ /$1 break; 21 | proxy_pass http://{{$name}}; 22 | }{{end}}{{end}}{{end}}{{end}}{{end}}{{end}} 23 | } 24 | -------------------------------------------------------------------------------- /oom/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | task "redis" { 12 | driver = "docker" 13 | 14 | config { 15 | image = "redis:7" 16 | ports = ["db"] 17 | auth_soft_fail = true 18 | } 19 | 20 | resources { 21 | memory = 10 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /parameterized/docker_hello_world/hello-world.nomad: -------------------------------------------------------------------------------- 1 | job "hello-world.nomad" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | 5 | parameterized { } 6 | 7 | group "containers" { 8 | task "hello" { 9 | driver = "docker" 10 | 11 | config { 12 | image = "hello-world:latest" 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /parameterized/template.nomad: -------------------------------------------------------------------------------- 1 | job "«job_name»" { 2 | datacenters = ["«datacenter»"] 3 | 4 | group "«group_name»" { 5 | task "«job_name»" { 6 | driver = "«driver_type»" 7 | } 8 | } 9 | } -------------------------------------------------------------------------------- /parameterized/to_specific_client/example.nomad: -------------------------------------------------------------------------------- 1 | job "example.nomad" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | 5 | parameterized { 6 | meta_required = ["input_node_id"] 7 | meta_optional = [] 8 | payload = "forbidden" 9 | } 10 | 11 | group "cache" { 12 | 13 | constraint { 14 | attribute = "${node.unique.id}" 15 | value = "${NOMAD_META_INPUT_NODE_ID}" 16 | } 17 | 18 | task "task" { 19 | driver = "docker" 20 | 21 | config { 22 | image = "alpine" 23 | command = "sh" 24 | args 
= [ 25 | "-c", 26 | "env; while true; do sleep 300; done" 27 | ] 28 | } 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /parameterized/to_specific_client/workaround/README.md: -------------------------------------------------------------------------------- 1 | # A gross workaround 2 | 3 | This is a very gross workaround to synthesize some things I have learned recently. 4 | 5 | 6 | It leverages: 7 | 8 | - ugly shell script 9 | - python 10 | - Nomad HCL2 11 | 12 | 13 | ```bash 14 | RunOutput=`nomad job run -var node_id=f7bc1f2d-34b1-eaf8-b7d3-253f2e7de4d6 example.nomad` 15 | AllocId=$(echo "$RunOutput" | awk '/Allocation/{ print $2}'| tr -d "\"") 16 | if [ "$AllocId" == "" ] 17 | then 18 | echo "No allocation found" 19 | exit 1 20 | fi 21 | 22 | FullAllocId=$(nomad alloc status -verbose $AllocId | grep -e '^ID' | awk '{print $3}') 23 | ``` -------------------------------------------------------------------------------- /parameterized/to_specific_client/workaround/example.nomad: -------------------------------------------------------------------------------- 1 | variable "node_id" { 2 | type = string 3 | description = "The destination's Nomad node ID. Must be the full ID from `nomad node status -verbose`" 4 | } 5 | 6 | job "example.nomad" { 7 | datacenters = ["dc1"] 8 | type = "batch" 9 | 10 | group "cache" { 11 | 12 | constraint { 13 | attribute = "${node.unique.id}" 14 | value = var.node_id 15 | } 16 | 17 | task "task" { 18 | driver = "docker" 19 | 20 | config { 21 | image = "alpine" 22 | command = "sh" 23 | args = [ 24 | "-c", 25 | "env; sleep 5;" 26 | ] 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /parameterized/to_specific_client/workaround/rolling_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | ClientNodeIds=$(nomad node status -t '{{ range .}}{{printf "%s\n" .ID}}{{end}}') 5 | 6 | RunOutput=$(nomad job run -var node_id=f7bc1f2d-34b1-eaf8-b7d3-253f2e7de4d6 example.nomad) 7 | AllocId=$(echo "$RunOutput" | awk '/Allocation/{ print $2}'| tr -d "\" \t") 8 | if [ "$AllocId" == "" ] 9 | then 10 | echo "No allocation found" 11 | exit 1 12 | fi 13 | 14 | FullAllocId=$(nomad alloc status -verbose $AllocId | grep -e '^ID' | awk '{print $3}') 15 | 16 | ./watch.py $FullAllocId 17 | ExitCode=$? 18 | 19 | if [ $ExitCode -ne 0 ] 20 | then 21 | echo "Bailing out because of an error..." 22 | exit 2 23 | fi 24 | 25 | -------------------------------------------------------------------------------- /ports/README.md: -------------------------------------------------------------------------------- 1 | # Mapping ports into Nomad 2 | 3 | This example will show a job that uses both static and dynamic ports. -------------------------------------------------------------------------------- /ports/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | # the label for the `port` block is used to refer to that port in the rest of the job: 7 | # interpolation, docker port maps, etc.
8 | port "dynamic" { 9 | to = 6379 10 | } 11 | 12 | port "_443" { 13 | static = 443 14 | to = 6379 15 | } 16 | 17 | port "444" { 18 | static = 444 19 | to = 6379 20 | } 21 | } 22 | 23 | service { 24 | name = "redis-cache" 25 | tags = ["global", "cache"] 26 | port = "db" 27 | 28 | check { 29 | name = "alive" 30 | type = "tcp" 31 | interval = "10s" 32 | timeout = "2s" 33 | } 34 | } 35 | 36 | task "redis" { 37 | driver = "docker" 38 | 39 | config { 40 | image = "redis:7" 41 | ports = ["dynamic","_443", "444"] 42 | } 43 | } 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /preserve_state/jam.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | jobs=$(ls *.jsonjob) 4 | 5 | for I in ${jobs}; do 6 | echo "Jamming $I" 7 | curl -X PUT -d @$I http://127.0.0.1:4646/v1/jobs 8 | echo "" 9 | done 10 | -------------------------------------------------------------------------------- /preserve_state/nomad_debug: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python 2 | 3 | import urllib, json 4 | baseUrl = "http://127.0.0.1:4646" 5 | url = baseUrl+"/v1/jobs" 6 | response = urllib.urlopen(url) 7 | data = json.loads(response.read()) 8 | for job in data: 9 | print(job['Name'], job['Status'], job['Stop']) 10 | 11 | -------------------------------------------------------------------------------- /preserve_state/preserve.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | jobs=$(nomad status | grep ing | grep -v "/periodic-" |awk '{print $1}') 3 | echo $(echo "${jobs}" |wc -l) 4 | for I in ${jobs}; do 5 | echo "Exporting $I" 6 | nomad inspect $I > $I.jsonjob 7 | done 8 | -------------------------------------------------------------------------------- /qemu/README.md: -------------------------------------------------------------------------------- 1 | # TinyCore QEMU example 2 | 3 | This sample will start a TinyCore Linux VM configured 4 | with the SSH daemon enabled. It performs port forwarding 5 | using the QEMU commands so that Nomad can dynamically 6 | assign a HTTP and SSH port for the VM. 7 | 8 | You will need to serve the image some place so that it 9 | can be retrieved using the artifact stanza. 10 | 11 | The default SSH user is `tc` with `tinycore` as password. 
12 | -------------------------------------------------------------------------------- /qemu/imagebuilder/Core-current.iso: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/qemu/imagebuilder/Core-current.iso -------------------------------------------------------------------------------- /qemu/imagebuilder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | RUN export DEBIAN_FRONTEND=noninteractive && \ 4 | apt update && \ 5 | apt install -y \ 6 | qemu \ 7 | qemu-utils \ 8 | libguestfs-tools \ 9 | linux-image-generic \ 10 | nbdfuse \ 11 | nbd-client \ 12 | nbdkit \ 13 | nbdkit-plugin-guestfs 14 | 15 | RUN mkdir -p /mnt/cdrom /mnt/tinycore 16 | 17 | -------------------------------------------------------------------------------- /qemu/imagebuilder/core-image.qcow2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/qemu/imagebuilder/core-image.qcow2 -------------------------------------------------------------------------------- /qemu/tc.qcow2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/qemu/tc.qcow2 -------------------------------------------------------------------------------- /qemu/tinycore.qcow2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/qemu/tinycore.qcow2 -------------------------------------------------------------------------------- /raw_exec/env.nomad: -------------------------------------------------------------------------------- 1 | job "env" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "env" { 7 | driver = "raw_exec" 8 | config { 9 | command = "env" 10 | args = [] 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raw_exec/mkdir/README.md: -------------------------------------------------------------------------------- 1 | # Using mkdir 2 | 3 | This example demonstrates using mkdir to create a few directories on the host before running a job. 4 | 5 | - [mkdir.nomad](mkdir.nomad) - demonstrates the use of mkdir; however, it also illustrates that there is no bash expansion because there is no shell running to perform the expansion. 6 | 7 | - [mkdir-bash.nomad](mkdir-bash.nomad) - corrects the job to allow the creation of multiple directories via shell expansion by starting a shell and _then_ calling mkdir. 
8 | 9 | -------------------------------------------------------------------------------- /raw_exec/mkdir/mkdir-bash.nomad: -------------------------------------------------------------------------------- 1 | job "template" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "mkdir" { 7 | driver = "raw_exec" 8 | config { 9 | command = "bash" 10 | args = ["-c", "rm -rf /var/log/service; mkdir -p /var/log/service/{watch,export}"] 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raw_exec/mkdir/mkdir.nomad: -------------------------------------------------------------------------------- 1 | job "mkdir" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "mkdir" { 7 | driver = "raw_exec" 8 | config { 9 | command = "mkdir" 10 | # This will create a directory named `/var/log/service/{watch,export}` 11 | # which is probably not what you want. 12 | args = ["-p", "/var/log/service/{watch,export}"] 13 | } 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /raw_exec/ps.nomad: -------------------------------------------------------------------------------- 1 | job "ps" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "ps" { 7 | driver = "raw_exec" 8 | config { 9 | command = "ps" 10 | args = ["-aef", "--forest"] 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raw_exec/quoted_args/quoted_args.nomad: -------------------------------------------------------------------------------- 1 | job "template" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "payload" { 7 | driver = "raw_exec" 8 | config { 9 | command = "bash" 10 | args = ["-c", "bash -c \"tail -f /dev/null\""] 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raw_exec/quoted_args/quoted_args_2.nomad: -------------------------------------------------------------------------------- 1 | job "quoted" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | group "group" { 5 | count = 1 6 | task "payload" { 7 | driver = "exec" 8 | config { 9 | command = "bash" 10 | args = ["-c", "bash -c \"tail -f /dev/null\""] 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /raw_exec/user/example.nomad: -------------------------------------------------------------------------------- 1 | job "raw_exec" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "user" { 5 | task "test" { 6 | driver = "raw_exec" 7 | user = "nomad" 8 | 9 | config { 10 | command = "/usr/bin/whoami" 11 | args = [] 12 | } 13 | 14 | resources { 15 | cpu = 100 16 | memory = 100 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /reproductions/cpu_rescheduling/repro.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | group "cache" { 5 | network { 6 | port "db" { 7 | to = 6379 8 | } 9 | } 10 | 11 | task "redis" { 12 | driver = "docker" 13 | 14 | config { 15 | image = "redis:7" 16 | 17 | ports = ["db"] 18 | } 19 | 20 | resources { 21 | cpu = 500 22 | memory = 256 23 | } 24 | } 25 | } 26 | } 27 | --------------------------------------------------------------------------------
/rolling_upgrade/README.md: -------------------------------------------------------------------------------- 1 | ## Rolling Upgrades 2 | 3 | This sample demonstrates the behavior of rolling upgrades in a Nomad cluster. 4 | 5 | Instructions: 6 | 7 | Run the sample job: 8 | 9 | ``` 10 | nomad run example.nomad 11 | ``` 12 | 13 | This will deploy three instances of the sample redis container to the cluster. 14 | 15 | Upgrade the instances: 16 | 17 | ``` 18 | nomad run example-new.nomad 19 | ``` 20 | 21 | Nomad should perform a rolling upgrade of the three instances. It should wait for an instance to be healthy for one minute before moving to the next instance. 22 | 23 | > **NOTE:** The example job is currently broken and will not upgrade properly. The cv version presents an alternative configuration file structure that upgrades as expected. 24 | 25 | -------------------------------------------------------------------------------- /rolling_upgrade/cv-new.nomad: -------------------------------------------------------------------------------- 1 | job "rolling-upgrade-test" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | 5 | update { 6 | max_parallel = 1 7 | min_healthy_time = "1m" 8 | health_check = "task_states" 9 | } 10 | 11 | group "zookeeper" { 12 | restart { 13 | attempts = 2 14 | delay = "15s" 15 | interval = "1m" 16 | mode = "delay" 17 | } 18 | 19 | count = 3 20 | task "redis" { 21 | driver = "docker" 22 | config { 23 | image = "redis:4.0" 24 | } 25 | } 26 | } 27 | } 28 | 29 | -------------------------------------------------------------------------------- /rolling_upgrade/cv.nomad: -------------------------------------------------------------------------------- 1 | job "rolling-upgrade-test" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | 5 | update { 6 | max_parallel = 1 7 | min_healthy_time = "1m" 8 | health_check = "task_states" 9 | } 10 | 11 | group "zookeeper" { 12 | restart { 13 | attempts = 2 14 | delay = "15s" 15 | interval = "1m" 16 | mode = "delay" 17 | } 18 | 19 | count = 3 20 | task "redis" { 21 | driver = "docker" 22 | config { 23 | image = "redis:7" 24 | } 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /sentinel/README.md: -------------------------------------------------------------------------------- 1 | ## Sentinel Samples 2 | 3 | These jobs utilize Sentinel for enforcement, which is a Nomad Enterprise feature. To use Sentinel, ACLs must be enabled on all of the nodes and bootstrapped.
4 | 5 | -------------------------------------------------------------------------------- /sentinel/alwaysFalse.sentinel: -------------------------------------------------------------------------------- 1 | # Test policy always fails for demonstration purposes 2 | main = rule { false } 3 | 4 | -------------------------------------------------------------------------------- /sentinel/example.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | constraint { 5 | distinct_hosts = true 6 | } 7 | 8 | constraint { 9 | attribute = "${node.class}" 10 | value = "gpu" 11 | } 12 | group "cache" { 13 | network { 14 | port "db" {} 15 | } 16 | 17 | service { 18 | name = "global-redis-check" 19 | tags = ["global", "cache"] 20 | port = "db" 21 | check { 22 | name = "alive" 23 | type = "tcp" 24 | interval = "10s" 25 | timeout = "2s" 26 | } 27 | } 28 | 29 | task "redis" { 30 | driver = "docker" 31 | config { 32 | image = "redis:7" 33 | ports = ["db"] 34 | } 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /sentinel/exampleGroupMissingNodeClass.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | constraint { distinct_hosts = true } 5 | group "cache" { 6 | count = 1 7 | task "redis" { 8 | driver = "docker" 9 | config { 10 | image = "redis:7" 11 | port_map { 12 | db = 6379 13 | } 14 | } 15 | resources { 16 | network { 17 | port "db" {} 18 | } 19 | } 20 | } 21 | } 22 | group "cache2" { 23 | count = 1 24 | constraint { attribute = "${node.class}" value = "gpu" } 25 | task "redis" { 26 | driver = "docker" 27 | config { 28 | image = "redis:7" 29 | port_map { 30 | db = 6379 31 | } 32 | } 33 | resources { 34 | network { 35 | port "db" {} 36 | } 37 | } 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /sentinel/exampleGroupNodeClass.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | constraint { distinct_hosts = true } 5 | group "cache" { 6 | count = 1 7 | constraint { attribute = "${node.class}" value = "gpu" } 8 | task "redis" { 9 | driver = "docker" 10 | config { 11 | image = "redis:7" 12 | port_map { 13 | db = 6379 14 | } 15 | } 16 | resources { 17 | network { 18 | port "db" {} 19 | } 20 | } 21 | } 22 | } 23 | group "cache2" { 24 | count = 1 25 | constraint { attribute = "${node.class}" value = "gpu" } 26 | task "redis" { 27 | driver = "docker" 28 | config { 29 | image = "redis:7" 30 | port_map { 31 | db = 6379 32 | } 33 | } 34 | resources { 35 | network { 36 | port "db" {} 37 | } 38 | } 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /sentinel/exampleJobNodeClass.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | constraint { 5 | distinct_hosts = true 6 | } 7 | 8 | constraint { 9 | attribute = "${node.class}" 10 | value = "gpu" 11 | } 12 | 13 | group "cache" { 14 | network { 15 | port "db" { 16 | to = 6379 17 | } 18 | } 19 | 20 | service { 21 | name = "global-redis-check" 22 | tags = ["global", "cache"] 23 | port = "db" 24 | check { 25 | name = "alive" 26 | type = "tcp" 27 | interval = "10s" 28 | timeout = "2s" 29 | } 30 | } 31 | 32 | task "redis" { 33 | 
driver = "docker" 34 | 35 | config { 36 | image = "redis:7" 37 | ports = ["db"] 38 | } 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /sentinel/exampleNoNodeClass.nomad: -------------------------------------------------------------------------------- 1 | job "example" { 2 | datacenters = ["dc1"] 3 | 4 | constraint { 5 | distinct_hosts = true 6 | } 7 | 8 | group "cache" { 9 | network { 10 | port "db" { 11 | to = 6379 12 | } 13 | } 14 | 15 | task "redis" { 16 | driver = "docker" 17 | 18 | config { 19 | image = "redis:7" 20 | ports = ["db"] 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /sentinel/payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "Name": "anonymous", 3 | "Description": "Allow read-only access for anonymous requests", 4 | "Rules": " 5 | namespace \"default\" { 6 | policy = \"read\" 7 | } 8 | agent { 9 | policy = \"read\" 10 | } 11 | node { 12 | policy = \"read\" 13 | } 14 | " 15 | } 16 | -------------------------------------------------------------------------------- /server-variables/reset.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/angrycub/nomad_example_jobs/034cc3998d19e3ee2a573c0220e26e5dc9949299/server-variables/reset.sh -------------------------------------------------------------------------------- /sleepy/README.md: -------------------------------------------------------------------------------- 1 | ## Sleepy 2 | 3 | This is a set of binaries that perform dumb loops over time in the exec driver and log each time it wakes up. They are useful for creating workload simulators. 4 | 5 | 6 | -------------------------------------------------------------------------------- /sleepy/sleepy_bash/sleepy.nomad: -------------------------------------------------------------------------------- 1 | job sleepy { 2 | datacenters = ["dc1"] 3 | group "group" { 4 | count = 1 5 | 6 | ## You might want to constrain this, so here's one to help 7 | # constraint { 8 | # attribute = "${attr.unique.hostname}" 9 | # operator = "=" 10 | # value = "nomad-client-1.node.consul" 11 | # } 12 | 13 | task "sleepy.sh" { 14 | template { 15 | data =<&1 >/dev/null; do echo '.'; sleep 2; done"] 18 | } 19 | 20 | lifecycle { 21 | hook = "prestart" 22 | sidecar = false 23 | } 24 | 25 | resources { 26 | cpu = 200 27 | memory = 128 28 | } 29 | } 30 | 31 | task "myapp-container" { 32 | driver = "docker" 33 | 34 | config { 35 | image = "busybox" 36 | command = "sh" 37 | args = ["-c", "echo The app is running! && sleep 3600"] 38 | } 39 | 40 | resources { 41 | cpu = 200 42 | memory = 128 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /task_deps/k8sdoc/myservice.nomad: -------------------------------------------------------------------------------- 1 | job "myservice" { 2 | datacenters = ["dc1"] 3 | type = "service" 4 | 5 | group "myservice" { 6 | task "myservice" { 7 | driver = "docker" 8 | 9 | config { 10 | image = "busybox" 11 | command = "sh" 12 | args = ["-c", "echo The service is running! 
&& while true; do sleep 2; done"] 13 | } 14 | 15 | resources { 16 | cpu = 200 17 | memory = 128 18 | } 19 | 20 | service { 21 | name = "myservice" 22 | } 23 | } 24 | } 25 | } -------------------------------------------------------------------------------- /template/batch/README.md: -------------------------------------------------------------------------------- 1 | ## Batch Templates 2 | 3 | Using batch jobs can provide a way to experiment with templates. 4 | 5 | * **parameter.nomad** - This job demonstrates using a provided meta variable to create a composed key which could be used in another template tag, like key, service, secret, etc. 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /template/batch/context.nomad: -------------------------------------------------------------------------------- 1 | job "parameter" { 2 | datacenters = ["dc1"] 3 | type = "batch" 4 | group "group" { 5 | count = 1 6 | task "command" { 7 | driver = "exec" 8 | config { 9 | command = "bash" 10 | args = ["-c", "cat local/template.out"] 11 | } 12 | template { 13 | data = <