├── .github ├── dependabot.yml └── workflows │ ├── actionlint.yml │ ├── deploy-nomad-acls.yml │ ├── deploy-nomad-and-csi-plugins-gcp.yml │ ├── deploy-nomad-and-portworx.yml │ ├── deploy-nomad-basics.yml │ ├── deploy-nomad-consul-connect.yml │ ├── deploy-nomad-governance.yml │ ├── deploy-nomad-host-volumes.yml │ ├── deploy-nomad-integration-with-vault.yml │ ├── deploy-nomad-job-placement.yml │ ├── deploy-nomad-monitoring.yml │ ├── deploy-nomad-multi-region-federation.yml │ ├── deploy-nomad-multi-server-cluster.yml │ ├── deploy-nomad-simple-cluster.yml │ ├── deploy-nomad-update-strategies.yml │ ├── instruqt-track-deploy.yml │ ├── instruqt-track-test.yml │ └── nightly-test.yml ├── .gitignore ├── CODEOWNERS ├── LICENSE ├── README.md ├── common ├── Makefile ├── bin │ ├── alternative_track │ ├── check-make-prereqs │ ├── clean_id_and_checksums │ └── mk_help └── mk │ └── core.mk ├── docker └── nomad-integration-with-vault │ ├── application │ ├── Dockerfile │ ├── app.js │ ├── bin │ │ └── www │ ├── build.sh │ ├── package.json │ ├── public │ │ ├── css │ │ │ ├── app.css │ │ │ └── img │ │ │ │ ├── Consul_PrimaryLogo_FullColor.png │ │ │ │ ├── HashiCorp_PrimaryLogo_Black.png │ │ │ │ ├── Nomad_PrimaryLogo_FullColor.png │ │ │ │ ├── Packer_PrimaryLogo_FullColor.png │ │ │ │ ├── Terraform_PrimaryLogo_FullColor.png │ │ │ │ ├── Vagrant_PrimaryLogo_FullColor.png │ │ │ │ └── Vault_PrimaryLogo_FullColor.png │ │ └── js │ │ │ └── app.js │ ├── routes │ │ ├── api.js │ │ └── index.js │ └── views │ │ ├── error.hbs │ │ ├── index.hbs │ │ └── layout.hbs │ └── database │ ├── Dockerfile │ └── populate.sql ├── docs ├── README.md ├── index.html ├── index.md └── slides │ ├── README.md │ ├── aws │ ├── README.md │ ├── index.html │ ├── index.md │ ├── nomad-enterprise │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ └── nomad-oss │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ ├── azure │ ├── README.md │ ├── index.html │ ├── index.md │ ├── nomad-enterprise │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ └── nomad-oss │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ ├── gcp │ ├── README.md │ ├── index.html │ ├── index.md │ ├── nomad-enterprise │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ └── nomad-oss │ │ ├── index.html │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ └── nomad-2.md │ ├── index.html │ ├── index.md │ ├── korean │ └── multi-cloud │ │ └── nomad-oss │ │ ├── index.html │ │ ├── nomad-0.html │ │ ├── nomad-0.md │ │ ├── nomad-1.html │ │ ├── nomad-1.md │ │ ├── nomad-2.html │ │ ├── nomad-2.md │ │ ├── nomad-3.html │ │ ├── nomad-3.md │ │ ├── nomad-4.html │ │ ├── nomad-4.md │ │ ├── nomad-5.html │ │ ├── nomad-5.md │ │ ├── nomad-6.html │ │ ├── nomad-6.md │ │ ├── nomad-7.html │ │ ├── nomad-7.md │ │ ├── nomad-end.html │ │ └── nomad-end.md │ └── multi-cloud │ ├── advanced-nomad │ ├── images │ │ ├── Demote-Old-Servers.png │ │ ├── Introduce-New-Servers.png │ │ ├── Nomad-Default-Namespace.png │ │ ├── Nomad-Federation.png │ │ ├── Nomad-QA-Namespace.png │ │ ├── Nomad-Workload-Types.png │ │ ├── NomadServersFrom2Clusters.png │ │ ├── Nomad_Business_Value.png │ │ ├── Nomad_HashiStack_Velocity.png │ │ ├── Nomad_Use_Cases.png │ │ ├── Upgrade-Start.png │ │ ├── Upgraded-Servers.png │ │ ├── acl-overview.jpg │ │ └── nomad-multi-region.png │ ├── index.html │ ├── nomad-0.html 
│ ├── nomad-0.md │ ├── nomad-autoscaler.html │ ├── nomad-autoscaler.md │ ├── nomad-end.html │ ├── nomad-end.md │ ├── nomad-enterprise-platform.html │ ├── nomad-enterprise-platform.md │ ├── nomad-governance.html │ ├── nomad-governance.md │ ├── nomad-job-placement.html │ ├── nomad-job-placement.md │ ├── nomad-job-update-strategies.html │ ├── nomad-job-update-strategies.md │ ├── nomad-multiregion-deployments.html │ ├── nomad-multiregion-deployments.md │ ├── nomad-security-exanded.md │ ├── nomad-security.html │ ├── nomad-security.md │ ├── nomad-stateful-workloads.html │ ├── nomad-stateful-workloads.md │ ├── nomad-vault-integration.html │ └── nomad-vault-integration.md │ ├── index.html │ ├── index.md │ └── nomad-oss │ ├── images │ ├── AddSystemAlert1.png │ ├── AddSystemAlert2.png │ ├── Evaluation_Queue.png │ ├── EvictBusinessAlert1.png │ ├── EvictBusinessAlert2.png │ ├── Failed-Region.png │ ├── FullCluster.png │ ├── Multi-Region.png │ ├── Nomad-Federation.png │ ├── Nomad-Job-with-Deployments.png │ ├── Nomad-Job-with-task-groups-allocations.png │ ├── Nomad-Job_Status-Highlevel.png │ ├── Nomad-Workload-Types.png │ ├── Nomad-task-group-status-and-task.png │ ├── Nomad-task-group.png │ ├── Nomad-task-logs.png │ ├── Nomad-task.png │ ├── NomadRegion.png │ ├── Nomad_Business_Value.png │ ├── Nomad_Evaluation_Kickoff.png │ ├── Nomad_HashiStack_Velocity.png │ ├── Nomad_Overall_Flow.png │ ├── Nomad_Simple_Cluster_Topology.png │ ├── Nomad_Use_Cases.png │ ├── Nomad_eval_alloc.png │ ├── Queue_Processing.png │ ├── ServerElection.png │ ├── SystemAlerting.png │ └── nomad-architecture-region.png │ ├── index.html │ ├── nomad-0.html │ ├── nomad-0.md │ ├── nomad-1.html │ ├── nomad-1.md │ ├── nomad-2.html │ ├── nomad-2.md │ ├── nomad-3.html │ ├── nomad-3.md │ ├── nomad-4.html │ ├── nomad-4.md │ ├── nomad-5.html │ ├── nomad-5.md │ ├── nomad-6.html │ ├── nomad-6.md │ ├── nomad-7.html │ ├── nomad-7.md │ ├── nomad-end.html │ └── nomad-end.md ├── instructor-guides ├── README.md ├── advanced_nomad_INSTRUCTOR_GUIDE.md ├── images │ └── 2001_phone.jpg ├── intro_to_nomad_INSTRUCTOR_GUIDE.md └── success_with_remote_workshops.md └── instruqt-tracks ├── DEVELOPMENT.md ├── nomad-acls ├── Makefile ├── bootstrap-acls │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── config.yml ├── configure-client-acls │ ├── check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── setup-nomad-server-1 │ ├── solve-nomad-client-1 │ └── solve-nomad-client-2 ├── configure-server-acls │ ├── check-nomad-server-1 │ ├── check-nomad-server-2 │ ├── check-nomad-server-3 │ ├── setup-nomad-server-1 │ ├── solve-nomad-server-1 │ ├── solve-nomad-server-2 │ └── solve-nomad-server-3 ├── run-the-servers-and-clients │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-server-1 │ ├── setup-nomad-server-2 │ ├── setup-nomad-server-3 │ └── solve-nomad-server-1 ├── track.yml └── use-acls │ ├── check-nomad-server-1 │ ├── check-nomad-server-2 │ ├── check-nomad-server-3 │ ├── setup-nomad-server-1 │ ├── solve-nomad-server-1 │ ├── solve-nomad-server-2 │ └── solve-nomad-server-3 ├── nomad-and-csi-plugins-gcp ├── Makefile ├── config.yml ├── create-persistent-disk │ ├── check-cloud-client │ ├── setup-cloud-client │ └── solve-cloud-client ├── deploy-gcepd-driver │ ├── check-cloud-client │ ├── setup-cloud-client │ └── solve-cloud-client ├── deploy-mysql │ ├── check-cloud-client │ ├── setup-cloud-client │ └── solve-cloud-client ├── destroy-rerun-mysql │ ├── check-cloud-client │ ├── setup-cloud-client │ └── 
solve-cloud-client ├── track.yml └── verify-nomad-cluster-health │ ├── check-cloud-client │ ├── setup-cloud-client │ └── solve-cloud-client ├── nomad-basics ├── Makefile ├── config.yml ├── nomad-cli │ ├── check-nomad-server │ ├── setup-nomad-server │ └── solve-nomad-server ├── run-first-job │ ├── check-nomad-server │ └── solve-nomad-server ├── run-nomad-agent │ ├── check-nomad-server │ └── solve-nomad-server ├── track.yml └── use-nomad-http-api │ ├── check-nomad-server │ └── solve-nomad-server ├── nomad-consul-connect ├── Makefile ├── config.yml ├── nomad-and-consul-connect │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── track.yml └── verify-nomad-cluster-health │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── nomad-governance ├── 01-verify-nomad-cluster-health │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 02-nomad-auditing │ ├── assignment.md │ ├── check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── check-nomad-client-3 │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 03-namespaces-and-resource-quotas │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 04-nomad-acls │ ├── assignment.md │ ├── check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── check-nomad-client-3 │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 05-sentinel-policies │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 06-run-nomad-jobs-1 │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 07-run-nomad-jobs-2 │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── Makefile ├── config.yml ├── track.yml └── track_scripts │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ └── setup-nomad-server-1 ├── nomad-host-volumes ├── 01-verify-nomad-cluster-health │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 02-configure-host-volume │ ├── assignment.md │ ├── check-nomad-client-1 │ ├── setup-nomad-client-1 │ └── solve-nomad-client-1 ├── 03-deploy-mysql │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── 04-write-data │ ├── assignment.md │ ├── check-nomad-client-1 │ ├── setup-nomad-client-1 │ └── solve-nomad-client-1 ├── 05-stop-and-restart-job │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── Makefile ├── config.yml └── track.yml ├── nomad-integration-with-vault ├── 01-verify-agents │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-client-1 │ ├── setup-hashistack-client-2 │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 02-create-the-nomad-server-policy-and-token │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 03-create-a-vault-token-role │ ├── assignment.md │ 
├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 04-reconfigure-the-nomad-server │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 05-deploy-a-database │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 06-configure-the-vault-database-secrets-engine │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── 07-deploy-an-application │ ├── assignment.md │ ├── check-hashistack-server │ ├── setup-hashistack-server │ └── solve-hashistack-server ├── Makefile ├── config.yml ├── track.yml └── track_scripts │ ├── setup-hashistack-client-1 │ ├── setup-hashistack-client-2 │ └── setup-hashistack-server ├── nomad-job-placement ├── Makefile ├── config.yml ├── deploy-the-jobs │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── track.yml ├── use-affinity │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── use-constraint │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── use-spread │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 └── verify-nomad-cluster-health │ ├── check-nomad-server-1 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── nomad-monitoring ├── 01-fabio-and-prometheus-jobs │ ├── assignment.md │ ├── check-nomad-server │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-client-3 │ ├── setup-nomad-server │ └── solve-nomad-server ├── 02-add-alertmanager │ ├── assignment.md │ ├── check-nomad-server │ ├── setup-nomad-server │ └── solve-nomad-server ├── 03-add-web-server │ ├── assignment.md │ ├── check-nomad-server │ ├── setup-nomad-server │ └── solve-nomad-server ├── Makefile ├── config.yml └── track.yml ├── nomad-multi-region-federation ├── 01-federation │ ├── assignment.md │ ├── check-nomad-server-1-east │ ├── check-nomad-server-1-west │ ├── setup-nomad-client-1-east │ ├── setup-nomad-client-1-west │ ├── setup-nomad-client-2-east │ ├── setup-nomad-client-2-west │ ├── setup-nomad-server-1-east │ ├── setup-nomad-server-1-west │ ├── solve-nomad-server-1-east │ └── solve-nomad-server-1-west ├── 02-multi-region-deployments │ ├── assignment.md │ ├── check-nomad-server-1-west │ ├── setup-nomad-server-1-east │ ├── setup-nomad-server-1-west │ └── solve-nomad-server-1-west ├── 03-simulate-failed-deployment │ ├── assignment.md │ ├── check-nomad-server-1-west │ ├── setup-nomad-server-1-east │ ├── setup-nomad-server-1-west │ └── solve-nomad-server-1-west ├── Makefile ├── config.yml ├── track.yml └── track_scripts │ ├── setup-nomad-client-1-east │ ├── setup-nomad-client-1-west │ ├── setup-nomad-client-2-east │ ├── setup-nomad-client-2-west │ ├── setup-nomad-server-1-east │ └── setup-nomad-server-1-west ├── nomad-multi-server-cluster ├── 01-manual-clustering │ ├── assignment.md │ ├── check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── check-nomad-server-1 │ ├── check-nomad-server-2 │ ├── check-nomad-server-3 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-server-1 │ ├── setup-nomad-server-2 │ ├── setup-nomad-server-3 │ ├── solve-nomad-client-1 │ ├── solve-nomad-client-2 │ ├── solve-nomad-server-1 │ ├── solve-nomad-server-2 │ └── solve-nomad-server-3 ├── 02-automatic-clustering-with-consul │ ├── assignment.md │ ├── 
check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── check-nomad-server-1 │ ├── check-nomad-server-2 │ ├── check-nomad-server-3 │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-server-1 │ ├── setup-nomad-server-2 │ ├── setup-nomad-server-3 │ ├── solve-nomad-client-1 │ ├── solve-nomad-client-2 │ ├── solve-nomad-server-1 │ ├── solve-nomad-server-2 │ └── solve-nomad-server-3 ├── 03-nomad-and-consul-connect │ ├── assignment.md │ ├── check-nomad-server-1 │ ├── setup-nomad-server-1 │ └── solve-nomad-server-1 ├── Makefile ├── config.yml └── track.yml ├── nomad-simple-cluster ├── Makefile ├── config.yml ├── create-first-job-spec │ ├── check-nomad-server │ └── solve-nomad-server ├── modifying-a-job │ ├── check-nomad-server │ └── solve-nomad-server ├── run-the-server-and-clients │ ├── check-nomad-client-1 │ ├── check-nomad-client-2 │ ├── check-nomad-server │ ├── setup-nomad-client-1 │ ├── setup-nomad-client-2 │ ├── setup-nomad-server │ ├── solve-nomad-client-1 │ ├── solve-nomad-client-2 │ └── solve-nomad-server ├── run-your-first-job │ ├── check-nomad-server │ └── solve-nomad-server └── track.yml └── nomad-update-strategies ├── Makefile ├── blue-green-deployment ├── check-nomad-server-1 ├── setup-nomad-server-1 └── solve-nomad-server-1 ├── canary-deployment ├── check-nomad-server-1 ├── setup-nomad-server-1 └── solve-nomad-server-1 ├── config.yml ├── deploy-the-jobs ├── check-nomad-server-1 ├── setup-nomad-server-1 └── solve-nomad-server-1 ├── rolling-update ├── check-nomad-server-1 ├── setup-nomad-server-1 └── solve-nomad-server-1 ├── track.yml └── verify-nomad-cluster-health ├── check-nomad-server-1 ├── setup-nomad-client-1 ├── setup-nomad-client-2 ├── setup-nomad-client-3 ├── setup-nomad-server-1 └── solve-nomad-server-1 /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | updates: 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: "daily" -------------------------------------------------------------------------------- /.github/workflows/actionlint.yml: -------------------------------------------------------------------------------- 1 | # If the repository is public, be sure to change to GitHub hosted runners 2 | name: Lint GitHub Actions Workflows 3 | on: 4 | push: 5 | pull_request: 6 | permissions: 7 | contents: read 8 | jobs: 9 | actionlint: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac # v4.0.0 13 | - name: "Check workflow files" 14 | uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint:latest 15 | 16 | -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-acls.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-acls 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-acls/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-acls" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-and-csi-plugins-gcp.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-and-csi-plugins-gcp 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-and-csi-plugins-gcp/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-and-csi-plugins-gcp" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-and-portworx.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-and-portworx 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-and-portworx/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-and-portworx" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-basics.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-basics 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-basics/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-basics" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-consul-connect.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-consul-connect 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-consul-connect/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-consul-connect" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-governance.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-governance 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-governance/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-governance" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-host-volumes.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-host-volumes 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-host-volumes/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-host-volumes" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-integration-with-vault.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-integration-with-vault 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-integration-with-vault/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-integration-with-vault" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-job-placement.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-job-placement 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-job-placement/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-job-placement" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-monitoring.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-monitoring 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-monitoring/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-monitoring" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-multi-region-federation.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-multi-region-federation 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-multi-region-federation/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-multi-region-federation" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-multi-server-cluster.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-multi-server-cluster 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-multi-server-cluster/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-multi-server-cluster" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-simple-cluster.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-simple-cluster 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-simple-cluster/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-simple-cluster" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.github/workflows/deploy-nomad-update-strategies.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | name: hashicorp/field-workshops-nomad/deploy-nomad-update-strategies 4 | on: 5 | workflow_dispatch: 6 | push: 7 | branches: 8 | - 'main' 9 | paths: 10 | - 'instruqt-tracks/nomad-update-strategies/**' 11 | permissions: 12 | contents: read 13 | jobs: 14 | deploy-track: 15 | uses: ./.github/workflows/instruqt-track-deploy.yml 16 | with: 17 | working_directory: "instruqt-tracks/nomad-update-strategies" 18 | INSTRUQT_CLI_URI: ${{ vars.INSTRUQT_CLI_URI }} 19 | secrets: 20 | INSTRUQT_TOKEN: ${{ secrets.INSTRUQT_TOKEN }} 21 | SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @hashicorp/customer-design-labs 2 | * @hashicorp/cdl-workshop-contributors 3 | -------------------------------------------------------------------------------- /common/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # This should be symlinked into each individual track 5 | # directory - not useful to run it in the 'common' directory 6 | 7 | REPO_TOP=$(shell git rev-parse --show-toplevel) 8 | 9 | include ${REPO_TOP}/common/mk/core.mk 10 | 11 | -------------------------------------------------------------------------------- /common/bin/check-make-prereqs: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | err=0 6 | 7 | which yq >/dev/null 2>&1 8 | if [ $? -ne 0 ]; then 9 | echo "You must install the 'yq' tool" 10 | echo "See https://mikefarah.gitbook.io/yq/" 11 | err=1 12 | fi 13 | 14 | if [ "${err}" -ne 0 ]; then 15 | exit 1 16 | fi 17 | -------------------------------------------------------------------------------- /common/bin/clean_id_and_checksums: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | # When you upload a track to Instruqt, it 'helpfully' adds an 6 | # 'id:' and 'checksum:' field to your 'track.yml', and adds 7 | # 'id:' to all of your 'assignment.md' files, which it appears 8 | # to use internally to associate things. 
If you attempt to make 9 | # a test or alternate version of a track and leave these in, 10 | # Instruqt can get confused, ranging from complaining at you to 11 | # overwriting parts of the original track. But you can safely 12 | # delete these lines from those files, which is how we get around 13 | # this in general 14 | 15 | # Assumes it is being run from make, in a track directory 16 | 17 | if [ ! -f track.yml ] || [ ! -f config.yml ]; then 18 | echo "This assumes it is being run from a Makefile in the track directory" 19 | echo "You do not appear to be in one" 20 | exit 1 21 | fi 22 | 23 | # Remove from 'track.yml' 24 | sed -i '' \ 25 | -e '/^id: .*/d' \ 26 | -e '/^checksum: .*/d' \ 27 | track.yml 28 | 29 | # For each 'assignment.md' in a sub-directory 30 | # 1. delete challenge id 31 | 32 | for f in */assignment.md; do 33 | sed -i '' \ 34 | -e '/^id: .*/d' \ 35 | "${f}" 36 | done 37 | -------------------------------------------------------------------------------- /common/bin/mk_help: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | cat < alternate_track\` 11 | 12 | In the directory for a given track, do the right thing to prepare 13 | the track to be an alternate version: remove track and challenge IDs, 14 | and track checksum, so Instruqt doesn't get confused, and change the 15 | track title and slug so it is easy to distinguish it from production 16 | versions of tracks. 17 | 18 | Running this command and then performing an \`instruqt track push\` 19 | should Just Do The Right Thing(TM) 20 | 21 | 22 | * \`make clean_id_and_checksums\` 23 | 24 | In the directory for a given track, remove track and challenge IDs 25 | and track checksum. Useful for tidying before creating a PR to merge 26 | changes back in. Automatically done by \`alternate_track\` 27 | 28 | 29 | EOF 30 | -------------------------------------------------------------------------------- /common/mk/core.mk: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | # Variables which should be set on the command line 5 | jira := "" 6 | 7 | .PHONY: help check_prereqs clean_id_and_checksums alternate_track 8 | 9 | help: check_prereqs 10 | ${REPO_TOP}/common/bin/mk_help 11 | 12 | check_prereqs: 13 | ${REPO_TOP}/common/bin/check-make-prereqs 14 | 15 | clean_id_and_checksums: check_prereqs 16 | ${REPO_TOP}/common/bin/clean_id_and_checksums 17 | 18 | alternate_track: clean_id_and_checksums 19 | ${REPO_TOP}/common/bin/alternative_track $(jira) 20 | -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | FROM node:current-alpine 5 | LABEL author="Patrick Gryzan " 6 | LABEL description="This is the web client piece of an example node.js application" 7 | 8 | RUN apk --no-cache add curl 9 | 10 | RUN mkdir -p /app/node_modules && chown -R node:node /app 11 | 12 | WORKDIR /app 13 | 14 | COPY package*.json ./ 15 | 16 | USER node 17 | 18 | RUN npm install 19 | 20 | COPY --chown=node:node . .
21 | 22 | EXPOSE 3000 23 | 24 | CMD ["npm", "start" ] -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright (c) HashiCorp, Inc. 3 | # SPDX-License-Identifier: MPL-2.0 4 | 5 | 6 | docker build ./ -t pgryzan/demo-web -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "web", 3 | "version": "0.0.0", 4 | "private": true, 5 | "scripts": { 6 | "start": "node ./bin/www" 7 | }, 8 | "dependencies": { 9 | "cookie-parser": "~1.4.4", 10 | "debug": "~2.6.9", 11 | "express": "~4.16.1", 12 | "hbs": "^4.1.0", 13 | "http-errors": "~1.6.3", 14 | "morgan": "~1.9.1", 15 | "pg": "^7.18.1" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Consul_PrimaryLogo_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Consul_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/HashiCorp_PrimaryLogo_Black.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/HashiCorp_PrimaryLogo_Black.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Nomad_PrimaryLogo_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Nomad_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Packer_PrimaryLogo_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Packer_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Terraform_PrimaryLogo_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Terraform_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Vagrant_PrimaryLogo_FullColor.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Vagrant_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/css/img/Vault_PrimaryLogo_FullColor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docker/nomad-integration-with-vault/application/public/css/img/Vault_PrimaryLogo_FullColor.png -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/public/js/app.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | ////////////////////////////////////////////////////////////////////////////////////// 7 | // 8 | // file: app.js 9 | // author: Patrick Gryzan 10 | // date: 02/14/20 11 | // description: client side js code that makes the single page application (spa) come to life 12 | // 13 | ////////////////////////////////////////////////////////////////////////////////////// 14 | 15 | // view model 16 | function viewModel() { 17 | var self = this; 18 | 19 | self.members = ko.observableArray([]); 20 | self.url = '/api'; 21 | 22 | self.list = function() { 23 | $.ajax({ 24 | url: self.url, 25 | type: 'GET', 26 | success: function(result) { 27 | self.members(result); 28 | } 29 | }); 30 | }; 31 | 32 | self.list(); 33 | }; 34 | 35 | // document has been loaded and is ready, bind the view model 36 | $(document).ready(function() { 37 | ko.applyBindings(new viewModel()); 38 | }); -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/routes/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) HashiCorp, Inc. 3 | * SPDX-License-Identifier: MPL-2.0 4 | */ 5 | 6 | var express = require('express'); 7 | var router = express.Router(); 8 | 9 | /* GET home page. */ 10 | router.get('/', function(req, res, next) { 11 | 12 | res.render('index', { year: new Date().getFullYear() }); 13 | }); 14 | 15 | module.exports = router; 16 | -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/views/error.hbs: -------------------------------------------------------------------------------- 1 | {{! 2 | Copyright (c) HashiCorp, Inc. 3 | SPDX-License-Identifier: MPL-2.0 4 | }} 5 | 6 |

<h1>{{message}}</h1> 7 | <h2>{{error.status}}</h2> 8 | <pre>{{error.stack}}</pre>
9 | -------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/application/views/index.hbs: -------------------------------------------------------------------------------- 1 | {{! 2 | Copyright (c) HashiCorp, Inc. 3 | SPDX-License-Identifier: MPL-2.0 4 | }} 5 | 6 |
7 | 16 | 17 |
18 | 19 |
-------------------------------------------------------------------------------- /docker/nomad-integration-with-vault/database/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | #################################################################################### 5 | # 6 | # file: /db/Dockerfile 7 | # author: Patrick Gryzan 8 | # date: 02/14/20 9 | # description: Dockerfile for the Postgres database used for the demo application. 10 | # It inherits from the latest Postgres container. 11 | # It sets the username, password and database name. 12 | # Default information is populated on startup from populate.sql. 13 | # 14 | #################################################################################### 15 | 16 | FROM postgres:latest 17 | LABEL author="Patrick Gryzan " 18 | LABEL description="This is the database container of the demo application" 19 | 20 | ENV POSTGRES_DB demo 21 | ENV POSTGRES_USER demo 22 | ENV POSTGRES_PASSWORD demo 23 | 24 | ADD populate.sql /docker-entrypoint-initdb.d/ 25 | 26 | EXPOSE 5432 -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Slides for Nomad Workshops 2 | Please do **NOT** place any slides or any other content in this directory. Slides for Nomad workshops should be placed under the [slides](./slides) sub-directory of this directory. We only have this `docs` directory because it is required in order to publish the workshop slides with GitHub Pages. 3 | -------------------------------------------------------------------------------- /docs/slides/README.md: -------------------------------------------------------------------------------- 1 | # Slides for Nomad Workshops 2 | Slides for all Nomad workshops should be created using [Remark](https://remarkjs.com) and placed under this directory, which is organized by cloud and then by workshop. If a workshop targets a single cloud, its slides should be placed in a directory under that cloud's directory (aws, azure, or gcp). If a workshop can be used with multiple clouds, its slides should be placed in a directory under the [multi-cloud](./multi-cloud) directory. 3 | 4 | Please be sure to always put all slides for any workshop in its own directory. 5 | -------------------------------------------------------------------------------- /docs/slides/aws/README.md: -------------------------------------------------------------------------------- 1 | # Slides for Nomad Workshops that Target AWS 2 | Slides for all Nomad workshops that target AWS should be created using [Remark](https://remarkjs.com) and placed under this directory. 3 | 4 | Slides that target more than one cloud should be placed under the [multi-cloud](../multi-cloud) directory. 5 | 6 | Please be sure to always put all slides for any workshop in its own directory.
7 | -------------------------------------------------------------------------------- /docs/slides/aws/index.md: -------------------------------------------------------------------------------- 1 | name: nomad-field-workshops-slides 2 | class: title, shelf, no-footer, fullbleed 3 | background-image: url(https://hashicorp.github.io/field-workshops-assets/assets/bkgs/HashiCorp-Title-bkg.jpeg) 4 | count: false 5 | 6 | 7 | # Nomad AWS Field Workshop Slides 8 | ## Slides for Nomad AWS field workshops 9 | 10 | ![:scale 15%](https://hashicorp.github.io/field-workshops-assets/assets/logos/logo_nomad.png) 11 | 12 | ??? 13 | This is a title slide of a Remark.js slide show. 14 | 15 | --- 16 | layout: true 17 | 18 | .footer[ 19 | - Copyright © 2021 HashiCorp 20 | - ![:scale 100%](https://hashicorp.github.io/field-workshops-assets/assets/logos/HashiCorp_Icon_Black.svg) 21 | ] 22 | 23 | --- 24 | name: nomad-workshops-slides 25 | # Nomad AWS Workshop Slides 26 | ### This directory contains slides for Nomad workshops intended for use with AWS. 27 | ### Workshops for each cloud are organized by type: 28 | 1. Nomad OSS 29 | 1. Nomad Enterprise 30 | 31 | ??? 32 | This is a regular slide of a Remark.js slide show. 33 | -------------------------------------------------------------------------------- /docs/slides/azure/README.md: -------------------------------------------------------------------------------- 1 | # Slides for Nomad Workshops that Target Azure 2 | Slides for all Nomad workshops that target Azure should be created using [Remark](https://remarkjs.com) and placed under this directory. 3 | 4 | Slides that target more than one cloud should be placed under the [multi-cloud](../multi-cloud) directory. 5 | 6 | Please be sure to always put all slides for any workshop in its own directory. 7 | -------------------------------------------------------------------------------- /docs/slides/azure/index.md: -------------------------------------------------------------------------------- 1 | name: nomad-field-workshops-slides 2 | class: title, shelf, no-footer, fullbleed 3 | background-image: url(https://hashicorp.github.io/field-workshops-assets/assets/bkgs/HashiCorp-Title-bkg.jpeg) 4 | count: false 5 | 6 | 7 | # Nomad Azure Field Workshop Slides 8 | ## Slides for Nomad Azure field workshops 9 | 10 | ![:scale 15%](https://hashicorp.github.io/field-workshops-assets/assets/logos/logo_nomad.png) 11 | 12 | ??? 13 | This is a title slide of a Remark.js slide show. 14 | 15 | --- 16 | layout: true 17 | 18 | .footer[ 19 | - Copyright © 2021 HashiCorp 20 | - ![:scale 100%](https://hashicorp.github.io/field-workshops-assets/assets/logos/HashiCorp_Icon_Black.svg) 21 | ] 22 | 23 | --- 24 | name: nomad-workshops-slides 25 | # Nomad Azure Workshop Slides 26 | ### This directory contains slides for Nomad workshops intended for use with Azure. 27 | ### Workshops for each cloud are organized by type: 28 | 1. Nomad OSS 29 | 1. Nomad Enterprise 30 | 31 | ??? 32 | This is a regular slide of a Remark.js slide show. 33 | -------------------------------------------------------------------------------- /docs/slides/gcp/README.md: -------------------------------------------------------------------------------- 1 | # Slides for Nomad Workshops that Target GCP 2 | Slides for all Nomad workshops that target GCP should be created using [Remark](https://remarkjs.com) and placed under this directory. 3 | 4 | Slides that target more than one cloud should be placed under the [multi-cloud](../multi-cloud) directory. 
5 | 6 | Please be sure to always put all slides for any workshop in its own directory. 7 | -------------------------------------------------------------------------------- /docs/slides/gcp/index.md: -------------------------------------------------------------------------------- 1 | name: nomad-field-workshops-slides 2 | class: title, shelf, no-footer, fullbleed 3 | background-image: url(https://hashicorp.github.io/field-workshops-assets/assets/bkgs/HashiCorp-Title-bkg.jpeg) 4 | count: false 5 | 6 | 7 | # Nomad GCP Field Workshop Slides 8 | ## Slides for Nomad GCP field workshops 9 | 10 | ![:scale 15%](https://hashicorp.github.io/field-workshops-assets/assets/logos/logo_nomad.png) 11 | 12 | ??? 13 | This is a title slide of a Remark.js slide show. 14 | 15 | --- 16 | layout: true 17 | 18 | .footer[ 19 | - Copyright © 2021 HashiCorp 20 | - ![:scale 100%](https://hashicorp.github.io/field-workshops-assets/assets/logos/HashiCorp_Icon_Black.svg) 21 | ] 22 | 23 | --- 24 | name: nomad-workshops-slides 25 | # Nomad GCP Workshop Slides 26 | ### This directory contains slides for Nomad workshops intended for use with GCP. 27 | ### Workshops for each cloud are organized by type: 28 | 1. Nomad OSS 29 | 1. Nomad Enterprise 30 | 31 | ??? 32 | This is a regular slide of a Remark.js slide show. 33 | -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Demote-Old-Servers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Demote-Old-Servers.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Introduce-New-Servers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Introduce-New-Servers.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad-Default-Namespace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad-Default-Namespace.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad-Federation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad-Federation.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad-QA-Namespace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad-QA-Namespace.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad-Workload-Types.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad-Workload-Types.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/NomadServersFrom2Clusters.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/NomadServersFrom2Clusters.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad_Business_Value.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad_Business_Value.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad_HashiStack_Velocity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad_HashiStack_Velocity.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Nomad_Use_Cases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Nomad_Use_Cases.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Upgrade-Start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Upgrade-Start.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/Upgraded-Servers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/Upgraded-Servers.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/acl-overview.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/acl-overview.jpg -------------------------------------------------------------------------------- /docs/slides/multi-cloud/advanced-nomad/images/nomad-multi-region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/advanced-nomad/images/nomad-multi-region.png 
-------------------------------------------------------------------------------- /docs/slides/multi-cloud/index.md: -------------------------------------------------------------------------------- 1 | name: nomad-field-workshops-slides 2 | class: title, shelf, no-footer, fullbleed 3 | background-image: url(https://hashicorp.github.io/field-workshops-assets/assets/bkgs/HashiCorp-Title-bkg.jpeg) 4 | count: false 5 | 6 | 7 | # Nomad Multi-Cloud Field Workshop 8 | ## Slides for Nomad multi-cloud field workshops 9 | 10 | ![:scale 15%](https://hashicorp.github.io/field-workshops-assets/assets/logos/logo_nomad.png) 11 | 12 | ??? 13 | This is a title slide of a Remark.js slide show. 14 | 15 | --- 16 | layout: true 17 | 18 | .footer[ 19 | - Copyright © 2021 HashiCorp 20 | - ![:scale 100%](https://hashicorp.github.io/field-workshops-assets/assets/logos/HashiCorp_Icon_Black.svg) 21 | ] 22 | 23 | --- 24 | name: nomad-workshops-slides 25 | # Nomad Multi-Cloud Workshop Slides 26 | ### This directory contains slides for Nomad workshops intended for use with multiple clouds. 27 | ### Workshops for each cloud are organized by type: 28 | 1. Nomad OSS 29 | 1. Nomad Enterprise 30 | 31 | ??? 32 | This is a regular slide of a Remark.js slide show. 33 | -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/AddSystemAlert1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/AddSystemAlert1.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/AddSystemAlert2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/AddSystemAlert2.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Evaluation_Queue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Evaluation_Queue.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/EvictBusinessAlert1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/EvictBusinessAlert1.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/EvictBusinessAlert2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/EvictBusinessAlert2.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Failed-Region.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Failed-Region.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/FullCluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/FullCluster.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Multi-Region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Multi-Region.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-Federation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-Federation.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-Job-with-Deployments.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-Job-with-Deployments.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-Job-with-task-groups-allocations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-Job-with-task-groups-allocations.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-Job_Status-Highlevel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-Job_Status-Highlevel.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-Workload-Types.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-Workload-Types.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-task-group-status-and-task.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-task-group-status-and-task.png -------------------------------------------------------------------------------- 
/docs/slides/multi-cloud/nomad-oss/images/Nomad-task-group.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-task-group.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-task-logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-task-logs.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad-task.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad-task.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/NomadRegion.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/NomadRegion.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_Business_Value.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_Business_Value.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_Evaluation_Kickoff.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_Evaluation_Kickoff.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_HashiStack_Velocity.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_HashiStack_Velocity.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_Overall_Flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_Overall_Flow.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_Simple_Cluster_Topology.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_Simple_Cluster_Topology.png 
-------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_Use_Cases.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_Use_Cases.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Nomad_eval_alloc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Nomad_eval_alloc.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/Queue_Processing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/Queue_Processing.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/ServerElection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/ServerElection.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/SystemAlerting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/SystemAlerting.png -------------------------------------------------------------------------------- /docs/slides/multi-cloud/nomad-oss/images/nomad-architecture-region.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/docs/slides/multi-cloud/nomad-oss/images/nomad-architecture-region.png -------------------------------------------------------------------------------- /instructor-guides/README.md: -------------------------------------------------------------------------------- 1 | # Instructor Guides for Nomad Workshops 2 | Instructor Guides for all Nomad workshops should be created as Markdown files and placed in this directory. They should have names like `<cloud>_<workshop>_INSTRUCTOR_GUIDE.md` where `<cloud>` is the cloud the workshop targets and `<workshop>` is the name of the workshop. But if the workshop is intended for use with multiple clouds, `<cloud>` should be omitted. 3 | 4 | Please do not use sub-directories for instructor guides.
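For example (hypothetical file names, assuming the convention above): a GCP-specific guide might be named `gcp_nomad-multi-server-cluster_INSTRUCTOR_GUIDE.md`, while a multi-cloud guide would drop the cloud prefix, e.g. `nomad-acls_INSTRUCTOR_GUIDE.md`.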
5 | -------------------------------------------------------------------------------- /instructor-guides/images/2001_phone.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hashicorp/field-workshops-nomad/3a8240f3cf686b9874af1b36328caf190a541375/instructor-guides/images/2001_phone.jpg -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/bootstrap-acls/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "Secret" /root/nomad/acl_token.txt || fail-message "Have you created the bootstrap token file acl_token.txt?" 6 | 7 | grep -q "export\s\+NOMAD_TOKEN=" /root/.bash_history || fail-message "Have you exported your Secret ID in the console?" 8 | 9 | nomad server members | grep -q server1.global || fail-message "Have you applied the anonymous policy yet?" 10 | 11 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the status of your Nomad servers yet." 12 | 13 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the status of your Nomad client nodes yet." 14 | 15 | nomad_servers=$(nomad server members | grep alive | wc -l) 16 | if [ $nomad_servers -ne 3 ]; then 17 | fail-message "There are not 3 running Nomad servers, or the anonymous policy is not configured after enabling Nomad ACLs." 18 | fi 19 | 20 | nomad_clients=$(nomad node status | grep ready | wc -l) 21 | if [ $nomad_clients -ne 2 ]; then 22 | fail-message "There are not 2 running Nomad clients, or the anonymous policy is not configured after enabling Nomad ACLs."
23 | fi 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/bootstrap-acls/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | mkdir /root/nomad 3 | cat <<-EOF > /root/nomad/anonymous.json 4 | { 5 | "Name": "anonymous", 6 | "Description": "Allow read-only access for anonymous requests", 7 | "Rules": " 8 | namespace \"default\" { 9 | policy = \"read\" 10 | } 11 | agent { 12 | policy = \"read\" 13 | } 14 | node { 15 | policy = \"read\" 16 | } 17 | " 18 | } 19 | EOF 20 | 21 | #rm -rf /root/.bash_history 22 | echo "Bootstrap ACLs" > /root/.bash_history 23 | 24 | exit 0 25 | 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/bootstrap-acls/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | #Work Through Nomad 8 | 9 | cd /root/nomad 10 | nomad acl bootstrap > /root/nomad/acl_token.txt 11 | 12 | export NOMAD_TOKEN=`cat /root/nomad/acl_token.txt | grep Secret | awk '{print $4}'` 13 | 14 | curl --request POST --data @anonymous.json -H "X-Nomad-Token: $NOMAD_TOKEN" http://localhost:4646/v1/acl/policy/anonymous 15 | 16 | curl http://localhost:4646/v1/jobs 17 | 18 | nomad server members 19 | 20 | nomad node status 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | machine_type: n1-standard-1 10 | - name: nomad-server-2 11 | image: instruqt-hashicorp/hashistack-2004-0-13-1 12 | shell: /bin/bash -l 13 | machine_type: n1-standard-1 14 | - name: nomad-server-3 15 | image: instruqt-hashicorp/hashistack-2004-0-13-1 16 | shell: /bin/bash -l 17 | machine_type: n1-standard-1 18 | - name: nomad-client-1 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | machine_type: n1-standard-1 22 | - name: nomad-client-2 23 | image: instruqt-hashicorp/hashistack-2004-0-13-1 24 | shell: /bin/bash -l 25 | machine_type: n1-standard-1 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-client-acls/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "acl" /etc/nomad.d/client1.hcl || fail-message "You have not added the 'acl' stanza to the client1.hcl file on Client 1 yet." 6 | 7 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted your Client 1 yet." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't run 'nomad node status' on Client 1 yet." 10 | 11 | grep -q "ps\s\+-ef\s\+|\s\+grep\s\+nomad" /root/.bash_history || fail-message "You haven't performed the process check of Client 1 yet." 
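# For reference, a minimal sketch of the 'acl' stanza this check expects, matching what the
# accompanying solve script appends to /etc/nomad.d/client1.hcl:
#
#   acl {
#     enabled = true
#   }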
12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-client-acls/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "acl" /etc/nomad.d/client2.hcl || fail-message "You have not added the 'acl' stanza to the client2.hcl file on Client 2 yet." 6 | 7 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted Client 2 yet." 8 | 9 | grep -q "ps\s\+-ef\s\+|\s\+grep\s\+nomad" /root/.bash_history || fail-message "You haven't performed the process check of Client 2 yet." 10 | 11 | exit 0 12 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-client-acls/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | echo "Configure Client ACLs" > /root/.bash_history 4 | 5 | exit 0 6 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-client-acls/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the ACL Enabled 8 | cat <> /etc/nomad.d/client1.hcl 9 | 10 | # ACL Enabled 11 | acl { 12 | enabled = true 13 | } 14 | EOF 15 | 16 | # Restart Nomad 17 | systemctl restart nomad 18 | 19 | sleep 15 20 | 21 | # Run `nomad node status` 22 | nomad node status 23 | 24 | # Check status with ps -ef 25 | ps -ef | grep nomad 26 | 27 | exit 0 28 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-client-acls/solve-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the ACL Enabled 8 | cat <> /etc/nomad.d/client2.hcl 9 | 10 | # ACL Enabled 11 | acl { 12 | enabled = true 13 | } 14 | EOF 15 | 16 | # Restart Nomad 17 | systemctl restart nomad 18 | 19 | sleep 15 20 | 21 | # Run `nomad node status` 22 | nomad node status 23 | 24 | # Check status with ps -ef 25 | ps -ef | grep nomad 26 | 27 | exit 0 28 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "acl" /etc/nomad.d/server1.hcl || fail-message "You have not added the 'acl' stanza to /etc/nomad.d/server1.hcl on Server 1 yet." 6 | 7 | grep -q "authoritative_region" /etc/nomad.d/server1.hcl || fail-message "You have not set the 'authoritative_region' in the server1.hcl file on Server 1 yet." 8 | 9 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted Server 1 yet." 10 | 11 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't run 'nomad server members' on Server 1 yet." 12 | 13 | grep -q "ps\s\+-ef\s\+|\s\+grep\s\+nomad" /root/.bash_history || fail-message "You haven't performed the process check of Server 1 yet." 
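# For reference, a minimal sketch of the server-side changes this check expects, matching the
# accompanying solve script: an 'acl' stanza plus an authoritative_region setting in server1.hcl:
#
#   acl {
#     enabled = true
#   }
#   authoritative_region = "global"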
14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/check-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "acl" /etc/nomad.d/server2.hcl || fail-message "You have not added the 'acl' stanza to /etc/nomad.d/server2.hcl on Server 2 yet." 6 | 7 | grep -q "authoritative_region" /etc/nomad.d/server2.hcl || fail-message "You have not set the 'authoritative_region' in the server2.hcl file on Server 2 yet." 8 | 9 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted Server 2 yet." 10 | 11 | grep -q "ps\s\+-ef\s\+|\s\+grep\s\+nomad" /root/.bash_history || fail-message "You haven't performed the process check of Server 2 yet." 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/check-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "acl" /etc/nomad.d/server3.hcl || fail-message "You have not added the 'acl' stanza to /etc/nomad.d/server3.hcl on Server 3 yet." 6 | 7 | grep -q "authoritative_region" /etc/nomad.d/server3.hcl || fail-message "You have not set 'authoritative_region' in the server3.hcl file on Server 3 yet." 8 | 9 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted Server 3 yet." 10 | 11 | grep -q "ps\s\+-ef\s\+|\s\+grep\s\+nomad" /root/.bash_history || fail-message "You haven't performed the process check of Server 3 yet." 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | echo "Configure Server ACLs" > /root/.bash_history 4 | 5 | exit 0 6 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the ACL Enabled 8 | cat <> /etc/nomad.d/server1.hcl 9 | 10 | # ACL Enabled 11 | 12 | acl { 13 | enabled = true 14 | } 15 | EOF 16 | 17 | # Check Authoritive region 18 | ex -s -c '10i| authoritative_region = "global"' -c x /etc/nomad.d/server1.hcl 19 | 20 | 21 | # Restart Nomad Agent 22 | systemctl restart nomad 23 | 24 | sleep 15 25 | 26 | # Check Nomad Members 27 | nomad server members 28 | 29 | # Check the Nomad nodes 30 | nomad node status 31 | 32 | # Check nomad agent Running 33 | ps -ef | grep nomad 34 | 35 | exit 0 36 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/solve-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the ACL Enabled 8 | cat <> /etc/nomad.d/server2.hcl 9 | 10 | # ACL Enabled 11 | 12 | acl { 13 | enabled = true 14 | } 15 | EOF 16 | 17 | # Check Authoritive region 18 | ex -s -c '10i| authoritative_region = "global"' -c x 
/etc/nomad.d/server2.hcl 19 | 20 | # Restart Nomad Agent 21 | systemctl restart nomad 22 | 23 | sleep 15 24 | 25 | # Check Nomad Members 26 | nomad server members 27 | 28 | # Check the Nomad nodes 29 | nomad node status 30 | 31 | # Check nomad agent Running 32 | ps -ef | grep nomad 33 | 34 | exit 0 35 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/configure-server-acls/solve-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the ACL Enabled 8 | cat <> /etc/nomad.d/server3.hcl 9 | 10 | # ACL Enabled 11 | 12 | acl { 13 | enabled = true 14 | } 15 | EOF 16 | 17 | # Check Authoritive region 18 | ex -s -c '10i| authoritative_region = "global"' -c x /etc/nomad.d/server3.hcl 19 | 20 | # Restart Nomad Agent 21 | systemctl restart nomad 22 | 23 | sleep 15 24 | 25 | # Check Nomad Members 26 | nomad server members 27 | 28 | # Check the Nomad nodes 29 | nomad node status 30 | 31 | # Check nomad agent Running 32 | ps -ef | grep nomad 33 | 34 | exit 0 35 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/run-the-servers-and-clients/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the status of your Nomad servers yet." 6 | 7 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the status of your Nomad client nodes yet." 8 | 9 | grep -q "nomad\s\+status" /root/.bash_history || fail-message "You haven't checked the status of your Nomad jobs yet." 10 | 11 | nomad_servers=$(nomad server members | grep alive | wc -l) 12 | if [ $nomad_servers -ne 3 ]; then 13 | fail-message "There are not 3 running Nomad servers." 14 | fi 15 | 16 | nomad_clients=$(nomad node status | grep ready | wc -l) 17 | if [ $nomad_clients -ne 2 ]; then 18 | fail-message "There are not 2 running Nomad clients." 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/run-the-servers-and-clients/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Check the Nomad cluster members 8 | nomad server members 9 | 10 | # Check the Nomad nodes 11 | nomad node status 12 | 13 | # Check the Nomad jobs 14 | nomad status 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "export\s\+NOMAD_TOKEN" /root/.bash_history || fail-message "You haven't exported the admin token on Server 1 yet." 6 | 7 | grep -q "nomad\s\+acl\s\+policy\s\+delete\s\+anonymous" /root/.bash_history || fail-message "You haven't deleted the anonymous policy on Server 1 yet." 8 | 9 | grep -q "nomad\s\+acl\s\+policy\s\+apply .* devRO\s\+dev_policy.hcl" /root/.bash_history || fail-message "You haven't created the devRO ACL policy on Server 1 yet." 
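# For reference, the policy and token commands this task expects follow the pattern used in the
# accompanying solve script, e.g.:
#
#   nomad acl policy apply -description "Dev Read Only" devRO dev_policy.hcl
#   nomad acl token create -name="Dev RO" -type="client" -policy="devRO"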
10 | 11 | grep -q "nomad\s\+acl\s\+policy\s\+apply .* opsRW\s\+ops_policy.hcl" /root/.bash_history || fail-message "You haven't created the opsRW ACL policy on Server 1 yet." 12 | 13 | grep -q "nomad\s\+acl\s\+policy\s\+list" /root/.bash_history || fail-message "You haven't listed your ACL policies on Server 1 yet." 14 | 15 | grep -q "nomad\s\+acl\s\+token\s\+create .*Dev\s\+RO.*" /root/.bash_history || fail-message "You haven't created the Dev RO token on Server 1 yet." 16 | 17 | grep -q "nomad\s\+acl\s\+token\s\+create .*Ops RW.*" /root/.bash_history || fail-message "You haven't created the Ops RW token on Server 1 yet." 18 | 19 | grep -q "nomad\s\+acl\s\+token\s\+list" /root/.bash_history || fail-message "You haven't listed your ACL tokens on Server 1 yet." 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/check-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+job\s\+status" /root/.bash_history || fail-message "You haven't run 'nomad job status' on Server 2 yet." 6 | 7 | grep -q "nomad\s\+agent-info" /root/.bash_history || fail-message "You haven't run 'nomad agent-info' on Server 2 yet." 8 | 9 | grep -q "export\s\+NOMAD_TOKEN" /root/.bash_history || fail-message "You haven't exported the admin token on Server 2 yet." 10 | 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/check-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+job\s\+status" /root/.bash_history || fail-message "You haven't run 'nomad job status' on Server 3 yet." 6 | 7 | grep -q "export\s\+NOMAD_TOKEN" /root/.bash_history || fail-message "You haven't exported the admin token on Server 3 yet." 8 | 9 | grep -q "nomad\s\+job\s\+stop\s\+redis" /root/.bash_history || fail-message "You haven't run 'nomad job stop' on Server 3 yet." 10 | 11 | grep -q "nomad\s\+job\s\+run\s\+redis.nomad" /root/.bash_history || fail-message "You haven't run 'nomad job run' on Server 3 yet."
12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | cat <<-EOF > /root/nomad/dev_policy.hcl 4 | namespace "default" { 5 | policy = "read" 6 | capabilities = ["list-jobs", "read-job"] 7 | } 8 | EOF 9 | 10 | cat <<-EOF > /root/nomad/ops_policy.hcl 11 | namespace "default" { 12 | policy = "write" 13 | capabilities = ["list-jobs", "read-job", "submit-job", "dispatch-job", "read-logs", "read-fs", "alloc-exec", "alloc-lifecycle"] 14 | } 15 | EOF 16 | 17 | # Clear Bash History 18 | echo "Use ACLs" > /root/.bash_history 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd /root/nomad 8 | 9 | export NOMAD_TOKEN=`cat /root/nomad/acl_token.txt | grep Secret | awk '{print $4}'` 10 | 11 | nomad acl policy delete anonymous 12 | 13 | nomad acl policy apply -description "Dev Read Only" devRO dev_policy.hcl 14 | 15 | nomad acl policy apply -description "OPS Read write" opsRW ops_policy.hcl 16 | 17 | nomad acl policy list 18 | 19 | nomad acl token create -name="Dev RO" -type="client" -policy="devRO" > devro_token.txt 20 | 21 | nomad acl token create -name="Ops RW" -type="client" -policy="opsRW" > opsrw_token.txt 22 | 23 | nomad acl token list 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/solve-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | scp -oStrictHostKeyChecking=no nomad-server-1:/root/nomad/devro_token.txt /root/nomad/devro_token.txt 8 | 9 | export NOMAD_TOKEN=`cat /root/nomad/devro_token.txt | grep Secret | awk '{print $4}'` 10 | 11 | nomad job status 12 | 13 | nomad agent-info 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-acls/use-acls/solve-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd /root/nomad 8 | scp -oStrictHostKeyChecking=no nomad-server-1:/root/nomad/opsrw_token.txt /root/nomad/opsrw_token.txt 9 | 10 | export NOMAD_TOKEN=`cat /root/nomad/opsrw_token.txt | grep Secret | awk '{print $4}'` 11 | 12 | nomad job status 13 | 14 | nomad job stop redis 15 | 16 | nomad job run redis.nomad 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | containers: 6 | - name: cloud-client 7 | image: gcr.io/instruqt/cloud-client 8 | shell: /bin/bash -l 9 | ports: 10 | - 80 11 | gcp_projects: 12 | - name: nomad 13 | services: 14 | - cloudresourcemanager.googleapis.com 15 | - compute.googleapis.com 16 | - iam.googleapis.com 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/create-persistent-disk/check-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "gcloud\s\+compute\s\+disks\s\+create\s\+mysql\s\+--size\s\+10\s\+--zone\s\+europe-west1-b" /root/.bash_history || fail-message "You have not created a disk yet" 6 | 7 | # Get the edited copy of volume.hcl 8 | gcloud compute scp nomad-server-1:/root/nomad/volume.hcl /root/volume.hcl --strict-host-key-checking no --zone "europe-west1-b" 9 | 10 | # Check that volume.hcl has valid external_id 11 | grep -q "projects/.*/zones/europe-west1-b/disks/mysql" /root/volume.hcl || fail-message "You have not set 'external_id' in volume.hcl correctly yet." 12 | 13 | # Check status of volume 14 | volume_status=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad volume status | grep "true" | wc -l') 15 | if [ $volume_status -ne 1 ]; then 16 | fail-message "The volume mysql is not schedulable." 17 | fi 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/create-persistent-disk/setup-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | cat <<-EOF > /root/nomad/volume.hcl 6 | type = "csi" 7 | id = "mysql" 8 | name = "mysql" 9 | external_id = "SELF_LINK" 10 | access_mode = "single-node-writer" 11 | attachment_mode = "file-system" 12 | plugin_id = "gcepd" 13 | EOF 14 | 15 | gcloud compute scp /root/nomad/volume.hcl nomad-server-1:/root/nomad/volume.hcl --strict-host-key-checking no --zone "europe-west1-b" 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/create-persistent-disk/solve-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | gcloud compute disks create mysql --size 10 --zone europe-west1-b 8 | 9 | sleep 30 10 | 11 | self_link=$(gcloud compute disks describe mysql --zone europe-west1-b | grep "selfLink"| cut -c49-) 12 | 13 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "sed -i 's:SELF_LINK:$self_link:g' nomad/volume.hcl" 14 | 15 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad volume register nomad/volume.hcl' 16 | 17 | sleep 30 18 | 19 | # Check volume status 20 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad volume status' 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/deploy-gcepd-driver/check-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | running_jobs=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job status | grep "running" | wc -l') 6 | if [ 
$running_jobs -ne 2 ]; then 7 | fail-message "At least one of the CSI driver jobs is not running." 8 | fi 9 | 10 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "nomad plugin status gcepd" > plugin_status.txt 11 | 12 | plugin_controller_status=$(cat plugin_status.txt | grep "Controllers Healthy" | grep "1" | wc -l) 13 | if [ $plugin_controller_status -ne 1 ]; then 14 | fail-message "'nomad plugin status gcepd' does not show that the gcepd CSI plugin has 1 healthy controller." 15 | fi 16 | 17 | plugin_node_status=$(cat plugin_status.txt | grep "Nodes Healthy" | grep "3" | wc -l) 18 | if [ $plugin_node_status -ne 1 ]; then 19 | fail-message "'nomad plugin status gcepd' does not show that the gcepd CSI plugin has 3 healthy nodes." 20 | fi 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/deploy-gcepd-driver/solve-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job run nomad/controller.nomad' 8 | 9 | sleep 60 10 | 11 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job run nomad/nodes.nomad' 12 | 13 | sleep 60 14 | 15 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad plugin status gcepd' 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/deploy-mysql/check-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | running_jobs=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job status | grep "running" | wc -l') 6 | if [ $running_jobs -ne 3 ]; then 7 | fail-message "You appear to have not run the mysql job." 8 | fi 9 | 10 | items_in_table=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -ss -e "select * from items;" | wc -l') 11 | if [ $items_in_table -lt 5 ]; then 12 | fail-message "You did not add 2 or more items to the items table in the MySQL database."
13 | fi 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/deploy-mysql/solve-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job run nomad/mysql.nomad' 8 | 9 | sleep 60 10 | 11 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'nomad job status mysql' 12 | 13 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -e \"INSERT INTO items (name) VALUES ('glove');\"" 14 | 15 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -e \"INSERT INTO items (name) VALUES ('dog');\"" 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/destroy-rerun-mysql/check-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | gcloud compute scp nomad-server-1:/root/.bash_history /root/.nomad_server_history --zone "europe-west1-b" 6 | 7 | grep -q "nomad\s\+job\s\+stop\s\+-purge\s\+mysql-server" /root/.nomad_server_history || fail-message "You have not stopped the mysql-server job on the Server yet." 8 | 9 | grep -q "nomad\s\+job\s\+run\s\+nomad/mysql.nomad" /root/.nomad_server_history || fail-message "You have not re-run the mysql.nomad job on the Server yet." 10 | 11 | items_in_table=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --command 'mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -ss -e "select * from items;" | wc -l') 12 | if [ $items_in_table -lt 5 ]; then 13 | fail-message "There are less than 5 items in the items table. Somehow, the data stored by MySQL was not persisted." 
14 | fi 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/destroy-rerun-mysql/setup-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | rm /root/.bash_history 6 | touch /root/.bash_history 7 | 8 | # Write solve script 9 | cat <<-EOF > /root/nomad/stop-and-restart-mysql.sh 10 | #!/bin/bash -l 11 | #Enable bash history 12 | HISTFILE=~/.bash_history 13 | set -o history 14 | # Stop and Purge the Job 15 | nomad job stop -purge mysql-server 16 | # Sleep 17 | sleep 60 18 | # Rerun the mysql.nomad Job 19 | nomad job run nomad/mysql.nomad 20 | # Sleep 21 | sleep 60 22 | EOF 23 | 24 | # SCP solve script to nomad-server-1 25 | gcloud compute scp /root/nomad/stop-and-restart-mysql.sh nomad-server-1:/root/nomad/stop-and-restart-mysql.sh --zone "europe-west1-b" 26 | 27 | # Make solve script executable 28 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "chmod +x /root/nomad/stop-and-restart-mysql.sh" 29 | 30 | # Purge history on server 31 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "rm /root/.bash_history && touch /root/.bash_history" 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/destroy-rerun-mysql/solve-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | gcloud compute ssh nomad-server-1 --zone europe-west1-b --command "/root/nomad/stop-and-restart-mysql.sh" 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/verify-nomad-cluster-health/check-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "echo\s\+\$nomad_server_ip" /root/.bash_history || fail-message "You have not checked the IP of your Nomad server yet." 6 | 7 | consul_members=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --project $INSTRUQT_GCP_PROJECT_NOMAD_PROJECT_ID --strict-host-key-checking no --command "consul members | grep alive | wc -l") 8 | 9 | if [ $consul_members -ne 4 ]; then 10 | fail-message "'consul members' does not show 4 running Consul nodes." 11 | fi 12 | 13 | nomad_servers=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --project $INSTRUQT_GCP_PROJECT_NOMAD_PROJECT_ID --strict-host-key-checking no --command "nomad server members | grep alive | wc -l") 14 | 15 | if [ $nomad_servers -ne 1 ]; then 16 | fail-message "'nomad server members' does not show 1 running Nomad server." 17 | fi 18 | 19 | nomad_clients=$(gcloud compute ssh nomad-server-1 --zone europe-west1-b --project $INSTRUQT_GCP_PROJECT_NOMAD_PROJECT_ID --strict-host-key-checking no --command "nomad node status | grep ready | wc -l") 20 | 21 | if [ $nomad_clients -ne 3 ]; then 22 | fail-message "'nomad node status' does not show 3 running Nomad clients." 
23 | fi 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-and-csi-plugins-gcp/verify-nomad-cluster-health/solve-cloud-client: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | echo $nomad_server_ip 8 | 9 | exit 0 -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | machine_type: n1-standard-1 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/nomad-cli/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+version" /root/.bash_history || fail-message "You haven't run 'nomad version' yet." 6 | 7 | grep -q "nomad\s\+job\s\+run\s\+-h" /root/.bash_history || grep -q "nomad\s\+job\s\+run\s\+-help" /root/.bash_history || grep -q "nomad\s\+job\s\+run\s\+--help" /root/.bash_history || fail-message "You haven't run 'nomad job run -h' (or a variation of it) yet." 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/nomad-cli/setup-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Uncomment out mapping of 127.0.0.1 to hostname in /etc/hosts 4 | HOSTNAME=$(hostname -s) 5 | sed -i "s/127\.0\.0\.1 ${HOSTNAME}/#127.0.0.1 ${HOSTNAME}/" /etc/hosts 6 | 7 | # Generate short version of example job 8 | nomad job init --short 9 | 10 | # Change name to redis.nomad 11 | mv example.nomad redis.nomad 12 | 13 | # Change name of job 14 | sed -i "s/example/redis/g" redis.nomad 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/nomad-cli/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Get the Nomad version 8 | nomad version 9 | 10 | # Get help for the `nomad job run` command 11 | nomad job run -h 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/run-first-job/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that `cat redis.nomad` was run 6 | grep -q "cat\s\+redis.nomad" /root/.bash_history || fail-message "You haven't looked at the redis configuration file" 7 | 8 | # Check that Nomad job was run 9 | grep -q "nomad\s\+job\s\+run\s\+redis.nomad" /root/.bash_history || fail-message "You haven't run the redis.nomad job yet." 
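# For reference, the CLI flow this check looks for mirrors the accompanying solve script:
#
#   cat redis.nomad
#   nomad job run redis.nomad
#   nomad status
#   nomad job stop redis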
10 | 11 | # Check that Nomad job status was checked with CLI 12 | grep -q "nomad\s\+status" /root/.bash_history || fail-message "You must check the status of the Nomad job." 13 | 14 | # Check that Nomad job was stopped with CLI 15 | grep -q "nomad\s\+job\s\+stop\s\+redis" /root/.bash_history || fail-message "You must stop the redis job before continuing" 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/run-first-job/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Inspect the job specification file 8 | cat redis.nomad 9 | 10 | # Run the Job 11 | nomad job run redis.nomad 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Check the job status with the CLI 17 | nomad status 18 | 19 | # Stop the job with the CLI 20 | nomad job stop redis 21 | 22 | # Sleep 23 | sleep 30 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/run-nomad-agent/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-dev\s\+-bind=0.0.0.0.*&" /root/.bash_history || fail-message "You haven't started the development mode Nomad agent yet" 6 | 7 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked status of your Nomad node yet." 8 | 9 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the members of your Nomad cluster yet." 10 | 11 | nomad_servers=$(nomad server members | grep alive | wc -l) 12 | if [ $nomad_servers -ne 1 ]; then 13 | fail-message "There is not 1 running Nomad server." 14 | fi 15 | 16 | nomad_clients=$(nomad node status | grep ready | wc -l) 17 | if [ $nomad_clients -ne 1 ]; then 18 | fail-message "There are not 1 running Nomad client." 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/run-nomad-agent/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Run the Nomad agent in Dev mode 8 | nomad agent -dev -bind=0.0.0.0 > nomad.log 2>&1 & 9 | 10 | # Sleep 11 | sleep 30 12 | 13 | # Get the Nomad node status 14 | nomad node status 15 | 16 | # Get the members of the Nomad cluster 17 | nomad server members 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/use-nomad-http-api/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that payload.json was generated 6 | grep -q "nomad\s\+job\s\+run\s\+-output\s\+redis.nomad\s\+>\s\+payload.json" /root/.bash_history || fail-message "You haven't generated your payload json file." 7 | 8 | # Check that job was started with HTTP API 9 | grep -q "curl\s\+--data\s\+@payload.json\s\+http://localhost:4646/v1/jobs" /root/.bash_history || fail-message "You haven't started the redis job using the API." 
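# For reference, the HTTP API flow this check looks for mirrors the accompanying solve script:
#
#   nomad job run -output redis.nomad > payload.json
#   curl --data @payload.json http://localhost:4646/v1/jobs
#   curl http://localhost:4646/v1/job/redis/summary
#   curl --request DELETE http://localhost:4646/v1/job/redis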
10 | 11 | # Check that job status was checked with HTTP API 12 | grep -q "curl\s\+http://localhost:4646/v1/job/redis/summary" /root/.bash_history || fail-message "You haven't queried the redis job status using the API" 13 | 14 | # Check that job was stopped with HTTP API 15 | grep -q "curl\s\+--request\s\+DELETE\s\+http://localhost:4646/v1/job/redis" /root/.bash_history || fail-message "You must stop the redis job before moving forward." 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-basics/use-nomad-http-api/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Generate the payload.json file 8 | nomad job run -output redis.nomad > payload.json 9 | 10 | # Run the job with the HTTP API 11 | curl --data @payload.json http://localhost:4646/v1/jobs 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Get the Nomad node status with HTTP API 17 | curl http://localhost:4646/v1/job/redis/summary 18 | 19 | # Stop the job with the HTTP API 20 | curl --request DELETE http://localhost:4646/v1/job/redis 21 | 22 | # Sleep 30 23 | sleep 30 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: nomad-server-1:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-client-1 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: nomad-client-1:8500 17 | machine_type: n1-standard-1 18 | - name: nomad-client-2 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: nomad-client-2:8500 23 | machine_type: n1-standard-1 24 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/nomad-and-consul-connect/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+job\s\+run\s\+connect.nomad" /root/.bash_history || fail-message "You haven't run the connect.nomad job yet." 6 | 7 | nomad_allocations=$(nomad job status countdash | grep Healthy -A3 | grep "1 1 1" | wc -l) 8 | if [ $nomad_allocations -ne 2 ]; then 9 | fail-message "The countdash job does not have 2 healthy allocations." 
10 | fi 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/nomad-and-consul-connect/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Write connect.nomad to /root/nomad/connect.nomad 4 | cat <<-EOF > /root/nomad/connect.nomad 5 | job "countdash" { 6 | datacenters = ["dc1"] 7 | 8 | group "api" { 9 | network { 10 | mode = "bridge" 11 | } 12 | 13 | service { 14 | name = "count-api" 15 | port = "9001" 16 | 17 | connect { 18 | sidecar_service {} 19 | } 20 | } 21 | 22 | task "web" { 23 | driver = "docker" 24 | 25 | config { 26 | image = "hashicorpnomad/counter-api:v1" 27 | } 28 | } 29 | } 30 | 31 | group "dashboard" { 32 | network { 33 | mode = "bridge" 34 | 35 | port "http" { 36 | static = 9002 37 | to = 9002 38 | } 39 | } 40 | 41 | service { 42 | name = "count-dashboard" 43 | port = "9002" 44 | 45 | connect { 46 | sidecar_service { 47 | proxy { 48 | upstreams { 49 | destination_name = "count-api" 50 | local_bind_port = 8080 51 | } 52 | } 53 | } 54 | } 55 | } 56 | 57 | task "dashboard" { 58 | driver = "docker" 59 | 60 | env { 61 | COUNTING_SERVICE_URL = "http://\${NOMAD_UPSTREAM_ADDR_count_api}" 62 | } 63 | 64 | config { 65 | image = "hashicorpnomad/counter-dashboard:v1" 66 | } 67 | } 68 | } 69 | } 70 | EOF 71 | 72 | exit 0 73 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/nomad-and-consul-connect/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Navigate to /root/nomad directory 8 | cd nomad 9 | 10 | # Run the connect.nomad Job 11 | nomad job run connect.nomad 12 | 13 | # Sleep 14 | sleep 60 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/verify-nomad-cluster-health/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You have not run 'consul members' on the Server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You have not run 'nomad server members' on the Server yet." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You have not run 'nomad node status' on the Server yet." 10 | 11 | consul_clients=$(consul members | grep alive | wc -l) 12 | if [ $consul_clients -ne 3 ]; then 13 | fail-message "There are not 3 running Consul clients." 14 | fi 15 | 16 | nomad_servers=$(nomad server members | grep alive | wc -l) 17 | if [ $nomad_servers -ne 1 ]; then 18 | fail-message "The Nomad server is not running." 19 | fi 20 | 21 | nomad_clients=$(nomad node status | grep ready | wc -l) 22 | if [ $nomad_clients -ne 2 ]; then 23 | fail-message "There are not 2 running Nomad clients." 
24 | fi 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-consul-connect/verify-nomad-cluster-health/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Run consul members 8 | consul members 9 | 10 | # Run nomad server members 11 | nomad server members 12 | 13 | # Run nomad node status 14 | nomad node status 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/01-verify-nomad-cluster-health/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul members" /root/.bash_history || fail-message "You have not run 'consul members' on the Server yet." 6 | 7 | grep -q "nomad server members" /root/.bash_history || fail-message "You have not run 'nomad server members' on the Server yet." 8 | 9 | grep -q "nomad node status" /root/.bash_history || fail-message "You have not run 'nomad node status' on the Server yet." 10 | 11 | consul_clients=$(consul members | grep alive | wc -l) 12 | if [ $consul_clients -ne 4 ]; then 13 | fail-message "There are not 4 running Consul clients." 14 | fi 15 | 16 | nomad_servers=$(nomad server members | grep alive | wc -l) 17 | if [ $nomad_servers -ne 1 ]; then 18 | fail-message "The Nomad server is not running." 19 | fi 20 | 21 | nomad_clients=$(nomad node status | grep ready | wc -l) 22 | if [ $nomad_clients -ne 3 ]; then 23 | fail-message "There are not 3 running Nomad clients." 24 | fi 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/01-verify-nomad-cluster-health/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Run consul members 8 | consul members 9 | 10 | # Run nomad server members 11 | nomad server members 12 | 13 | # Run nomad node status 14 | nomad node status 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 1 yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 2 yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/check-nomad-client-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 3 yet." 
6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted the Server yet." 6 | 7 | grep -q "nomad job status" /root/.bash_history || fail-message "You have not run 'nomad job status' yet." 8 | 9 | grep -q "cat /tmp/nomad/server1/audit/audit.log | jq ." /root/.bash_history || fail-message "You have not examined the audit log yet." 10 | 11 | exit 0 12 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client1.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client1.hcl 5 | 6 | # Enable Auditing 7 | audit { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | mkdir -p /tmp/nomad/client1/audit 13 | touch /tmp/nomad/client1/audit/audit.log 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client2.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client2.hcl 5 | 6 | # Enable Auditing 7 | audit { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | mkdir -p /tmp/nomad/client2/audit 13 | touch /tmp/nomad/client2/audit/audit.log 14 | #chmod -R 777 /tmp/nomad 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/setup-nomad-client-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client3.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client3.hcl 5 | 6 | # Enable Auditing 7 | audit { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | mkdir -p /tmp/nomad/client3/audit 13 | touch /tmp/nomad/client3/audit/audit.log 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-server1.hcl 5 | 6 | # Enable Auditing 7 | audit { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | # Add ACL stanza to copy of nomad.hcl config 13 | cat <<-EOF >> /root/nomad/nomad-server1.hcl 14 | 15 | # Enable Auditing 16 | audit { 17 | enabled = true 18 | } 19 | EOF 20 | 21 | # Add ACL stanza to copy of nomad-client1.hcl config 22 | cat <<-EOF >> /root/nomad/nomad-client1.hcl 23 | 24 | # Enable Auditing 25 | audit { 26 | enabled = true 27 | } 28 | EOF 29 | 30 | # Add ACL stanza to copy of nomad-client2.hcl config 31 | cat <<-EOF >> /root/nomad/nomad-client2.hcl 32 | 33 | # Enable Auditing 34 | audit { 35 | enabled = true 36 | } 37 | EOF 38 | 39 | # Add ACL stanza to copy of nomad-client3.hcl config 40 | cat <<-EOF >> /root/nomad/nomad-client3.hcl 41 | 42 | # Enable Auditing 43 | audit { 44 | enabled = true 45 | } 46 | EOF 47 | 48 | mkdir -p /tmp/nomad/server1/audit 
49 | touch /tmp/nomad/server1/audit/audit.log 50 | 51 | exit 0 52 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/02-nomad-auditing/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Navigate to right directory 8 | cd /root/nomad/ 9 | 10 | # Restart server 11 | systemctl restart nomad 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Restart the Nomad clients 17 | ssh -o StrictHostKeyChecking=no nomad-client-1 << ENDSSH 18 | HISTFILE=/root/.bash_history 19 | set -o history 20 | systemctl restart nomad 21 | ENDSSH 22 | ssh -o StrictHostKeyChecking=no nomad-client-2 << ENDSSH 23 | HISTFILE=/root/.bash_history 24 | set -o history 25 | systemctl restart nomad 26 | ENDSSH 27 | ssh -o StrictHostKeyChecking=no nomad-client-3 << ENDSSH 28 | HISTFILE=/root/.bash_history 29 | set -o history 30 | systemctl restart nomad 31 | ENDSSH 32 | 33 | # Check job status 34 | nomad job status 35 | 36 | # Inspect audit log 37 | cat /tmp/nomad/server1/audit/audit.log | jq . 38 | 39 | exit 0 40 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/03-namespaces-and-resource-quotas/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd /root/nomad/quotas" /root/.bash_history || fail-message "You have not navigated to the /root/nomad/quotas directory on the Server yet." 6 | 7 | grep -q "nomad quota apply quota-default.hcl" /root/.bash_history || fail-message "You have not created the default resource quota on the Server yet." 8 | 9 | grep -q "nomad quota apply quota-dev.hcl" /root/.bash_history || fail-message "You have not created the dev resource quota on the Server yet." 10 | 11 | grep -q "nomad quota apply quota-qa.hcl" /root/.bash_history || fail-message "You have not created the qa resource quota on the Server yet." 12 | 13 | grep -q 'nomad namespace apply -quota default -description "default namespace" default' /root/.bash_history || fail-message "You have not applied the default quota to the default namespace on the Server yet." 14 | 15 | grep -q 'nomad namespace apply -quota dev -description "dev namespace" dev' /root/.bash_history || fail-message "You have not created the dev namespace and applied the dev resource quota to it on the Server yet." 16 | 17 | grep -q 'nomad namespace apply -quota qa -description "qa namespace" qa' /root/.bash_history || fail-message "You have not created the qa namespace and applied the qa resource quota to it on the Server yet." 
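# A minimal sketch of a stricter verification (an illustration, not part of the
# original check script): besides grepping bash history, confirm the quotas and
# namespaces actually exist. Assumes the Enterprise 'nomad quota' / 'nomad namespace'
# commands are available on this image and that ACLs are not yet enabled at this
# point in the track.
for quota in default dev qa; do
  nomad quota list | grep -q "$quota" || fail-message "The $quota resource quota does not exist yet."
done
for ns in default dev qa; do
  nomad namespace list | grep -q "$ns" || fail-message "The $ns namespace does not exist yet."
done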
18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/03-namespaces-and-resource-quotas/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | mkdir -p /root/nomad/quotas 4 | 5 | # Write Default Resource Quota 6 | cat <<-EOF > /root/nomad/quotas/quota-default.hcl 7 | name = "default" 8 | description = "default quota" 9 | 10 | limit { 11 | region = "global" 12 | region_limit { 13 | cpu = 2300 14 | memory = 3100 15 | } 16 | } 17 | EOF 18 | 19 | # Write Dev Resource Quota 20 | cat <<-EOF > /root/nomad/quotas/quota-dev.hcl 21 | name = "dev" 22 | description = "dev quota" 23 | 24 | limit { 25 | region = "global" 26 | region_limit { 27 | cpu = 2300 28 | memory = 4100 29 | } 30 | } 31 | EOF 32 | 33 | # Write QA Resource Quota 34 | cat <<-EOF > /root/nomad/quotas/quota-qa.hcl 35 | name = "qa" 36 | description = "qa quota" 37 | 38 | limit { 39 | region = "global" 40 | region_limit { 41 | cpu = 2300 42 | memory = 4100 43 | } 44 | } 45 | EOF 46 | 47 | set-workdir /root/nomad/quotas 48 | 49 | exit 0 50 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/03-namespaces-and-resource-quotas/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Navigate to right directory 8 | cd /root/nomad/quotas 9 | 10 | # Create Default Resource Quota 11 | nomad quota apply quota-default.hcl 12 | 13 | # Create Dev Resource Quota 14 | nomad quota apply quota-dev.hcl 15 | 16 | # Create QA Resource Quota 17 | nomad quota apply quota-qa.hcl 18 | 19 | # Sleep 20 | sleep 15 21 | 22 | # Apply Default Quota to Default Namespace 23 | nomad namespace apply -quota default -description "default namespace" default 24 | 25 | # Create Dev Namespace and Apply Dev Quota 26 | nomad namespace apply -quota dev -description "dev namespace" dev 27 | 28 | # Create QA Namespace and Apply QA Quota 29 | nomad namespace apply -quota qa -description "qa namespace" qa 30 | 31 | exit 0 32 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 1 yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 2 yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/check-nomad-client-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "systemctl restart nomad" /root/.bash_history || fail-message "You have not restarted Client 3 yet." 
6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client1.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client1.hcl 5 | 6 | # Enable ACLs 7 | acl { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | set-workdir /root/nomad/acls 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client2.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client2.hcl 5 | 6 | # Enable ACLs 7 | acl { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | set-workdir /root/nomad/acls 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/04-nomad-acls/setup-nomad-client-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Add ACL stanza to nomad-client3.hcl config 4 | cat <<-EOF >> /etc/nomad.d/nomad-client3.hcl 5 | 6 | # Enable ACLs 7 | acl { 8 | enabled = true 9 | } 10 | EOF 11 | 12 | set-workdir /root/nomad/acls 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/05-sentinel-policies/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd /root/nomad/sentinel" /root/.bash_history || fail-message "You have not navigated to the /root/nomad/sentinel directory on the Server yet." 6 | 7 | grep -q "nomad sentinel apply .* -level hard-mandatory allow-docker-and-java-drivers allow-docker-and-java-drivers.sentinel" /root/.bash_history || fail-message "You have not created the allow-docker-and-java-drivers Sentinel policy on the Server yet." 8 | 9 | grep -q "nomad sentinel apply .* -level soft-mandatory restrict-docker-images restrict-docker-images.sentinel" /root/.bash_history || fail-message "You have not created the restrict-docker-images Sentinel policy on the Server yet." 10 | 11 | grep -q "nomad sentinel apply .* -level soft-mandatory prevent-docker-host-network prevent-docker-host-network.sentinel" /root/.bash_history || fail-message "You have not created the prevent-docker-host-network Sentinel policy on the Server yet." 
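# A minimal sketch of a stricter verification (an illustration, not part of the
# original check script): confirm the Sentinel policies are actually registered,
# not just present in bash history. Reads the bootstrap token the same way the
# solve script below does; assumes /root/nomad/acls/bootstrap.txt exists from the
# earlier ACL challenge.
export NOMAD_TOKEN=$(cat /root/nomad/acls/bootstrap.txt | grep Secret | cut -d' ' -f7)
for policy in allow-docker-and-java-drivers restrict-docker-images prevent-docker-host-network; do
  nomad sentinel list | grep -q "$policy" || fail-message "The $policy Sentinel policy has not been created yet."
done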
12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/05-sentinel-policies/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Navigate to right directory 8 | cd /root/nomad/sentinel 9 | 10 | # Export bootstrap token 11 | export NOMAD_TOKEN=$(cat /root/nomad/acls/bootstrap.txt | grep Secret | cut -d' ' -f7) 12 | 13 | # Create allow-docker-and-java-drivers.sentinel 14 | nomad sentinel apply -description "Only allow the Docker and Java drivers" -level hard-mandatory allow-docker-and-java-drivers allow-docker-and-java-drivers.sentinel 15 | 16 | # Create restrict-docker-images.sentinel 17 | nomad sentinel apply -description "Restrict allowed Docker images" -level soft-mandatory restrict-docker-images restrict-docker-images.sentinel 18 | 19 | # Create prevent-docker-host-network.sentinel 20 | nomad sentinel apply -description "Prevent Docker containers running with host network mode" -level soft-mandatory prevent-docker-host-network prevent-docker-host-network.sentinel 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/07-run-nomad-jobs-2/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set-workdir /root/nomad/jobs 4 | 5 | exit 0 6 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/07-run-nomad-jobs-2/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Navigate to right directory 8 | cd /root/nomad/jobs 9 | 10 | # Export Alice's ACL token 11 | export NOMAD_TOKEN=$(cat /root/nomad/acls/alice-token.txt | grep Secret | cut -d' ' -f7) 12 | 13 | # Check the dev resource quota 14 | nomad quota status dev 15 | 16 | # Run the website-dev.nomad job 17 | nomad job run -detach website-dev.nomad 18 | 19 | # Sleep 20 | sleep 30 21 | 22 | # Check the status of the website job in the dev namespace 23 | # nomad job status -namespace=dev website 24 | 25 | # Check the dev resource quota again 26 | nomad quota status dev 27 | 28 | # Export Bob's ACL token 29 | export NOMAD_TOKEN=$(cat /root/nomad/acls/bob-token.txt | grep Secret | cut -d' ' -f7) 30 | 31 | # Check the qa resource quota 32 | nomad quota status qa 33 | 34 | # Run the website-qa.nomad job 35 | nomad job run -detach website-qa.nomad 36 | 37 | # Sleep 38 | sleep 30 39 | 40 | # Stop the webserver-test job 41 | nomad job stop -namespace=qa webserver-test 42 | 43 | # Sleep 44 | sleep 30 45 | 46 | # Export the bootstrap token 47 | export NOMAD_TOKEN=$(cat /root/nomad/acls/bootstrap.txt | grep Secret | cut -d' ' -f7) 48 | 49 | # Use a Cross-Namespace Query 50 | nomad job status -namespace=* website 51 | 52 | exit 0 53 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/config.yml: 
-------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "3" 5 | secrets: 6 | - name: HC_CONSUL_LICENSE 7 | - name: HC_NOMAD_LICENSE 8 | virtualmachines: 9 | - name: nomad-server-1 10 | image: instruqt-hashicorp/hashistack-2004-enterprise-0-13-1 11 | shell: /bin/bash -l 12 | environment: 13 | CONSUL_HTTP_ADDR: nomad-server-1:8500 14 | machine_type: n1-standard-1 15 | - name: nomad-client-1 16 | image: instruqt-hashicorp/hashistack-2004-enterprise-0-13-1 17 | shell: /bin/bash -l 18 | environment: 19 | CONSUL_HTTP_ADDR: nomad-client-1:8500 20 | machine_type: n1-standard-1 21 | - name: nomad-client-2 22 | image: instruqt-hashicorp/hashistack-2004-enterprise-0-13-1 23 | shell: /bin/bash -l 24 | environment: 25 | CONSUL_HTTP_ADDR: nomad-client-2:8500 26 | machine_type: n1-standard-1 27 | - name: nomad-client-3 28 | image: instruqt-hashicorp/hashistack-2004-enterprise-0-13-1 29 | shell: /bin/bash -l 30 | environment: 31 | CONSUL_HTTP_ADDR: nomad-client-3:8500 32 | machine_type: n1-standard-1 33 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/track_scripts/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul 12 | # Enterprise license, which we store as Instruqt secrets 13 | # We don't need a Nomad license on the clients, it fetches 14 | # from the Nomad server once it can join 15 | 16 | # This trick with awk keeps the contents of the secret 17 | # environment variables from being logged 18 | echo "Writing license files" 19 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 20 | 21 | # And restart services 22 | echo "Restarting services" 23 | systemctl restart consul.service 24 | systemctl restart nomad.service 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/track_scripts/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! 
-f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul 12 | # Enterprise license, which we store as Instruqt secrets 13 | # We don't need a Nomad license on the clients, it fetches 14 | # from the Nomad server once it can join 15 | 16 | # This trick with awk keeps the contents of the secret 17 | # environment variables from being logged 18 | echo "Writing license files" 19 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 20 | 21 | # And restart services 22 | echo "Restarting services" 23 | systemctl restart consul.service 24 | systemctl restart nomad.service 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/track_scripts/setup-nomad-client-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul 12 | # Enterprise license, which we store as Instruqt secrets 13 | # We don't need a Nomad license on the clients, it fetches 14 | # from the Nomad server once it can join 15 | 16 | # This trick with awk keeps the contents of the secret 17 | # environment variables from being logged 18 | echo "Writing license files" 19 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 20 | 21 | # And restart services 22 | echo "Restarting services" 23 | systemctl restart consul.service 24 | systemctl restart nomad.service 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-governance/track_scripts/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | # And restart services 21 | echo "Restarting services" 22 | systemctl restart consul.service 23 | systemctl restart nomad.service 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/01-verify-nomad-cluster-health/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You have not run 'consul members' on the Server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You have not run 'nomad server members' on the Server yet." 
8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You have not run 'nomad node status' on the Server yet." 10 | 11 | consul_clients=$(consul members | grep alive |wc -l) 12 | if [ $consul_clients -ne 4 ]; then 13 | fail-message "There are not 4 running Consul clients." 14 | fi 15 | 16 | nomad_servers=$(nomad server members | grep alive | wc -l) 17 | if [ $nomad_servers -ne 1 ]; then 18 | fail-message "The Nomad servers is not running." 19 | fi 20 | 21 | nomad_clients=$(nomad node status | grep ready | wc -l) 22 | if [ $nomad_clients -ne 3 ]; then 23 | fail-message "There are not 3 running Nomad clients." 24 | fi 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/01-verify-nomad-cluster-health/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Run consul members 8 | consul members 9 | 10 | # Run nomad server members 11 | nomad server members 12 | 13 | # Run nomad node status 14 | nomad node status 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/02-configure-host-volume/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "mkdir\s\+-p\s\+/opt/mysql/data" /root/.bash_history || fail-message "You have not created the directory /opt/mysql/data on Nomad Client 1 yet." 6 | 7 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You have not restarted Nomad Client 1 yet." 8 | 9 | grep -q "nomad\s\+node\s\+status\s\+-short\s\+-self" /root/.bash_history || fail-message "You have not checked the status of Nomad Client 1 yet." 10 | 11 | mysql_volume=$(nomad node status -short -self | grep "Host Volumes" | grep "mysql" | wc -l) 12 | if [ $mysql_volume -ne 1 ]; then 13 | fail-message "The mysql host volume is not yet loaded on Nomad Client 1" 14 | fi 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/02-configure-host-volume/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | rm /root/.bash_history 6 | touch /root/.bash_history 7 | 8 | # Write Nomad Client 1 Config 9 | cat <<-EOF > /etc/nomad.d/nomad-client1.hcl 10 | # Setup data dir 11 | data_dir = "/tmp/nomad/client1" 12 | 13 | # Give the agent a unique name. 
14 | name = "client1" 15 | 16 | # Enable the client 17 | client { 18 | enabled = true 19 | 20 | host_volume "mysql" { 21 | path = "/opt/mysql/data" 22 | read_only = false 23 | } 24 | } 25 | 26 | # Consul configuration 27 | consul { 28 | address = "nomad-client-1:8500" 29 | } 30 | EOF 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/02-configure-host-volume/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Create Data Directory 8 | mkdir -p /opt/mysql/data 9 | 10 | # Restart Nomad client 11 | systemctl restart nomad 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Check Node Status 17 | nomad node status -short -self 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/03-deploy-mysql/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad" /root/.bash_history || fail-message "You have not navigated to the /root/nomad directory on the Server yet." 6 | 7 | grep -q "nomad\s\+job\s\+run\s\+mysql.nomad" /root/.bash_history || fail-message "You have not run the mysql.nomad job on the Server yet." 8 | 9 | grep -q "nomad\s\+job\s\+status\s\+mysql-server" /root/.bash_history || fail-message "You have not checked the status of the mysql-server job on the Server yet." 10 | 11 | mysql_job_status=$(nomad job status -short mysql-server | grep Status | grep running | wc -l) 12 | if [ $mysql_job_status -ne 1 ]; then 13 | fail-message "The mysql-server job is not running." 
14 | fi 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/03-deploy-mysql/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | cat <<-EOF > /root/nomad/mysql.nomad 4 | job "mysql-server" { 5 | datacenters = ["dc1"] 6 | type = "service" 7 | 8 | group "mysql-server" { 9 | count = 1 10 | 11 | network { 12 | port "db" { 13 | static = 3306 14 | } 15 | } 16 | 17 | volume "mysql_volume" { 18 | type = "host" 19 | read_only = false 20 | source = "mysql" 21 | } 22 | 23 | task "mysql-server" { 24 | driver = "docker" 25 | 26 | volume_mount { 27 | volume = "mysql_volume" 28 | destination = "/var/lib/mysql" 29 | read_only = false 30 | } 31 | 32 | env { 33 | MYSQL_ROOT_PASSWORD = "password" 34 | } 35 | 36 | config { 37 | image = "rberlind/mysql-demo:latest" 38 | ports = ["db"] 39 | } 40 | 41 | resources { 42 | cpu = 500 43 | memory = 1024 44 | } 45 | 46 | service { 47 | name = "mysql-server" 48 | port = "db" 49 | 50 | check { 51 | type = "tcp" 52 | interval = "10s" 53 | timeout = "2s" 54 | } 55 | } 56 | } 57 | } 58 | } 59 | EOF 60 | 61 | exit 0 62 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/03-deploy-mysql/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Change Directory 8 | cd /root/nomad 9 | 10 | # Run the mysql.nomad Job 11 | nomad job run mysql.nomad 12 | 13 | # Sleep 14 | sleep 60 15 | 16 | # Check the status of the Job 17 | nomad job status mysql-server 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/04-write-data/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "mysql\s\+-h\s\+mysql-server.service.consul\s\+-u\s\+web\s\+-ppassword\s\+-D\s\+itemcollection" /root/.bash_history || fail-message "You have not connected to the MySQL database on Nomad Client 1 yet." 6 | 7 | glove_result=$(mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -s -N -e "select count(*) from items where name='glove';") 8 | 9 | if [ $glove_result -ne 1 ]; then 10 | fail-message "The MySQL table does not contain 'glove'." 
11 | fi 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/04-write-data/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | apt install mysql-client-core-8.0 5 | exit 0 6 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/04-write-data/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Connect to the MySQL Database and Insert item 8 | mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -e "INSERT INTO items (name) VALUES ('glove');" 9 | 10 | exit 0 11 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/05-stop-and-restart-job/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | rm /root/.bash_history 4 | touch /root/.bash_history 5 | apt install mysql-client-core-8.0 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/05-stop-and-restart-job/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Change Directory 8 | cd /root/nomad 9 | 10 | # Stop and Purge the Job 11 | nomad job stop -purge mysql-server 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Check Nomad's status 17 | nomad status 18 | 19 | # Rerun the mysql.nomad Job 20 | nomad job run mysql.nomad 21 | 22 | # Sleep 23 | sleep 60 24 | 25 | # Check the status of the Job 26 | nomad job status mysql-server 27 | 28 | # Run query against the Database 29 | mysql -h mysql-server.service.consul -u web -ppassword -D itemcollection -s -N -e "select * from items" 30 | 31 | exit 0 32 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-host-volumes/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "3" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: 127.0.0.1:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-client-1 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: 127.0.0.1:8500 17 | machine_type: n1-standard-1 18 | - name: nomad-client-2 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: 127.0.0.1:8500 23 | machine_type: n1-standard-1 24 | - name: nomad-client-3 25 | image: instruqt-hashicorp/hashistack-2004-0-13-1 26 | shell: /bin/bash -l 27 | environment: 28 | CONSUL_HTTP_ADDR: 127.0.0.1:8500 29 | machine_type: n1-standard-1 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/01-verify-agents/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the status of the Nomad server yet." 6 | 7 | nomad_servers=$(nomad server members | grep alive | wc -l) 8 | if [ "$nomad_servers" -lt "1" ]; then 9 | fail-message "The command 'nomad server members' did not show an alive server. Something went wrong" 10 | fi 11 | 12 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the status of the Nomad client nodes yet." 13 | 14 | nomad_clients=$(nomad node status | grep ready | wc -l) 15 | if [ "$nomad_clients" -lt "2" ]; then 16 | fail-message "The command 'nomad node status' did not show 2 ready clients. Something went wrong" 17 | fi 18 | 19 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You haven't checked the status of the Consul server yet." 20 | 21 | consul_members=$(consul members | grep alive | wc -l) 22 | if [ "$consul_members" -lt "3" ]; then 23 | fail-message "The command 'consul members' did not show 3 alive nodes. Something went wrong" 24 | fi 25 | 26 | grep -q "vault\s\+status" /root/.bash_history || fail-message "You haven't checked the status of the Vault server yet." 27 | 28 | vault_seal_status=$(vault status | grep Sealed | grep false | wc -l) 29 | if [ "$vault_seal_status" -ne "1" ]; then 30 | fail-message "Vault is not running. 
Something went wrong" 31 | fi 32 | 33 | exit 0 34 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/01-verify-agents/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Sleep 8 | sleep 30 9 | 10 | # Check the Nomad cluster members 11 | nomad server members 12 | 13 | # Check the Nomad nodes 14 | nomad node status 15 | 16 | # Check the Consul server 17 | consul members 18 | 19 | # Check the Vault server 20 | vault status 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/02-create-the-nomad-server-policy-and-token/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/hashistack/vault" /root/.bash_history || fail-message "You haven't changed directories to /root/hashistack/vault yet." 6 | 7 | grep -q "vault\s\+policy\s\+write\s\+nomad-server\s\+nomad-server-policy.hcl" /root/.bash_history || fail-message "You haven't written your Nomad policy to Vault yet." 8 | 9 | vault policy read nomad-server 10 | if [ "$?" -ne "0" ]; then 11 | fail-message "The Vault policy nomad-server does not exist yet." 12 | fi 13 | 14 | grep -q "vault\s\+token\s\+create\s\+-policy\s\+nomad-server\s\+-period\s\+72h\s\+-orphan\s\+>\s\+/root/hashistack/nomad/nomad-token.txt" /root/.bash_history || fail-message "You haven't created your Nomad token yet." 15 | 16 | nomad_token=$(cat /root/hashistack/nomad/nomad-token.txt | grep nomad-server | wc -l) 17 | if [ "$nomad_token" -lt "2" ]; then 18 | fail-message "The Nomad Vault token does not appear to have been generated correctly yet." 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/02-create-the-nomad-server-policy-and-token/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/hashistack/vault 9 | 10 | # Write the Nomad policy to Vault 11 | vault policy write nomad-server nomad-server-policy.hcl 12 | 13 | # Write the Nomad policy to Vault 14 | vault token create -policy nomad-server -period 72h -orphan > /root/hashistack/nomad/nomad-token.txt 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/03-create-a-vault-token-role/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "vault\s\+write\s\+auth/token/roles/nomad-cluster\s\+@nomad-cluster-role.json" /root/.bash_history || fail-message "You haven't created your token role yet." 
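# A minimal sketch of a stricter verification (an illustration, not part of the
# original check script), mirroring the 'vault policy read' pattern used in the
# previous challenge: confirm the token role really exists in Vault rather than
# only checking bash history.
vault read auth/token/roles/nomad-cluster > /dev/null 2>&1 || fail-message "The nomad-cluster token role does not exist in Vault yet."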
6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/03-create-a-vault-token-role/setup-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Write Server main config file 6 | cat <<-EOF > /root/hashistack/vault/nomad-cluster-role.json 7 | { 8 | "allowed_policies": "access-tables", 9 | "token_explicit_max_ttl": 0, 10 | "name": "nomad-cluster", 11 | "orphan": true, 12 | "token_period": 259200, 13 | "renewable": true 14 | } 15 | EOF 16 | 17 | # Set home directory 18 | echo -e "cd /root/hashistack/vault" >> /root/.bash_profile 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/03-create-a-vault-token-role/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | cd /root/hashistack/vault 8 | 9 | # Create Token Role 10 | vault write auth/token/roles/nomad-cluster @nomad-cluster-role.json 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/04-reconfigure-the-nomad-server/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | vault_enabled=$(grep "enabled = true" /root/hashistack/nomad/server.hcl | wc -l) 6 | if [ "$vault_enabled" -ne "2" ]; then 7 | fail-message "You have not enabled Vault in Nomad's server.hcl configuration file yet." 8 | fi 9 | 10 | token_replaced=$(grep "your nomad server token" /root/hashistack/nomad/server.hcl | wc -l) 11 | if [ "$token_replaced" -ne "0" ]; then 12 | fail-message "You have not added your Vault token to Nomad's server.hcl configuration file yet." 13 | fi 14 | 15 | grep -q "systemctl\s\+restart\s\+nomad" /root/.bash_history || fail-message "You haven't restarted the Nomad agent yet." 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/04-reconfigure-the-nomad-server/setup-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | exit 0 6 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/04-reconfigure-the-nomad-server/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Remove the current configuration 8 | rm /root/hashistack/nomad/server.hcl 9 | 10 | # Get the Genrated Nomad token 11 | token=$(sed -n 3p /root/hashistack/nomad/nomad-token.txt | cut -d' ' -f17) 12 | 13 | # Create a new configuration file 14 | cat <<-EOF > /root/hashistack/nomad/server.hcl 15 | # Set the Datacenter 16 | datacenter = "instruqt" 17 | 18 | # Setup data dir 19 | data_dir = "/root/hashistack/nomad/server" 20 | 21 | # Give the agent a unique name. 
Defaults to hostname 22 | name = "server" 23 | 24 | # Enable the server 25 | server { 26 | enabled = true 27 | 28 | # Self-elect, should be 3 or 5 for production 29 | bootstrap_expect = 1 30 | } 31 | 32 | # Vault 33 | vault { 34 | enabled = true 35 | address = "http://active.vault.service.consul:8200" 36 | task_token_ttl = "1h" 37 | create_from_role = "nomad-cluster" 38 | token = "$token" 39 | } 40 | EOF 41 | 42 | # Start Nomad Agent 43 | systemctl restart nomad 44 | 45 | # Sleep 46 | sleep 60 47 | 48 | exit 0 49 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/05-deploy-a-database/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+run\s\+/root/hashistack/nomad/db.nomad" /root/.bash_history || fail-message "You haven't run the database deployment job yet." 6 | 7 | grep -q "nomad\s\+status\s\+database" /root/.bash_history || fail-message "You haven't checked the status of the deployed job yet." 8 | 9 | database_status=$(nomad status database | grep "Status .* running" | wc -l) 10 | if [ "$database_status" -ne "1" ]; then 11 | fail-message "The Nomad database job is not running yet." 12 | fi 13 | 14 | exit 0 15 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/05-deploy-a-database/setup-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Write Server main config file 6 | cat <<-EOF > /root/hashistack/nomad/db.nomad 7 | job "database" { 8 | datacenters = ["instruqt"] 9 | 10 | group "db" { 11 | 12 | network { 13 | port "db" { 14 | static = 5432 15 | to = 5432 16 | } 17 | } 18 | 19 | task "server" { 20 | driver = "docker" 21 | 22 | config { 23 | image = "pgryzan/demo-db:latest" 24 | ports = ["db"] 25 | } 26 | 27 | service { 28 | name = "database" 29 | port = "db" 30 | 31 | check { 32 | type = "tcp" 33 | interval = "2s" 34 | timeout = "2s" 35 | } 36 | } 37 | } 38 | } 39 | } 40 | EOF 41 | 42 | exit 0 43 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/05-deploy-a-database/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Create Token Role 8 | nomad run /root/hashistack/nomad/db.nomad 9 | 10 | sleep 60 11 | 12 | # Write the Nomad policy to Vault 13 | nomad status database 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/06-configure-the-vault-database-secrets-engine/setup-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Write Server main config file 6 | cat <<-EOF > /root/hashistack/vault/connection.json 7 | { 8 | "plugin_name": "postgresql-database-plugin", 9 | "allowed_roles": "accessdb", 10 | "connection_url": "postgresql://{{username}}:{{password}}@database.service.consul:5432/postgres?sslmode=disable", 11 | "username": "demo", 12 | "password": "demo" 13 | } 14 | EOF 15 | 16 | # Write the SQL needed to allow Vault to create users in Postgres 17 | cat <<-EOF > /root/hashistack/vault/accessdb.sql 18 | 
CREATE USER "{{name}}" WITH ENCRYPTED PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; 19 | ALTER USER "{{name}}" WITH SUPERUSER; 20 | EOF 21 | 22 | # Write the access-tables Policy 23 | cat <<-EOF > /root/hashistack/vault/access-tables-policy.hcl 24 | path "database/creds/accessdb" { 25 | capabilities = ["read"] 26 | } 27 | EOF 28 | 29 | exit 0 -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/06-configure-the-vault-database-secrets-engine/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Enable the Database secrets engine 8 | vault secrets enable database 9 | 10 | # Sleep 11 | sleep 30 12 | 13 | # Create the Database Connection 14 | cd /root/hashistack/vault 15 | vault write database/config/postgresql @connection.json 16 | 17 | # Create a Vault Role to Manage Database Privileges 18 | vault write database/roles/accessdb db_name=postgresql creation_statements=@accessdb.sql default_ttl=1h max_ttl=24h 19 | 20 | # Create PostgreSQL Credentials 21 | vault read database/creds/accessdb 22 | 23 | # Create the access-tables Policy 24 | vault policy write access-tables access-tables-policy.hcl 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/07-deploy-an-application/check-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+run\s\+/root/hashistack/nomad/web.nomad" /root/.bash_history || fail-message "You haven't deployed your application yet." 6 | 7 | web_status=$(nomad status web | grep "Status .* running" | wc -l) 8 | if [ "$web_status" -ne "1" ]; then 9 | fail-message "The Nomad web job is not running yet." 10 | fi 11 | 12 | grep -q "dig\s\++short\s\+SRV\s\+web.service.consul." /root/.bash_history || fail-message "You haven't used dig to find a record yet." 13 | 14 | grep -qE "curl.*http://web.service.consul:3000/api.*|.*jq" /root/.bash_history || fail-message "You haven't curled your application yet." 15 | 16 | products_length=$(curl http://web.service.consul:3000/api | jq '. | length') 17 | if [ "$products_length" -ne "6" ]; then 18 | fail-message "The web application is not showing all 6 HashiCorp products." 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/07-deploy-an-application/solve-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Deploy the Application 8 | nomad run /root/hashistack/nomad/web.nomad 9 | 10 | # Sleep 11 | sleep 60 12 | 13 | # Dig the Application 14 | dig +short SRV web.service.consul. 15 | 16 | # Curl the Application 17 | curl -s http://web.service.consul:3000/api | tee /dev/fd/2 | jq . 
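# For reference (an illustration, not part of the original solve script): the
# corresponding check script asserts that the API returns all 6 HashiCorp
# products, which can be reproduced directly with the same jq expression:
curl -s http://web.service.consul:3000/api | jq '. | length'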
18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "3" 5 | virtualmachines: 6 | - name: hashistack-server 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: hashistack-server:8500 11 | VAULT_ADDR: http://127.0.0.1:8200/ 12 | machine_type: n1-standard-1 13 | - name: hashistack-client-1 14 | image: instruqt-hashicorp/hashistack-2004-0-13-1 15 | shell: /bin/bash -l 16 | environment: 17 | CONSUL_HTTP_ADDR: hashistack-client-1:8500 18 | VAULT_ADDR: http://active.vault.service.consul:8200/ 19 | machine_type: n1-standard-1 20 | - name: hashistack-client-2 21 | image: instruqt-hashicorp/hashistack-2004-0-13-1 22 | shell: /bin/bash -l 23 | environment: 24 | CONSUL_HTTP_ADDR: hashistack-client-2:8500 25 | VAULT_ADDR: http://active.vault.service.consul:8200/ 26 | machine_type: n1-standard-1 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/track_scripts/setup-hashistack-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | service boundary-controller stop 9 | service boundary-worker stop 10 | service waypoint stop 11 | systemctl disable boundary-controller 12 | systemctl disable boundary-worker 13 | systemctl disable waypoint 14 | 15 | rm /etc/systemd/resolved.conf.d/instruqt.conf 16 | 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/track_scripts/setup-hashistack-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | service boundary-controller stop 9 | service boundary-worker stop 10 | service waypoint stop 11 | systemctl disable boundary-controller 12 | systemctl disable boundary-worker 13 | systemctl disable waypoint 14 | 15 | rm /etc/systemd/resolved.conf.d/instruqt.conf 16 | 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-integration-with-vault/track_scripts/setup-hashistack-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | service boundary-controller stop 9 | service boundary-worker stop 10 | service waypoint stop 11 | systemctl disable boundary-controller 12 | systemctl disable boundary-worker 13 | systemctl disable waypoint 14 | 15 | rm /etc/systemd/resolved.conf.d/instruqt.conf 16 | 17 | 
-------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: nomad-server-1:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-client-1 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: nomad-client-1:8500 17 | machine_type: n1-standard-2 18 | - name: nomad-client-2 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: nomad-client-2:8500 23 | machine_type: n1-standard-1 24 | - name: nomad-client-3 25 | image: instruqt-hashicorp/hashistack-2004-0-13-1 26 | shell: /bin/bash -l 27 | environment: 28 | CONSUL_HTTP_ADDR: nomad-client-3:8500 29 | machine_type: n1-standard-1 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/deploy-the-jobs/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # 6 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 7 | 8 | grep -q "nomad\s\+job\s\+run\s\+webapp.nomad" /root/.bash_history || fail-message "You have not run the webapp.nomad job on the Nomad server yet." 9 | 10 | grep -q "nomad\s\+job\s\+status\s\+webapp" /root/.bash_history || fail-message "You have not checked the status of the webapp job with the CLI on the Nomad server yet." 11 | 12 | grep -q "nomad\s\+job\s\+run\s\+traefik.nomad" /root/.bash_history || fail-message "You have not run the traefik.nomad job on the Nomad server yet." 13 | 14 | grep -q "nomad\s\+job\s\+status\s\+traefik" /root/.bash_history || fail-message "You have not checked the status of the traefik job on the Nomad server yet." 
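# A minimal sketch of a stricter verification (an illustration, not part of the
# original check script), following the pattern used in the nomad-host-volumes
# track: confirm both jobs are actually running rather than only inspecting
# bash history.
for job in webapp traefik; do
  job_running=$(nomad job status -short $job | grep Status | grep running | wc -l)
  if [ $job_running -ne 1 ]; then
    fail-message "The $job job is not running."
  fi
done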
15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/deploy-the-jobs/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Run webapp Job 11 | nomad job run webapp.nomad 12 | 13 | # Sleep 14 | sleep 60 15 | 16 | # Check webapp job Status 17 | nomad job status webapp 18 | 19 | # Run traefik Job 20 | nomad job run traefik.nomad 21 | 22 | # Sleep 23 | sleep 60 24 | 25 | # Check traefik Status 26 | nomad job status traefik 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-affinity/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Purge history 6 | rm /root/.bash_history 7 | touch /root/.bash_history 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-affinity/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Edit the webapp.nomad job specification 11 | sed -i '6,9d' webapp.nomad 12 | sed -i 's/count = 6/count = 6\n\n\t\taffinity {\n\t\t\tattribute = "${attr.platform.gce.machine-type}"\n\t\t\tvalue = "n1-standard-2"\n\t\t\tweight = 100\n\t\t}/g' webapp.nomad 13 | 14 | # Stop and purge the webapp job 15 | nomad job stop -purge webapp 16 | 17 | # Re-run webapp Job 18 | nomad job run webapp.nomad 19 | 20 | # Sleep 21 | sleep 60 22 | 23 | # Check webapp job Status 24 | nomad job status webapp 25 | 26 | # Determine the url of the first webapp allocation 27 | first_allocation=$(nomad job status webapp | grep Allocations -A2 | grep webapp | cut -d' ' -f1) 28 | 29 | # Get verbose details for the allocation 30 | nomad alloc status -verbose $first_allocation 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-constraint/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # 6 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 7 | 8 | grep -q "constraint" /root/nomad/jobs/traefik.nomad || fail-message "You have not added the constraint stanza to the traefik.nomad job on the Nomad server yet." 9 | 10 | fgrep -q "\${node.unique.name}" /root/nomad/jobs/traefik.nomad || fail-message "You have not added '${node.unique.name}' to the constraint stanza yet." 11 | 12 | fgrep -q "client1" /root/nomad/jobs/traefik.nomad || fail-message "You have not added 'client1' to the constraint yet." 13 | 14 | grep -q "nomad\s\+job\s\+run\s\+traefik.nomad" /root/.bash_history || fail-message "You have not re-run the traefik.nomad job on the Nomad server yet." 15 | 16 | grep -q "curl\s\+http://nomad-client-1:8080/myapp" /root/.bash_history || fail-message "You have not used 'curl' against one of the web app allocations on the Nomad server yet." 
17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-constraint/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Purge history 6 | rm /root/.bash_history 7 | touch /root/.bash_history 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-constraint/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Add a constraint stanza 11 | sed -i 's/count = 1/count = 1\n\n\t\tconstraint {\n\t\t\tattribute = "${node.unique.name}"\n\t\t\tvalue = "client1"\n\t\t}/g' traefik.nomad 12 | 13 | # Re-run the traefik Job 14 | nomad job run traefik.nomad 15 | 16 | # Sleep 17 | sleep 60 18 | 19 | # Determine the url of the first webapp allocation 20 | first_allocation=$(nomad job status webapp | grep Allocations -A2 | grep webapp | cut -d' ' -f1) 21 | first_allocation_url=$(nomad alloc status $first_allocation | grep http | cut -d' ' -f12) 22 | 23 | # Curl the first webapp allocation 24 | curl $first_allocation_url 25 | 26 | # Curl via Traefik 27 | curl http://nomad-client-1:8080/myapp 28 | 29 | exit 0 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-spread/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 6 | 7 | grep -q "spread" /root/nomad/jobs/webapp.nomad || fail-message "You have not added the spread stanza to the traefik.nomad job on the Nomad server yet." 8 | 9 | fgrep -q "\${node.unique.name}" /root/nomad/jobs/webapp.nomad || fail-message "You have not added '${node.unique.name}' to the spread stanza yet." 10 | 11 | grep -q "nomad\s\+job\s\+run\s\+webapp.nomad" /root/.bash_history || fail-message "You have not re-run the webapp.nomad job on the Nomad server yet." 
12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-spread/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Purge history 6 | rm /root/.bash_history 7 | touch /root/.bash_history 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/use-spread/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Edit the webapp.nomad job specification 11 | sed -i 's/count = 6/count = 6\n\n\t\tspread {\n\t\t\tattribute = "${node.unique.name}"\n\t\t}/g' webapp.nomad 12 | 13 | # Re-run the webapp.nomad Job 14 | nomad job run webapp.nomad 15 | 16 | # Sleep 17 | sleep 60 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/verify-nomad-cluster-health/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You have not run 'consul members' on the Server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You have not run 'nomad server members' on the Server yet." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You have not run 'nomad node status' on the Server yet." 10 | 11 | consul_clients=$(consul members | grep alive | wc -l) 12 | if [ $consul_clients -ne 4 ]; then 13 | echo "There are not 4 running Consul clients." 14 | exit 1 15 | fi 16 | 17 | nomad_servers=$(nomad server members | grep alive | wc -l) 18 | if [ $nomad_servers -ne 1 ]; then 19 | echo "The Nomad server is not running." 20 | exit 1 21 | fi 22 | 23 | nomad_clients=$(nomad node status | grep ready | wc -l) 24 | if [ $nomad_clients -ne 3 ]; then 25 | echo "There are not 3 running Nomad clients." 26 | exit 1 27 | fi 28 | 29 | exit 0 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-job-placement/verify-nomad-cluster-health/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Run consul members 8 | consul members 9 | 10 | # Run nomad server members 11 | nomad server members 12 | 13 | # Run nomad node status 14 | nomad node status 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/01-fabio-and-prometheus-jobs/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that the fabio.nomad job was run 6 | grep -q "nomad\s\+job\s\+run\s\+fabio.nomad" /root/.bash_history || fail-message "You haven't run the fabio.nomad job yet." 7 | 8 | # Check that the prometheus1.nomad job was run 9 | grep -q "nomad\s\+job\s\+run\s\+prometheus1.nomad" /root/.bash_history || fail-message "You haven't run the prometheus1.nomad job yet." 
10 | 11 | # Check that the status of the fabio job was checked 12 | grep -q "nomad\s\+job\s\+status\s\+fabio" /root/.bash_history || fail-message "You haven't checked the status of the fabio job yet." 13 | 14 | # Check that the status of the prometheus job was checked 15 | grep -q "nomad\s\+job\s\+status\s\+prometheus" /root/.bash_history || fail-message "You haven't checked the status of the prometheus job yet." 16 | 17 | consul_clients=$(consul members | grep alive |wc -l) 18 | if [ $consul_clients -ne 4 ]; then 19 | fail-message "There are not 4 running Consul clients." 20 | fi 21 | 22 | nomad_servers=$(nomad server members | grep alive | wc -l) 23 | if [ $nomad_servers -ne 1 ]; then 24 | fail-message "There is not 1 running Nomad server." 25 | fi 26 | 27 | nomad_clients=$(nomad node status | grep ready | wc -l) 28 | if [ $nomad_clients -ne 3 ]; then 29 | fail-message "There are not 3 running Nomad clients." 30 | fi 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/01-fabio-and-prometheus-jobs/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to nomad directory 8 | cd nomad 9 | 10 | # Run the fabio job 11 | nomad job run fabio.nomad 12 | 13 | # Run the prometheus1.nomad job 14 | nomad job run prometheus1.nomad 15 | 16 | # Check status of the fabio job 17 | nomad job status fabio 18 | 19 | # Check status of the prometheus job 20 | nomad job status prometheus 21 | 22 | # sleep 23 | sleep 60 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/02-add-alertmanager/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that the alertmanager.nomad job was run 6 | grep -q "nomad\s\+job\s\+run\s\+alertmanager.nomad" /root/.bash_history || fail-message "You haven't run the alertmanager.nomad job yet." 7 | 8 | # Check that the status of the alertmanager job was checked 9 | grep -q "nomad\s\+job\s\+status\s\+alertmanager" /root/.bash_history || fail-message "You haven't checked the status of the alertmanager job yet." 10 | 11 | # Check that the prometheus2.nomad job was run 12 | grep -q "nomad\s\+job\s\+run\s\+prometheus2.nomad" /root/.bash_history || fail-message "You haven't run the prometheus2.nomad job yet." 13 | 14 | # Check that the status of the prometheus job was checked 15 | grep -q "nomad\s\+job\s\+status\s\+prometheus" /root/.bash_history || fail-message "You haven't re-checked the status of the prometheus job yet." 
16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/02-add-alertmanager/setup-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | rm /root/.bash_history 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/02-add-alertmanager/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # cd to nomad directory 8 | cd nomad 9 | 10 | # Run the alertmanager.nomad job 11 | nomad job run alertmanager.nomad 12 | 13 | # Check the status of the alertmanager job 14 | nomad job status alertmanager 15 | 16 | # Run the prometheus2.nomad job 17 | nomad job run prometheus2.nomad 18 | 19 | # Check the status of the prometheus job 20 | nomad job status prometheus 21 | 22 | 23 | exit 0 24 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/03-add-web-server/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that the webserver.nomad job was run 6 | grep -q "nomad\s\+job\s\+run\s\+webserver.nomad" /root/.bash_history || fail-message "You haven't run the webserver.nomad job yet." 7 | 8 | # Check that the status of the webserver job was checked 9 | grep -q "nomad\s\+job\s\+status\s\+webserver" /root/.bash_history || fail-message "You haven't checked the status of the webserver job yet." 10 | 11 | # Check that the webserver job was stopped 12 | grep -q "nomad\s\+job\s\+stop\s\+webserver" /root/.bash_history || fail-message "You haven't stopped the webserver job yet." 13 | 14 | # Check that the webserver job was re-run (count matching history lines rather than using grep -q) 15 | webserver_runs=$(grep -c "nomad\s\+job\s\+run\s\+webserver.nomad" /root/.bash_history) 16 | if [ "$webserver_runs" -lt "2" ]; then 17 | fail-message "You haven't re-started the webserver job yet."
18 | exit 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/03-add-web-server/setup-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | rm /root/.bash_history 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/03-add-web-server/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # cd to nomad directory 8 | cd nomad 9 | 10 | # Run the webserver.nomad job 11 | nomad job run webserver.nomad 12 | 13 | # Check the status of the webserver job 14 | nomad job status webserver 15 | 16 | # Stop the webserver job 17 | nomad job stop webserver 18 | 19 | # Re-run the webserver job 20 | nomad job run webserver.nomad 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-monitoring/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "3" 5 | virtualmachines: 6 | - name: nomad-server 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: http://nomad-server:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-client-1 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: http://nomad-client-1:8500 17 | machine_type: n1-standard-1 18 | - name: nomad-client-2 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: http://nomad-client-2:8500 23 | machine_type: n1-standard-1 24 | - name: nomad-client-3 25 | image: instruqt-hashicorp/hashistack-2004-0-13-1 26 | shell: /bin/bash -l 27 | environment: 28 | CONSUL_HTTP_ADDR: http://nomad-client-3:8500 29 | machine_type: n1-standard-1 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/01-federation/check-nomad-server-1-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad.*server.*join.*4648" /root/.bash_history || fail-message "you have not joined your Nomad clusters on the Nomad_Server_East tab yet." 6 | 7 | # Check status of Nomad servers (after joining) 8 | nomad_servers=$(nomad server members | grep alive | wc -l) 9 | if [ "$nomad_servers" -lt "2" ]; then 10 | fail-message "The command 'nomad server members' on the Nomad_Server_East tab did not show two Nomad servers, one in each region. Something went wrong with the cluster setup or you did not yet join the clusters together." 11 | fi 12 | 13 | # Check status of Nomad clients 14 | nomad_clients=$(nomad node status | grep ready | wc -l) 15 | if [ "$nomad_clients" -lt "2" ]; then 16 | fail-message "The command 'nomad node status' did not show 2 ready Nomad clients in the east cluster. 
Something went wrong" 17 | fi 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/01-federation/check-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check status of Nomad servers (after joining) 6 | nomad_servers=$(nomad server members | grep alive | wc -l) 7 | if [ "$nomad_servers" -lt "2" ]; then 8 | fail-message "The command 'nomad server members' on the Nomad_Server_West tab did not show two Nomad servers, one in each region. Something went wrong with the cluster setup or you did not yet join the clusters together." 9 | fi 10 | 11 | # Validate that Nomad clients are running 12 | nomad_clients=$(nomad node status | grep ready | wc -l) 13 | if [ "$nomad_clients" -lt "2" ]; then 14 | fail-message "The command 'nomad node status' did not show 2 ready Nomad clients in the West cluster. Something went wrong" 15 | fi 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/01-federation/solve-nomad-server-1-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | HISTFILE=/root/.bash_history 4 | set -o history 5 | 6 | # Check node status 7 | nomad node status 8 | 9 | # Check server status 10 | nomad server members 11 | 12 | # Check status of the west region 13 | nomad status -region=west 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/01-federation/solve-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | HISTFILE=/root/.bash_history 4 | set -o history 5 | 6 | # Sleep 7 | sleep 30 8 | 9 | # Check node status 10 | nomad node status 11 | 12 | # Check server status 13 | nomad server members 14 | 15 | # Get IP of server in west region 16 | server_ip=$(ip addr show ens4 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1) 17 | 18 | # SSH to the east server and join the clusters 19 | ssh -o StrictHostKeyChecking=no nomad-server-1-east << ENDSSH 20 | HISTFILE=/root/.bash_history 21 | set -o history 22 | nomad server join $server_ip:4648 23 | ENDSSH 24 | 25 | # Sleep 26 | sleep 15 27 | 28 | # Check servers again 29 | nomad server members 30 | 31 | # Check status for east region 32 | nomad status -region=east 33 | 34 | exit 0 35 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/02-multi-region-deployments/check-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad.*job.*run.*/root/nomad/jobs/multi-redis.nomad" /root/.bash_history || fail-message "you have not run the multi-redis job on the Nomad_Server_West tab yet." 6 | 7 | job_status=$(nomad job status example | grep Multiregion -A4 | grep successful | wc -l) 8 | if [ "$job_status" -lt "2" ]; then 9 | fail-message "The example job's deployments are not successful in both regions yet." 
10 | fi 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/02-multi-region-deployments/setup-nomad-server-1-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | mkdir /root/nomad/jobs 4 | 5 | # Write multi-region job spec 6 | cat <<-EOF > /root/nomad/jobs/multi-redis.nomad 7 | job "example" { 8 | 9 | multiregion { 10 | 11 | strategy { 12 | max_parallel = 1 13 | on_failure = "fail_all" 14 | } 15 | 16 | region "west" { 17 | count = 1 18 | datacenters = ["west-1"] 19 | } 20 | 21 | region "east" { 22 | count = 1 23 | datacenters = ["east-1"] 24 | } 25 | 26 | } 27 | 28 | update { 29 | max_parallel = 1 30 | min_healthy_time = "10s" 31 | healthy_deadline = "2m" 32 | progress_deadline = "3m" 33 | auto_revert = true 34 | auto_promote = true 35 | canary = 1 36 | } 37 | 38 | 39 | group "cache" { 40 | 41 | count = 0 42 | 43 | network { 44 | port "db" { 45 | to = 6379 46 | } 47 | } 48 | 49 | task "redis" { 50 | driver = "docker" 51 | 52 | config { 53 | image = "redis:6.0" 54 | ports = ["db"] 55 | } 56 | 57 | resources { 58 | cpu = 256 59 | memory = 128 60 | } 61 | } 62 | } 63 | } 64 | EOF 65 | 66 | exit 0 67 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/02-multi-region-deployments/setup-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | mkdir /root/nomad/jobs 4 | 5 | # Write multi-region job spec 6 | cat <<-EOF > /root/nomad/jobs/multi-redis.nomad 7 | job "example" { 8 | 9 | multiregion { 10 | 11 | strategy { 12 | max_parallel = 1 13 | on_failure = "fail_all" 14 | } 15 | 16 | region "west" { 17 | count = 1 18 | datacenters = ["west-1"] 19 | } 20 | 21 | region "east" { 22 | count = 1 23 | datacenters = ["east-1"] 24 | } 25 | 26 | } 27 | 28 | update { 29 | max_parallel = 1 30 | min_healthy_time = "10s" 31 | healthy_deadline = "2m" 32 | progress_deadline = "3m" 33 | auto_revert = true 34 | auto_promote = true 35 | canary = 1 36 | } 37 | 38 | 39 | group "cache" { 40 | 41 | count = 0 42 | 43 | network { 44 | port "db" { 45 | to = 6379 46 | } 47 | } 48 | 49 | task "redis" { 50 | driver = "docker" 51 | 52 | config { 53 | image = "redis:6.0" 54 | ports = ["db"] 55 | } 56 | 57 | resources { 58 | cpu = 256 59 | memory = 128 60 | } 61 | } 62 | } 63 | } 64 | EOF 65 | 66 | exit 0 67 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/02-multi-region-deployments/solve-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | HISTFILE=/root/.bash_history 4 | set -o history 5 | 6 | # Run the job 7 | nomad job run /root/nomad/jobs/multi-redis.nomad 8 | 9 | # Sleep 10 | sleep 60 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/03-simulate-failed-deployment/check-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | # Check that the multi-redis.nomad job was edited to contain fail_local 6 | grep -q "fail_local" /root/nomad/jobs/multi-redis.nomad || fail-message "You have not edited the multi-redis.nomad file to set on_failure to fail_local yet." 
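# Note: roughly speaking, on_failure = "fail_local" marks only the region whose deployment
# failed as failed; deployments in the other regions remain blocked until an operator runs
# 'nomad deployment unblock', which is what the following checks verify.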
7 | 8 | # Check that the job was run again 9 | grep -q "nomad.*job.*run.*/root/nomad/jobs/multi-redis.nomad" /root/.bash_history || fail-message "you have not redeployed the multi-redis job on the Nomad_Server_West tab yet." 10 | 11 | # Check that the deployment in the west region was unblocked 12 | grep -q "nomad.*deployment.*unblock.*-region.*west" /root/.bash_history || fail-message "you have not unblocked the deployment of the job in the west region on the Nomad_Server_West tab yet." 13 | 14 | # Check that the job was stopped 15 | # grep -q "nomad.*job.*stop.*-global.*example" /root/.bash_history || fail-message "you have not stopped the job on the Nomad_Server_West tab yet." 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/03-simulate-failed-deployment/solve-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | HISTFILE=/root/.bash_history 4 | set -o history 5 | 6 | # Edit the job file 7 | sed -i "s/fail_all/fail_local/g" /root/nomad/jobs/multi-redis.nomad 8 | 9 | # Run the job again 10 | nomad job run /root/nomad/jobs/multi-redis.nomad 11 | 12 | # Sleep 13 | sleep 60 14 | 15 | # Get the deployment ID for the west region 16 | west_deployment_id=$(nomad job status example | grep Multiregion -A4 | grep blocked | cut -d' ' -f5) 17 | 18 | # Unblock the deployment in the west region 19 | nomad deployment unblock -region west $west_deployment_id 20 | 21 | # Sleep 22 | sleep 15 23 | 24 | # Stop and purge the job 25 | # nomad job stop -global example 26 | 27 | exit 0 28 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-client-1-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-client-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! 
-f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-client-2-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-client-2-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-server-1-east: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! 
-f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-region-federation/track_scripts/setup-nomad-server-1-west: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euvo pipefail 4 | 5 | while [ ! -f /opt/instruqt/bootstrap/host-bootstrap-completed ] 6 | do 7 | echo "Waiting for Instruqt to finish booting the virtual machine" 8 | sleep 1 9 | done 10 | 11 | # Ensure we always have fresh copies of the Consul and Nomad 12 | # Enterprise licenses, which we store as Instruqt secrets 13 | 14 | # This trick with awk keeps the contents of the secret 15 | # environment variables from being logged 16 | echo "Writing license files" 17 | awk 'BEGIN {print ENVIRON["HC_CONSUL_LICENSE"]}' > /var/consul-license.hclic < /dev/null 18 | awk 'BEGIN {print ENVIRON["HC_NOMAD_LICENSE"]}' > /var/nomad-license.hclic < /dev/null 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-client1.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Nomad client yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-client2.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Nomad client yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server1.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Nomad server yet." 6 | 7 | nomad_servers=$(nomad server members | grep alive | wc -l) 8 | if [ $nomad_servers -ne 3 ]; then 9 | fail-message "There are not 3 running Nomad servers." 10 | fi 11 | 12 | nomad_clients=$(nomad node status | grep ready | wc -l) 13 | if [ $nomad_clients -ne 2 ]; then 14 | fail-message "There are not 2 running Nomad clients." 
15 | fi 16 | 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/check-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server2.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Nomad server yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/check-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server3.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your third Nomad server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the members of your Nomad cluster yet. Please do this on the Server 3 tab." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the staus of your Nomad nodes (clients) yet. Please do this on the Server 3 tab." 10 | 11 | exit 0 12 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | # Uncomment out mapping of 127.0.0.1 to hostname in /etc/hosts and options in /etc/resolv.conf 9 | sed -i "/${HOSTNAME}/d" /etc/hosts 10 | sed -i '/options/d' /etc/resolv.conf 11 | 12 | # Some pings to estabish connectivity 13 | ping -c 1 nomad-server-1 14 | ping -c 1 nomad-server-2 15 | ping -c 1 nomad-server-3 16 | ping -c 1 nomad-client-1 17 | ping -c 1 nomad-client-2 18 | 19 | mkdir nomad 20 | 21 | # Write Nomad Client 1 Config 22 | cat <<-EOF > /root/nomad/nomad-client1.hcl 23 | # Setup data dir 24 | data_dir = "/tmp/nomad/client1" 25 | 26 | # Give the agent a unique name. 27 | name = "client1" 28 | 29 | # Enable the client 30 | client { 31 | enabled = true 32 | servers = ["nomad-server-1"] 33 | } 34 | EOF 35 | 36 | exit 0 37 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | # Uncomment out mapping of 127.0.0.1 to hostname in /etc/hosts and options in /etc/resolv.conf 9 | sed -i "/${HOSTNAME}/d" /etc/hosts 10 | sed -i '/options/d' /etc/resolv.conf 11 | 12 | # Some pings to estabish connectivity 13 | ping -c 1 nomad-server-1 14 | ping -c 1 nomad-server-2 15 | ping -c 1 nomad-server-3 16 | ping -c 1 nomad-client-1 17 | ping -c 1 nomad-client-2 18 | 19 | mkdir nomad 20 | 21 | # Write Nomad Client 2 Config 22 | cat <<-EOF > /root/nomad/nomad-client2.hcl 23 | # Setup data dir 24 | data_dir = "/tmp/nomad/client2" 25 | 26 | # Give the agent a unique name. 
27 | name = "client2" 28 | 29 | # Enable the client 30 | client { 31 | enabled = true 32 | servers = ["nomad-server-1"] 33 | } 34 | EOF 35 | 36 | exit 0 37 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/setup-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | # Uncomment out mapping of 127.0.0.1 to hostname in /etc/hosts and options in /etc/resolv.conf 9 | sed -i "/${HOSTNAME}/d" /etc/hosts 10 | sed -i '/options/d' /etc/resolv.conf 11 | 12 | # Some pings to estabish connectivity 13 | ping -c 1 nomad-server-1 14 | ping -c 1 nomad-server-2 15 | ping -c 1 nomad-server-3 16 | ping -c 1 nomad-client-1 17 | ping -c 1 nomad-client-2 18 | 19 | mkdir nomad 20 | 21 | # Write Nomad Server Config 22 | cat <<-EOF > /root/nomad/nomad-server2.hcl 23 | # Setup data dir 24 | data_dir = "/tmp/nomad/server2" 25 | 26 | # Give the agent a unique name. 27 | name = "server2" 28 | 29 | # Enable the server 30 | server { 31 | enabled = true 32 | bootstrap_expect = 3 33 | server_join { 34 | retry_join = ["nomad-server-1"] 35 | } 36 | } 37 | EOF 38 | 39 | exit 0 40 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/setup-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | until [ -f /opt/instruqt/bootstrap/host-bootstrap-completed ]; do 4 | echo "Waiting for instruqt bootstrap to complete" 5 | sleep 1 6 | done 7 | 8 | # Uncomment out mapping of 127.0.0.1 to hostname in /etc/hosts and options in /etc/resolv.conf 9 | sed -i "/${HOSTNAME}/d" /etc/hosts 10 | sed -i '/options/d' /etc/resolv.conf 11 | 12 | # Some pings to estabish connectivity 13 | ping -c 1 nomad-server-1 14 | ping -c 1 nomad-server-2 15 | ping -c 1 nomad-server-3 16 | ping -c 1 nomad-client-1 17 | ping -c 1 nomad-client-2 18 | 19 | mkdir nomad 20 | 21 | # Write Nomad Server Config 22 | cat <<-EOF > /root/nomad/nomad-server3.hcl 23 | # Setup data dir 24 | data_dir = "/tmp/nomad/server3" 25 | 26 | # Give the agent a unique name. 
27 | name = "server3" 28 | 29 | # Enable the server 30 | server { 31 | enabled = true 32 | bootstrap_expect = 3 33 | server_join { 34 | retry_join = ["nomad-server-1"] 35 | } 36 | } 37 | EOF 38 | 39 | exit 0 40 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Nomad agent 10 | nomad agent -config nomad-client1.hcl > nomad.log 2>&1 & 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/solve-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Nomad agent 10 | nomad agent -config nomad-client2.hcl > nomad.log 2>&1 & 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Nomad server 10 | nomad agent -config nomad-server1.hcl > nomad.log 2>&1 & 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/solve-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Nomad server 10 | nomad agent -config nomad-server2.hcl > nomad.log 2>&1 & 11 | 12 | exit 0 13 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/01-manual-clustering/solve-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Nomad server 10 | nomad agent -config nomad-server3.hcl > nomad.log 2>&1 & 11 | 12 | # Check the Nomad cluster members 13 | nomad server members 14 | 15 | # Check the Nomad nodes 16 | nomad node status 17 | 18 | # Sleep 19 | sleep 30 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+agent\s\+-config-file\s\+consul-client1.json\s\+>\s\+consul.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Consul client yet." 6 | 7 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-client1.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Nomad client yet." 
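# Note: unlike the manual-clustering challenge, the nomad-client1.hcl written by the setup script
# no longer lists any servers; the Nomad client is expected to discover the Nomad servers through
# the local Consul agent verified above.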
8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+agent\s\+-config-file\s\+consul-client2.json\s\+>\s\+consul.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Consul client yet." 6 | 7 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-client2.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Nomad client yet." 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+agent\s\+-config-file\s\+consul-server1.json\s\+>\s\+consul.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Consul server yet." 6 | 7 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server1.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Nomad server yet." 8 | 9 | consul_clients=$(consul members | grep alive |wc -l) 10 | if [ $consul_clients -ne 5 ]; then 11 | fail-message "There are not 5 running Consul clients." 12 | fi 13 | 14 | nomad_servers=$(nomad server members | grep alive | wc -l) 15 | if [ $nomad_servers -ne 3 ]; then 16 | fail-message "There are not 3 running Nomad servers." 17 | fi 18 | 19 | nomad_clients=$(nomad node status | grep ready | wc -l) 20 | if [ $nomad_clients -ne 2 ]; then 21 | fail-message "There are not 2 running Nomad clients." 22 | fi 23 | 24 | exit 0 25 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/check-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+agent\s\+-config-file\s\+consul-server2.json\s\+>\s\+consul.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Consul server yet." 6 | 7 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server2.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Nomad server yet." 8 | 9 | exit 0 10 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/check-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+agent\s\+-config-file\s\+consul-server3.json\s\+>\s\+consul.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your third Consul server yet." 6 | 7 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You haven't checked the members of your Consul cluster yet." 8 | 9 | grep -q "nomad\s\+agent\s\+-config\s\+nomad-server3.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your third Nomad server yet." 10 | 11 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the members of your Nomad cluster yet. 
Please do this on the Server 3 tab." 12 | 13 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the staus of your Nomad nodes (clients) yet. Please do this on the Server 3 tab." 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | pkill nomad 4 | rm -fr /tmp/nomad 5 | 6 | # Write Nomad Client 1 Config 7 | cat <<-EOF > /root/nomad/nomad-client1.hcl 8 | # Setup data dir 9 | data_dir = "/tmp/nomad/client1" 10 | 11 | # Give the agent a unique name. 12 | name = "client1" 13 | 14 | # Enable the client 15 | client { 16 | enabled = true 17 | } 18 | EOF 19 | 20 | # Write Consul Client 1 Config 21 | cat <<-EOF > /root/nomad/consul-client1.json 22 | { 23 | "ui": true, 24 | "log_level": "INFO", 25 | "data_dir": "/tmp/consul/client1", 26 | "node_name": "client1", 27 | "bind_addr": "{{ GetInterfaceIP \"ens4\" }}", 28 | "client_addr": "{{ GetInterfaceIP \"ens4\" }}", 29 | "retry_join": [ 30 | "nomad-server-1", 31 | "nomad-server-2", 32 | "nomad-server-3" 33 | ], 34 | "ports": { 35 | "grpc": 8502 36 | }, 37 | "connect": { 38 | "enabled": true 39 | } 40 | } 41 | EOF 42 | 43 | # Install CNI plugins 44 | curl -L -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz 45 | mkdir -p /opt/cni/bin 46 | tar -C /opt/cni/bin -xzf cni-plugins.tgz 47 | 48 | # Configure iptables 49 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables 50 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables 51 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | pkill nomad 4 | rm -fr /tmp/nomad 5 | 6 | # Write Nomad Client 2 Config 7 | cat <<-EOF > /root/nomad/nomad-client2.hcl 8 | # Setup data dir 9 | data_dir = "/tmp/nomad/client2" 10 | 11 | # Give the agent a unique name 12 | name = "client2" 13 | 14 | # Enable the client 15 | client { 16 | enabled = true 17 | } 18 | EOF 19 | 20 | # Write Consul Client 2 Config 21 | cat <<-EOF > /root/nomad/consul-client2.json 22 | { 23 | "ui": true, 24 | "log_level": "INFO", 25 | "data_dir": "/tmp/consul/client2", 26 | "node_name": "client2", 27 | "bind_addr": "{{ GetInterfaceIP \"ens4\" }}", 28 | "client_addr": "{{ GetInterfaceIP \"ens4\" }}", 29 | "retry_join": [ 30 | "nomad-server-1", 31 | "nomad-server-2", 32 | "nomad-server-3" 33 | ], 34 | "ports": { 35 | "grpc": 8502 36 | }, 37 | "connect": { 38 | "enabled": true 39 | } 40 | } 41 | EOF 42 | 43 | # Install CNI plugins 44 | curl -L -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz 45 | mkdir -p /opt/cni/bin 46 | tar -C /opt/cni/bin -xzf cni-plugins.tgz 47 | 48 | # Configure iptables 49 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables 50 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables 51 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- 
/instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/setup-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | 4 | pkill nomad 5 | rm -fr /tmp/nomad 6 | 7 | # Write Nomad Server 2 Config 8 | cat <<-EOF > /root/nomad/nomad-server2.hcl 9 | # Setup data dir 10 | data_dir = "/tmp/nomad/server2" 11 | 12 | # Give the agent a unique name. 13 | name = "server2" 14 | 15 | # Enable the server 16 | server { 17 | enabled = true 18 | bootstrap_expect = 3 19 | } 20 | EOF 21 | 22 | # Write Consul Server 2 Config 23 | cat <<-EOF > /root/nomad/consul-server2.json 24 | { 25 | "server": true, 26 | "ui": true, 27 | "log_level": "INFO", 28 | "data_dir": "/tmp/consul/server2", 29 | "node_name": "server2", 30 | "bind_addr": "{{ GetInterfaceIP \"ens4\" }}", 31 | "client_addr": "{{ GetInterfaceIP \"ens4\" }}", 32 | "bootstrap_expect": 3, 33 | "retry_join": [ 34 | "nomad-server-1", 35 | "nomad-server-3" 36 | ], 37 | "ports": { 38 | "grpc": 8502 39 | }, 40 | "connect": { 41 | "enabled": true 42 | } 43 | } 44 | EOF 45 | 46 | # Install CNI plugins 47 | curl -L -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz 48 | mkdir -p /opt/cni/bin 49 | tar -C /opt/cni/bin -xzf cni-plugins.tgz 50 | 51 | # Configure iptables 52 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables 53 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables 54 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 55 | 56 | exit 0 57 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/setup-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | 4 | pkill nomad 5 | rm -fr /tmp/nomad 6 | 7 | # Write Nomad Server 3 Config 8 | cat <<-EOF > /root/nomad/nomad-server3.hcl 9 | # Setup data dir 10 | data_dir = "/tmp/nomad/server3" 11 | 12 | # Give the agent a unique name. 
13 | name = "server3" 14 | 15 | # Enable the server 16 | server { 17 | enabled = true 18 | bootstrap_expect = 3 19 | } 20 | EOF 21 | 22 | # Write Consul Server 3 Config 23 | cat <<-EOF > /root/nomad/consul-server3.json 24 | { 25 | "server": true, 26 | "ui": true, 27 | "log_level": "INFO", 28 | "data_dir": "/tmp/consul/server3", 29 | "node_name": "server3", 30 | "bind_addr": "{{ GetInterfaceIP \"ens4\" }}", 31 | "client_addr": "{{ GetInterfaceIP \"ens4\" }}", 32 | "bootstrap_expect": 3, 33 | "retry_join": [ 34 | "nomad-server-1", 35 | "nomad-server-2" 36 | ], 37 | "ports": { 38 | "grpc": 8502 39 | }, 40 | "connect": { 41 | "enabled": true 42 | } 43 | } 44 | EOF 45 | 46 | # Install CNI plugins 47 | curl -L -o cni-plugins.tgz https://github.com/containernetworking/plugins/releases/download/v0.8.3/cni-plugins-linux-amd64-v0.8.3.tgz 48 | mkdir -p /opt/cni/bin 49 | tar -C /opt/cni/bin -xzf cni-plugins.tgz 50 | 51 | # Configure iptables 52 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-arptables 53 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-ip6tables 54 | echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables 55 | 56 | exit 0 57 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Consul agent 10 | consul agent -config-file consul-client1.json > consul.log 2>&1 & 11 | 12 | # Sleep 13 | sleep 15 14 | 15 | # Run the Nomad agent 16 | nomad agent -config nomad-client1.hcl > nomad.log 2>&1 & 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/solve-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Consul agent 10 | consul agent -config-file consul-client2.json > consul.log 2>&1 & 11 | 12 | # Sleep 13 | sleep 15 14 | 15 | # Run the Nomad agent 16 | nomad agent -config nomad-client2.hcl > nomad.log 2>&1 & 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Consul agent 10 | consul agent -config-file consul-server1.json > consul.log 2>&1 & 11 | 12 | # Sleep 13 | sleep 15 14 | 15 | # Run the Nomad server 16 | nomad agent -config nomad-server1.hcl > nomad.log 2>&1 & 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/solve-nomad-server-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Consul agent 10 | consul agent -config-file consul-server2.json > consul.log 2>&1 & 11 | 12 | # Sleep 13 | sleep 15 14 | 15 | # Run 
the Nomad server 16 | nomad agent -config nomad-server2.hcl > nomad.log 2>&1 & 17 | 18 | exit 0 19 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/02-automatic-clustering-with-consul/solve-nomad-server-3: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the Consul agent 10 | consul agent -config-file consul-server3.json > consul.log 2>&1 & 11 | 12 | # Sleep 13 | sleep 15 14 | 15 | # Check the members of the Consul cluster 16 | consul members 17 | 18 | # Run the Nomad server 19 | nomad agent -config nomad-server3.hcl > nomad.log 2>&1 & 20 | 21 | # Sleep 22 | sleep 30 23 | 24 | # Check the Nomad cluster members 25 | nomad server members 26 | 27 | # Check the Nomad nodes 28 | nomad node status 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/03-nomad-and-consul-connect/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+job\s\+run\s\+connect.nomad" /root/.bash_history || fail-message "You haven't run the connect.nomad job yet." 6 | 7 | # Check that countdash job is running correctly 8 | nomad_allocations=$(nomad job status countdash | grep Healthy -A3 | grep "1 1 1" | wc -l) 9 | if [ $nomad_allocations -ne 2 ]; then 10 | fail-message "The countdash job does not have 2 healthy allocations." 11 | fi 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/03-nomad-and-consul-connect/setup-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Write connect.nomad to /root/nomad/connect.nomad 4 | cat <<-EOF > /root/nomad/connect.nomad 5 | job "countdash" { 6 | datacenters = ["dc1"] 7 | 8 | group "api" { 9 | network { 10 | mode = "bridge" 11 | } 12 | 13 | service { 14 | name = "count-api" 15 | port = "9001" 16 | 17 | connect { 18 | sidecar_service {} 19 | } 20 | } 21 | 22 | task "web" { 23 | driver = "docker" 24 | 25 | config { 26 | image = "hashicorpnomad/counter-api:v1" 27 | } 28 | } 29 | } 30 | 31 | group "dashboard" { 32 | network { 33 | mode = "bridge" 34 | 35 | port "http" { 36 | static = 9002 37 | to = 9002 38 | } 39 | } 40 | 41 | service { 42 | name = "count-dashboard" 43 | port = "9002" 44 | 45 | connect { 46 | sidecar_service { 47 | proxy { 48 | upstreams { 49 | destination_name = "count-api" 50 | local_bind_port = 8080 51 | } 52 | } 53 | } 54 | } 55 | } 56 | 57 | task "dashboard" { 58 | driver = "docker" 59 | 60 | env { 61 | COUNTING_SERVICE_URL = "http://\${NOMAD_UPSTREAM_ADDR_count_api}" 62 | } 63 | 64 | config { 65 | image = "hashicorpnomad/counter-dashboard:v1" 66 | } 67 | } 68 | } 69 | } 70 | EOF 71 | 72 | exit 0 73 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/03-nomad-and-consul-connect/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | cd nomad 8 | 9 | # Run the connect.nomad Job 10 | nomad job run connect.nomad 11 | 12 | # sleep 13 | sleep 60 14 | 
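# Note: once countdash is healthy, the dashboard group listens on static port 9002 and reaches
# the count-api service through its Connect sidecar upstream bound locally on port 8080 (see the
# upstreams block and COUNTING_SERVICE_URL in connect.nomad above).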
15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-multi-server-cluster/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "3" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: nomad-server-1:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-server-2 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: nomad-server-2:8500 17 | machine_type: n1-standard-1 18 | - name: nomad-server-3 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: nomad-server-3:8500 23 | machine_type: n1-standard-1 24 | - name: nomad-client-1 25 | image: instruqt-hashicorp/hashistack-2004-0-13-1 26 | shell: /bin/bash -l 27 | environment: 28 | CONSUL_HTTP_ADDR: nomad-client-1:8500 29 | machine_type: n1-standard-1 30 | - name: nomad-client-2 31 | image: instruqt-hashicorp/hashistack-2004-0-13-1 32 | shell: /bin/bash -l 33 | environment: 34 | CONSUL_HTTP_ADDR: nomad-client-2:8500 35 | machine_type: n1-standard-1 36 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | machine_type: n1-standard-1 10 | - name: nomad-client-1 11 | image: instruqt-hashicorp/hashistack-2004-0-13-1 12 | shell: /bin/bash -l 13 | machine_type: n1-standard-1 14 | - name: nomad-client-2 15 | image: instruqt-hashicorp/hashistack-2004-0-13-1 16 | shell: /bin/bash -l 17 | machine_type: n1-standard-1 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/create-first-job-spec/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+job\s\+init\s\+-short" /root/.bash_history || fail-message "You have not created the sample Nomad job specification file yet." 
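# Note: 'nomad job init -short' writes a minimal example.nomad job specification (a single redis
# task using the Docker driver) into the current directory; the later challenges plan, run,
# modify, and stop this example job.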
6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/create-first-job-spec/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to the Nomad directory 8 | cd nomad 9 | 10 | # Create a new job specification file 11 | nomad job init -short 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/modifying-a-job/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "count\s\+=\s\+3" /root/nomad/example.nomad || fail-message "You have not set the count to 3 in example.nomad yet." 6 | 7 | grep -q "cd\s\+nomad" /root/.bash_history || fail-message "You have not navigated to the nomad directory yet." 8 | 9 | grep -q "nomad\s\+job\s\+plan\s\+example.nomad" /root/.bash_history || fail-message "You have not planned the Nomad job yet." 10 | 11 | grep -q "nomad\s\+job\s\+run\s\+-check-index.*example.nomad" /root/.bash_history || fail-message "You have not run the planned Nomad job yet or forgot to use the '-check-index' argument." 12 | 13 | grep -q "nomad\s\+job\s\+stop\s\+example" /root/.bash_history || fail-message "You have not stopped the job with the Nomad CLI yet." 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/modifying-a-job/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to the Nomad directory 8 | cd nomad 9 | 10 | # Change count to 3 in example.nomad 11 | sed -i 's/task/count = 3\n\t\ttask/g' example.nomad 12 | 13 | # Plan the job 14 | nomad job plan example.nomad > planjob.log 15 | 16 | # Extract the job modify index 17 | job_modify_index=$(sed -n 1p planjob.log | cut -d'"' -f2) 18 | 19 | # Run the job 20 | nomad job run -check-index $job_modify_index example.nomad 21 | 22 | # Stop the job 23 | nomad job stop example 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/check-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+client1.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your first Nomad client yet." 6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/check-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+client2.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your second Nomad client yet." 
6 | 7 | exit 0 8 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "nomad\s\+agent\s\+-config\s\+server.hcl\s\+>\s\+nomad.log\s\+2>&1\s\+&" /root/.bash_history || fail-message "You haven't run your Nomad server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You haven't checked the members of your Nomad cluster yet." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You haven't checked the status of your Nomad nodes (clients) yet." 10 | 11 | nomad_servers=$(nomad server members | grep alive | wc -l) 12 | if [ $nomad_servers -ne 1 ]; then 13 | fail-message "There is not 1 running Nomad server." 14 | fi 15 | 16 | nomad_clients=$(nomad node status | grep ready | wc -l) 17 | if [ $nomad_clients -ne 2 ]; then 18 | fail-message "There are not 2 running Nomad clients." 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/setup-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Remove the hostname mapping from /etc/hosts and the options line from /etc/resolv.conf 4 | sed -i "/${HOSTNAME}/d" /etc/hosts 5 | sed -i '/options/d' /etc/resolv.conf 6 | 7 | # Some pings to establish connectivity 8 | ping -c 1 nomad-server 9 | ping -c 1 nomad-client-1 10 | ping -c 1 nomad-client-2 11 | 12 | mkdir nomad 13 | 14 | # Write Nomad Client 1 Config 15 | cat <<-EOF > /root/nomad/client1.hcl 16 | # Setup data dir 17 | data_dir = "/tmp/nomad/client1" 18 | 19 | # Give the agent a unique name. Defaults to hostname 20 | name = "client1" 21 | 22 | # Enable the client 23 | client { 24 | enabled = true 25 | servers = ["nomad-server"] 26 | } 27 | EOF 28 | 29 | exit 0 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/setup-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # Remove the hostname mapping from /etc/hosts and the options line from /etc/resolv.conf 4 | sed -i "/${HOSTNAME}/d" /etc/hosts 5 | sed -i '/options/d' /etc/resolv.conf 6 | 7 | # Some pings to establish connectivity 8 | ping -c 1 nomad-server 9 | ping -c 1 nomad-client-1 10 | ping -c 1 nomad-client-2 11 | 12 | mkdir nomad 13 | 14 | # Write Nomad Client 2 Config 15 | cat <<-EOF > /root/nomad/client2.hcl 16 | # Setup data dir 17 | data_dir = "/tmp/nomad/client2" 18 | 19 | # Give the agent a unique name.
Defaults to hostname 20 | name = "client2" 21 | 22 | # Enable the client 23 | client { 24 | enabled = true 25 | servers = ["nomad-server"] 26 | } 27 | EOF 28 | 29 | exit 0 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/solve-nomad-client-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to nomad directory 8 | cd nomad 9 | 10 | # Run the Nomad agent 11 | nomad agent -config client1.hcl > nomad.log 2>&1 & 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/solve-nomad-client-2: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to nomad directory 8 | cd nomad 9 | 10 | # Run the Nomad agent 11 | nomad agent -config client2.hcl > nomad.log 2>&1 & 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-the-server-and-clients/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # cd to nomad directory 8 | cd nomad 9 | 10 | # Run the Nomad server 11 | nomad agent -config server.hcl > nomad.log 2>&1 & 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Check the Nomad cluster members 17 | nomad server members 18 | 19 | # Check the Nomad nodes 20 | nomad node status 21 | 22 | # Sleep 23 | sleep 30 24 | 25 | exit 0 26 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-your-first-job/check-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+nomad" /root/.bash_history || fail-message "You have not navigated to the nomad directory yet." 6 | 7 | grep -q "nomad\s\+job\s\+run\s\+example.nomad" /root/.bash_history || fail-message "You have not run the Nomad job yet." 8 | 9 | grep -q "nomad\s\+job\s\+status\s\+example" /root/.bash_history || fail-message "You have not checked the status of the Nomad job yet." 10 | 11 | grep -q "nomad\s\+eval\s\+status\s\+.*" /root/.bash_history || fail-message "You have not checked the status of the Nomad job's evaluation yet." 12 | 13 | grep -q "nomad\s\+alloc\s\+status\s\+.*" /root/.bash_history || fail-message "You have not checked the status of the Nomad job's allocation yet." 14 | 15 | grep -q "nomad\s\+alloc\s\+logs\s\+.*\s\+redis" /root/.bash_history || fail-message "You have not inspected the logs of the redis task of the Nomad job yet." 16 | 17 | # Check that the example job is running correctly 18 | nomad_allocations=$(nomad job status example | grep Healthy -A3 | grep "1 1 1" | wc -l) 19 | if [ $nomad_allocations -ne 1 ]; then 20 | fail-message "The example job does not have 1 healthy allocation."
21 | fi 22 | 23 | exit 0 24 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-simple-cluster/run-your-first-job/solve-nomad-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # CD to the Nomad directory 8 | cd nomad 9 | 10 | # Run the job 11 | nomad job run example.nomad > runjob.log 12 | 13 | # Sleep 14 | sleep 60 15 | 16 | # Extract evaluation and allocation IDs 17 | eval_id=$(sed -n 1p runjob.log | cut -d'"' -f2) 18 | alloc_id=$(sed -n 3p runjob.log | cut -d'"' -f2) 19 | 20 | # Check the job status 21 | nomad job status example 22 | 23 | # Check the evaluation status 24 | nomad eval status $eval_id 25 | 26 | # Check the allocation status 27 | nomad alloc status $alloc_id 28 | 29 | # Inspect the redis task logs from the job 30 | nomad alloc logs $alloc_id redis 31 | 32 | exit 0 33 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/Makefile: -------------------------------------------------------------------------------- 1 | ../../common/Makefile -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/blue-green-deployment/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 6 | 7 | grep -q "nomad\s\+job\s\+plan\s\+chat-app-light-docker.nomad" /root/.bash_history || fail-message "You have not planned the blue/green deployment of the chat-app job on the Nomad server yet." 8 | 9 | grep -q "nomad\s\+job\s\+run.*chat-app-light-docker.nomad" /root/.bash_history || fail-message "You have not run the blue/green deployment of the chat-app job on the Nomad server yet." 10 | 11 | grep -q "nomad\s\+job\s\+status\s\+chat-app" /root/.bash_history || fail-message "You have not checked the status of the chat-app job on the Nomad server yet." 12 | 13 | # Comment this out to allow Nomad UI to be used 14 | # grep -q "nomad\s\+deployment\s\+promote" /root/.bash_history || fail-message "You have not promoted the blue/green deployment of the chat-app job on the Nomad server yet." 
15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/blue-green-deployment/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Plan a blue/green deployment of the chat-app job 11 | nomad job plan chat-app-light-docker.nomad 12 | 13 | # Run a blue/green deployment of the chat-app job 14 | nomad job run -detach chat-app-light-docker.nomad 15 | 16 | # Sleep 17 | sleep 180 18 | 19 | # Check the status of the blue/green deployment 20 | nomad job status chat-app > blue-green-status.txt 21 | 22 | # Determine the ID of the active deployment 23 | blue_green_deployment_id=$(cat blue-green-status.txt | grep "Latest Deployment" -A1 | grep ID | cut -d'=' -f2 | cut -d' ' -f2) 24 | 25 | # Promote the deployment 26 | nomad deployment promote $blue_green_deployment_id 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/canary-deployment/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 6 | 7 | grep -q "nomad\s\+job\s\+plan\s\+chat-app-dark-docker.nomad" /root/.bash_history || fail-message "You have not planned the canary deployment of the chat-app job on the Nomad server yet." 8 | 9 | grep -q "nomad\s\+job\s\+run.*chat-app-dark-docker.nomad" /root/.bash_history || fail-message "You have not run the canary deployment of the chat-app job on the Nomad server yet." 10 | 11 | grep -q "nomad\s\+job\s\+status\s\+chat-app" /root/.bash_history || fail-message "You have not checked the status of the canary deployment of the chat-app job on the Nomad server yet." 12 | 13 | # Comment out so Nomad UI can also be used. 14 | #grep -q "nomad\s\+deployment\s\+fail" /root/.bash_history || fail-message "You have not failed the canary deployment of the chat-app job on the Nomad server yet." 15 | 16 | exit 0 17 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/canary-deployment/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Plan the job 11 | nomad job plan chat-app-dark-docker.nomad 12 | 13 | # Run the job 14 | nomad job run -detach chat-app-dark-docker.nomad 15 | 16 | # Sleep 17 | sleep 120 18 | 19 | # Check the status 20 | nomad job status chat-app > canary-status.txt 21 | 22 | # Determine the ID of the active deployment 23 | canary_deployment_id=$(cat canary-status.txt | grep "Latest Deployment" -A1 | grep ID | cut -d'=' -f2 | cut -d' ' -f2) 24 | 25 | # Roll back the deployment 26 | nomad deployment fail $canary_deployment_id 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/config.yml: -------------------------------------------------------------------------------- 1 | # Copyright (c) HashiCorp, Inc. 
2 | # SPDX-License-Identifier: MPL-2.0 3 | 4 | version: "2" 5 | virtualmachines: 6 | - name: nomad-server-1 7 | image: instruqt-hashicorp/hashistack-2004-0-13-1 8 | shell: /bin/bash -l 9 | environment: 10 | CONSUL_HTTP_ADDR: nomad-server-1:8500 11 | machine_type: n1-standard-1 12 | - name: nomad-client-1 13 | image: instruqt-hashicorp/hashistack-2004-0-13-1 14 | shell: /bin/bash -l 15 | environment: 16 | CONSUL_HTTP_ADDR: nomad-client-1:8500 17 | machine_type: n1-standard-1 18 | - name: nomad-client-2 19 | image: instruqt-hashicorp/hashistack-2004-0-13-1 20 | shell: /bin/bash -l 21 | environment: 22 | CONSUL_HTTP_ADDR: nomad-client-2:8500 23 | machine_type: n1-standard-1 24 | - name: nomad-client-3 25 | image: instruqt-hashicorp/hashistack-2004-0-13-1 26 | shell: /bin/bash -l 27 | environment: 28 | CONSUL_HTTP_ADDR: nomad-client-3:8500 29 | machine_type: n1-standard-1 30 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/deploy-the-jobs/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 6 | 7 | grep -q "nomad\s\+job\s\+run.*mongodb.nomad" /root/.bash_history || fail-message "You have not run the mongodb.nomad job on the Nomad server yet." 8 | 9 | grep -q "nomad\s\+job\s\+status\s\+mongodb" /root/.bash_history || fail-message "You have not checked the status of the mongodb job with the CLI on the Nomad server yet." 10 | 11 | grep -q "nomad\s\+job\s\+run.*chat-app-light-binary.nomad" /root/.bash_history || fail-message "You have not run the chat-app-light-binary.nomad job on the Nomad server yet." 12 | 13 | grep -q "nomad\s\+job\s\+status\s\+chat-app" /root/.bash_history || fail-message "You have not checked the status of the chat-app job on the Nomad server yet." 14 | 15 | grep -q "nomad\s\+job\s\+run.*nginx.nomad" /root/.bash_history || fail-message "You have not run the nginx.nomad job on the Nomad server yet." 16 | 17 | exit 0 18 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/deploy-the-jobs/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Run mongodb Job 11 | nomad job run -detach mongodb.nomad 12 | 13 | # Sleep 14 | sleep 30 15 | 16 | # Check mongodb job Status 17 | nomad job status mongodb 18 | 19 | # Run chat-app Job 20 | nomad job run -detach chat-app-light-binary.nomad 21 | 22 | # Sleep 23 | sleep 120 24 | 25 | # Check chat-app Status 26 | nomad job status chat-app 27 | 28 | # Run nginx Job 29 | nomad job run -detach nginx.nomad 30 | 31 | # Sleep 32 | sleep 30 33 | 34 | exit 0 35 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/rolling-update/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "cd\s\+/root/nomad/jobs" /root/.bash_history || fail-message "You have not navigated to /root/nomad/jobs on the Nomad server yet." 
6 | 7 | grep -q "nomad\s\+job\s\+plan\s\+chat-app-dark-binary.nomad" /root/.bash_history || fail-message "You have not planned a new deployment of the chat-app job on the Nomad server yet." 8 | 9 | grep -q "nomad\s\+job\s\+run.*chat-app-dark-binary.nomad" /root/.bash_history || fail-message "You have not run the rolling update of the chat-app job on the Nomad server yet." 10 | 11 | grep -q "nomad\s\+job\s\+status\s\+chat-app" /root/.bash_history || fail-message "You have not checked the status of the rolling update of the chat-app job on the Nomad server yet." 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/rolling-update/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=~/.bash_history 5 | set -o history 6 | 7 | # Change directory 8 | cd /root/nomad/jobs 9 | 10 | # Plan chat-app.nomad deployment 11 | nomad job plan chat-app-dark-binary.nomad 12 | 13 | # Run chat-app.nomad Job 14 | nomad job run -detach chat-app-dark-binary.nomad 15 | 16 | # Sleep 17 | sleep 120 18 | 19 | # Check status of chat-app Job 20 | nomad job status chat-app 21 | 22 | exit 0 23 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/verify-nomad-cluster-health/check-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | set -e 4 | 5 | grep -q "consul\s\+members" /root/.bash_history || fail-message "You have not run 'consul members' on the Server yet." 6 | 7 | grep -q "nomad\s\+server\s\+members" /root/.bash_history || fail-message "You have not run 'nomad server members' on the Server yet." 8 | 9 | grep -q "nomad\s\+node\s\+status" /root/.bash_history || fail-message "You have not run 'nomad node status' on the Server yet." 10 | 11 | consul_clients=$(consul members | grep alive | wc -l) 12 | if [ $consul_clients -ne 4 ]; then 13 | fail-message "There are not 4 running Consul clients." 14 | fi 15 | 16 | nomad_servers=$(nomad server members | grep alive | wc -l) 17 | if [ $nomad_servers -ne 1 ]; then 18 | fail-message "The Nomad server is not running." 19 | fi 20 | 21 | nomad_clients=$(nomad node status | grep ready | wc -l) 22 | if [ $nomad_clients -ne 3 ]; then 23 | fail-message "There are not 3 running Nomad clients." 24 | fi 25 | 26 | exit 0 27 | -------------------------------------------------------------------------------- /instruqt-tracks/nomad-update-strategies/verify-nomad-cluster-health/solve-nomad-server-1: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | #Enable bash history 4 | HISTFILE=/root/.bash_history 5 | set -o history 6 | 7 | # Run consul members 8 | consul members 9 | 10 | # Run nomad server members 11 | nomad server members 12 | 13 | # Run nomad node status 14 | nomad node status 15 | 16 | exit 0 17 | --------------------------------------------------------------------------------
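Note (illustrative, not part of the repository): the chat-app job files referenced by the nomad-update-strategies scripts above (chat-app-light-docker.nomad, chat-app-dark-docker.nomad, chat-app-light-binary.nomad, chat-app-dark-binary.nomad) are not included in this excerpt. A minimal, hypothetical sketch of the kind of Nomad `update` stanza that drives the rolling, canary, and blue/green behavior those check and solve scripts exercise is shown below; every value is an assumption for orientation only, not taken from the actual job files.

  group "chat-app" {
    count = 3

    # Hypothetical update stanza; the real chat-app jobs may use different values.
    update {
      max_parallel     = 1      # rolling update: replace one allocation at a time
      canary           = 1      # canary/blue-green: start new-version allocations alongside the old ones
      min_healthy_time = "30s"  # how long an allocation must be healthy before it counts
      healthy_deadline = "5m"   # give up on an allocation that never becomes healthy
      auto_revert      = true   # roll back automatically if the deployment fails
      auto_promote     = false  # wait for an explicit 'nomad deployment promote <id>'
    }
  }

With auto_promote set to false, the new canary or green allocations sit unpromoted until `nomad deployment promote <id>` is run, which is what solve-nomad-server-1 does after parsing the deployment ID from `nomad job status chat-app`; the canary solve script instead abandons the canaries with `nomad deployment fail <id>`.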