├── .devcontainer ├── Dockerfile └── devcontainer.json ├── .fixtures.yml ├── .gitattributes ├── .github └── workflows │ └── build.yaml ├── .gitignore ├── .pdkignore ├── .puppet-lint.rc ├── .rspec ├── .rubocop.yml ├── .sync.yml ├── .travis-switch-to-github-actions.yml ├── .vscode └── extensions.json ├── .yardopts ├── CHANGELOG.md ├── DEVELOPMENT.md ├── Gemfile ├── LICENSE ├── Makefile ├── Puppetfile ├── README.md ├── REFERENCE.md ├── REFERENCE_CONFIGURATION.md ├── Rakefile ├── TODO.md ├── Vagrantfile ├── WINDOWS_UPDATE_PROBLEMS_AND_RESOLUTIONS.md ├── bolt-project.yaml ├── bolt └── inventory.yaml ├── files ├── bash │ ├── available_updates_deb.sh │ ├── available_updates_rh.sh │ ├── available_updates_sles.sh │ ├── os_test.sh │ ├── reboot_required_deb.sh │ ├── reboot_required_rh.sh │ ├── reboot_required_sles.sh │ ├── update_deb.sh │ ├── update_rh.sh │ └── update_sles.sh └── powershell │ └── TaskUtils.ps1 ├── functions ├── build_workflow.pp ├── filter_results.pp ├── process_errors.pp └── target_names.pp ├── images └── patching_architecture_bolt.png ├── lib ├── puppet │ └── functions │ │ └── patching │ │ └── snapshot_vmware.rb └── puppet_x │ └── encore │ └── patching │ ├── http_helper.rb │ └── orion_client.rb ├── manifests ├── init.pp ├── params.pp └── script.pp ├── metadata.json ├── pdk.yaml ├── plans ├── available_updates.pp ├── check_online.pp ├── check_puppet.pp ├── deploy_scripts.pp ├── get_facts.pp ├── get_targets.pp ├── init.pp ├── monitoring_multiple.pp ├── monitoring_prometheus.pp ├── monitoring_solarwinds.pp ├── ordered_groups.pp ├── post_update.pp ├── pre_post_update.pp ├── pre_update.pp ├── puppet_facts.pp ├── reboot_required.pp ├── set_facts.pp ├── snapshot_kvm.pp ├── snapshot_vmware.pp └── update_history.pp ├── spec ├── default_facts.yml └── spec_helper.rb └── tasks ├── available_updates.json ├── available_updates_linux.json ├── available_updates_linux.sh ├── available_updates_windows.json ├── available_updates_windows.ps1 ├── cache_remove.json ├── cache_remove_linux.json ├── cache_remove_linux.sh ├── cache_remove_windows.json ├── cache_remove_windows.ps1 ├── cache_update.json ├── cache_update_linux.json ├── cache_update_linux.sh ├── cache_update_windows.json ├── cache_update_windows.ps1 ├── history.json ├── monitoring_prometheus.json ├── monitoring_prometheus.rb ├── monitoring_solarwinds.json ├── monitoring_solarwinds.rb ├── post_update.json ├── pre_post_update_linux.json ├── pre_post_update_linux.sh ├── pre_post_update_windows.json ├── pre_post_update_windows.ps1 ├── pre_update.json ├── puppet_facts.json ├── puppet_facts.rb ├── reboot_required.json ├── reboot_required_linux.json ├── reboot_required_linux.sh ├── reboot_required_windows.json ├── reboot_required_windows.ps1 ├── snapshot_kvm.json ├── snapshot_kvm.sh ├── update.json ├── update_history.json ├── update_history_linux.json ├── update_history_linux.sh ├── update_history_windows.json ├── update_history_windows.ps1 ├── update_linux.json ├── update_linux.sh ├── update_windows.json └── update_windows.ps1 /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM puppet/pdk:latest 2 | 3 | # [Optional] Uncomment this section to install additional packages. 
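# (illustrative only) typical additions here are build tools needed for native gems,
# e.g. appending "build-essential git" to the commented install line below.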
4 | # RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 5 | # && apt-get -y install --no-install-recommends 6 | 7 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.140.1/containers/puppet 3 | { 4 | "name": "Puppet Development Kit (Community)", 5 | "dockerFile": "Dockerfile", 6 | 7 | // Set *default* container specific settings.json values on container create. 8 | "settings": { 9 | "terminal.integrated.shell.linux": "/bin/bash" 10 | }, 11 | 12 | // Add the IDs of extensions you want installed when the container is created. 13 | "extensions": [ 14 | "puppet.puppet-vscode", 15 | "rebornix.Ruby" 16 | ] 17 | 18 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 19 | // "forwardPorts": [], 20 | 21 | // Use 'postCreateCommand' to run commands after the container is created. 22 | // "postCreateCommand": "pdk --version", 23 | } 24 | -------------------------------------------------------------------------------- /.fixtures.yml: -------------------------------------------------------------------------------- 1 | # This file can be used to install module dependencies for unit testing 2 | # See https://github.com/puppetlabs/puppetlabs_spec_helper#using-fixtures for details 3 | --- 4 | fixtures: 5 | forge_modules: 6 | # stdlib: "puppetlabs/stdlib" 7 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.rb eol=lf 2 | *.erb eol=lf 3 | *.pp eol=lf 4 | *.sh eol=lf 5 | *.epp eol=lf 6 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: ['*'] 6 | tags: 7 | - v* 8 | pull_request: 9 | type: [opened, reopened, edited] 10 | schedule: 11 | # run every night at midnight 12 | - cron: '0 0 * * *' 13 | 14 | jobs: 15 | unit: 16 | name: '${{matrix.name}} - puppet (${{matrix.puppet}})' 17 | runs-on: ubuntu-latest 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | include: 22 | # note: actions/setup-ruby only allows using a major.minor release of ruby 23 | - ruby: '2.7' 24 | puppet: "7.0" 25 | check: "check:symlinks check:git_ignore check:dot_underscore check:test_file rubocop syntax lint metadata_lint" 26 | name: 'static' 27 | - ruby: '2.5' 28 | puppet: "6.0" 29 | check: "parallel_spec" 30 | name: 'spec' 31 | - ruby: '2.7' 32 | puppet: "7.0" 33 | check: "parallel_spec" 34 | name: 'spec' 35 | env: 36 | CHECK: '${{ matrix.check }}' 37 | PUPPET_GEM_VERSION: '~> ${{ matrix.puppet }}' 38 | # lock to 2.1.0 because 2.2.0 is causing issues during builds 39 | BUNDLER_GEM_VERSION: '~> 2.1.0' 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v2 43 | - name: Setup Ruby 44 | uses: actions/setup-ruby@v1 45 | with: 46 | ruby-version: '${{ matrix.ruby }}' 47 | - name: Bundle prep 48 | run: | 49 | gem install bundler -v "$BUNDLER_GEM_VERSION" 50 | bundle -v 51 | rm -f Gemfile.lock 52 | # Update system gems if requested. 
This is useful to temporarily workaround troubles in the test runner" 53 | # See https://github.com/puppetlabs/pdk-templates/commit/705154d5c437796b821691b707156e1b056d244f for an example of how this was used" 54 | # Ignore exit code of SIGPIPE'd yes to not fail with shell's pipefail set" 55 | [ -z "$RUBYGEMS_VERSION" ] || (yes || true) | gem update --system $RUBYGEMS_VERSION 56 | gem --version 57 | bundle -v 58 | bundle config path vendor/bundle 59 | bundle config without 'system_tests' 60 | bundle lock 61 | # restore cache AFTER doing 'bundle lock' so that Gemfile.lock exists 62 | - uses: actions/cache@v2 63 | with: 64 | path: vendor/bundle 65 | key: ${{ runner.os }}-${{ matrix.puppet }}-${{ matrix.ruby }}-gems-test-${{ hashFiles('**/Gemfile.lock') }} 66 | restore-keys: | 67 | ${{ runner.os }}-${{ matrix.puppet }}-${{ matrix.ruby }}-gems-test- 68 | - name: Bundle install 69 | run: | 70 | bundle install --jobs $(nproc) --retry 3 71 | - name: Test 72 | run: 'bundle exec rake $CHECK' 73 | 74 | deploy: 75 | name: 'deploy to forge' 76 | needs: unit 77 | runs-on: ubuntu-latest 78 | # only run deploy on tags that start with 'v' 79 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 80 | # define a "strategy" here so we can use ruby/puppet as variables below 81 | strategy: 82 | fail-fast: false 83 | matrix: 84 | include: 85 | # note: actions/setup-ruby only allows using a major.minor release of ruby 86 | - ruby: '2.7' 87 | puppet: "7.0" 88 | env: 89 | PUPPET_GEM_VERSION: '~> ${{ matrix.puppet }}' 90 | # lock to 2.1.0 because 2.2.0 is causing issues during builds 91 | BUNDLER_GEM_VERSION: '~> 2.1.0' 92 | steps: 93 | - name: Checkout repository 94 | uses: actions/checkout@v2 95 | - name: Setup Ruby 96 | uses: actions/setup-ruby@v1 97 | with: 98 | ruby-version: '${{ matrix.ruby }}' 99 | - name: Bundle prep 100 | run: | 101 | gem install bundler -v "$BUNDLER_GEM_VERSION" 102 | bundle -v 103 | rm -f Gemfile.lock 104 | # Update system gems if requested. 
This is useful to temporarily workaround troubles in the test runner" 105 | # See https://github.com/puppetlabs/pdk-templates/commit/705154d5c437796b821691b707156e1b056d244f for an example of how this was used" 106 | # Ignore exit code of SIGPIPE'd yes to not fail with shell's pipefail set" 107 | [ -z "$RUBYGEMS_VERSION" ] || (yes || true) | gem update --system $RUBYGEMS_VERSION 108 | gem --version 109 | bundle -v 110 | bundle config path vendor/bundle 111 | bundle config without 'system_tests' 112 | bundle lock 113 | # restore cache AFTER doing 'bundle lock' so that Gemfile.lock exists 114 | - uses: actions/cache@v2 115 | with: 116 | path: vendor/bundle 117 | key: ${{ runner.os }}-${{ matrix.puppet }}-${{ matrix.ruby }}-gems-test-${{ hashFiles('**/Gemfile.lock') }} 118 | restore-keys: | 119 | ${{ runner.os }}-${{ matrix.puppet }}-${{ matrix.ruby }}-gems-test- 120 | - name: Bundle install 121 | run: | 122 | bundle install --jobs $(nproc) --retry 3 123 | - name: Build and Deploy 124 | env: 125 | # TODO configure secrets here: 126 | # https://docs.github.com/en/free-pro-team@latest/actions/reference/encrypted-secrets 127 | BLACKSMITH_FORGE_USERNAME: '${{ secrets.PUPPET_FORGE_USERNAME }}' 128 | BLACKSMITH_FORGE_PASSWORD: '${{ secrets.PUPPET_FORGE_PASSWORD }}' 129 | run: | 130 | bundle exec rake module:build 131 | bundle exec rake module:push 132 | 133 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .*.sw[op] 3 | .metadata 4 | .yardoc 5 | .yardwarns 6 | *.iml 7 | /.bundle/ 8 | /.idea/ 9 | /.vagrant/ 10 | /coverage/ 11 | /bin/ 12 | /doc/ 13 | /Gemfile.local 14 | /Gemfile.lock 15 | /junit/ 16 | /log/ 17 | /pkg/ 18 | /spec/fixtures/manifests/ 19 | /spec/fixtures/modules/* 20 | /tmp/ 21 | /vendor/ 22 | /convert_report.txt 23 | /update_report.txt 24 | .DS_Store 25 | .project 26 | .envrc 27 | /inventory.yaml 28 | /spec/fixtures/litmus_inventory.yaml 29 | .resource_types 30 | .modules 31 | .task_cache.json 32 | .plan_cache.json 33 | .rerun.json 34 | bolt-debug.log 35 | .librarian 36 | .kitchen 37 | .tmp 38 | .bundle 39 | bolt.log 40 | Puppetfile.lock 41 | modules 42 | -------------------------------------------------------------------------------- /.pdkignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | .*.sw[op] 3 | .metadata 4 | .yardoc 5 | .yardwarns 6 | *.iml 7 | /.bundle/ 8 | /.idea/ 9 | /.vagrant/ 10 | /coverage/ 11 | /bin/ 12 | /doc/ 13 | /Gemfile.local 14 | /Gemfile.lock 15 | /junit/ 16 | /log/ 17 | /pkg/ 18 | /spec/fixtures/manifests/ 19 | /spec/fixtures/modules/* 20 | /tmp/ 21 | /vendor/ 22 | /convert_report.txt 23 | /update_report.txt 24 | .DS_Store 25 | .project 26 | .envrc 27 | /inventory.yaml 28 | /spec/fixtures/litmus_inventory.yaml 29 | .resource_types 30 | .modules 31 | .task_cache.json 32 | .plan_cache.json 33 | .rerun.json 34 | bolt-debug.log 35 | /.fixtures.yml 36 | /Gemfile 37 | /.gitattributes 38 | /.github/ 39 | /.gitignore 40 | /.pdkignore 41 | /.puppet-lint.rc 42 | /Rakefile 43 | /rakelib/ 44 | /.rspec 45 | /..yml 46 | /.yardopts 47 | /spec/ 48 | /.vscode/ 49 | /.sync.yml 50 | /.devcontainer/ 51 | -------------------------------------------------------------------------------- /.puppet-lint.rc: -------------------------------------------------------------------------------- 1 | --relative 2 | -------------------------------------------------------------------------------- /.rspec: 
-------------------------------------------------------------------------------- 1 | --color 2 | --format documentation 3 | -------------------------------------------------------------------------------- /.sync.yml: -------------------------------------------------------------------------------- 1 | --- 2 | .gitignore: 3 | paths: 4 | - .rerun.json 5 | - .librarian 6 | - .kitchen 7 | - .tmp 8 | - .bundle 9 | - bolt.log 10 | - Puppetfile.lock 11 | - modules 12 | .gitlab-ci.yml: 13 | # we don't use GitLab 14 | unmanaged: true 15 | appveyor.yml: 16 | # we don't use Appveyor 17 | unmanaged: true 18 | Gemfile: 19 | required: 20 | ':development': 21 | - gem: 'puppet-blacksmith' 22 | version: '>= 5.0.0' 23 | - gem: 'r10k' 24 | version: '>= 3.0.0' 25 | # cri is needed by r10k, but due to a bug in the cri gem v2.15.7 it breaks r10k 26 | # see: https://github.com/puppetlabs/r10k/issues/930 27 | - gem: 'cri' 28 | version: '2.15.12' 29 | - gem: 'yaml-lint' 30 | version: '>= 0.0.10' 31 | # Rakefile: 32 | # extras: 33 | # - "# exclude plans because puppet-syntax doesn't support them yet: https://github.com/voxpupuli/puppet-syntax/issues/95" 34 | # - 'PuppetSyntax.exclude_paths = ["plans/**/*", "vendor/**/*"]' 35 | spec/spec_helper.rb: 36 | mock_with: ':rspec' 37 | .travis.yml: 38 | # we've converted over to GitHub actions 39 | unmanaged: true 40 | # .travis.yml: 41 | # deploy_to_forge: 42 | # enabled: true 43 | # user: encore 44 | # secure: "rSxtW5opNBOGqj0iWgLV2t9tWJvFgBRUVeH25Wi9XqVkl3O+Yki0xsBAkrtG4UXrtbkYm5plF8GnwbsahH6XA/91ydl6uQkI/Xhn7EbC2FXTeWdNadPc/kQWDkRxgJ1ChWscS6WRoWApTDWadjQCNMzl4CgzYHyz8H9YBHFa/isPxBdGmoZYJ3/qPPxdjUSexeLxZf8gxVTJHJwe5VJik0CQ1eOu31//WDXUo6Erm0OuivWC5C3OnnrezH3vW27mqp4MCr1Uv6kw7NETM6seEHQdDKl1itOcXpayOMlw/nxTVBas03M7CfCz3CC2d9qzWjb5J2KxSjwO3RJo/oX8MaTrJDQ+ydcPC9T6uO7dX/Wm9hzHTBah4MzzMHtHfNBCS8Rmpson8BCfZVolRaWRjBRwSB54H2jOgBvclqgR2skVWhuvpFKhxAhH2et5HdYf81L8TgW24tjfbBsf4hYSrqG/sM0yzt/cY4u0N4SVN+6P86DRuvbdTKH+KpAPRRtTgHw0i+3E9wce4XFs6B5JrSSjjHR3sWXdZMb8bVkugHJXyQgTQKf94aNyRfu5tt6174SaBBSOLLR+CDtb8MP1egYfCRXWqA2OQhNcB/2/XhrE01e5CGCj8jepJ8APHfwTLZWVPlHETQr1iFqYZsGjPMaoB1SR4hUN5BaZ2n0g77A=" 45 | 46 | 47 | -------------------------------------------------------------------------------- /.travis-switch-to-github-actions.yml: -------------------------------------------------------------------------------- 1 | --- 2 | os: linux 3 | dist: xenial 4 | language: ruby 5 | cache: bundler 6 | before_install: 7 | - bundle -v 8 | - rm -f Gemfile.lock 9 | - "# Update system gems if requested. 
This is useful to temporarily workaround troubles in the test runner" 10 | - "# See https://github.com/puppetlabs/pdk-templates/commit/705154d5c437796b821691b707156e1b056d244f for an example of how this was used" 11 | - "# Ignore exit code of SIGPIPE'd yes to not fail with shell's pipefail set" 12 | - '[ -z "$RUBYGEMS_VERSION" ] || (yes || true) | gem update --system $RUBYGEMS_VERSION' 13 | - gem --version 14 | - bundle -v 15 | script: 16 | - 'bundle exec rake $CHECK' 17 | bundler_args: --without system_tests 18 | rvm: 19 | - 2.5.7 20 | stages: 21 | - static 22 | - spec 23 | - acceptance 24 | - 25 | if: tag =~ ^v\d 26 | name: deploy 27 | jobs: 28 | fast_finish: true 29 | include: 30 | - 31 | env: CHECK="check:symlinks check:git_ignore check:dot_underscore check:test_file rubocop syntax lint metadata_lint" 32 | stage: static 33 | - 34 | env: PUPPET_GEM_VERSION="~> 5.0" CHECK=parallel_spec 35 | rvm: 2.4.5 36 | stage: spec 37 | - 38 | env: PUPPET_GEM_VERSION="~> 6.0" CHECK=parallel_spec 39 | rvm: 2.5.7 40 | stage: spec 41 | - 42 | env: DEPLOY_TO_FORGE=yes 43 | stage: deploy 44 | branches: 45 | only: 46 | - master 47 | - /^v\d/ 48 | notifications: 49 | email: false 50 | deploy: 51 | provider: puppetforge 52 | username: encore 53 | password: 54 | secure: "dAqckSD7HGROLA3AMl7CT5Mrw1NLqYBMRUMlR44dNuuFcUfigdW0VnIQrwotip304zJ68bWNpN2xi9QBfB1MDa8tymEQ35apaxvltJlTdKazlMnukl8TVp5WvtTBrwziwMfb99JZFLNMuReqZh+JHkfQjyjtLAglEf+QtNgmOzcXihUMl1kbV5XnrGQ7UDbc65ReT3khTIfZu6iCXqY/3JHOHLBd+EYc01EfOA/JQb1V/gIAf0zNSum4LZ4HoWrPXjts+w8pigJcCM0jxwpBFAex3uKSOTGC2lUG/xTwZXKT8lXWqZlbpAoahDDuH3CsvAA9FnnWQoXGIwryeZ2W0IVwYJvOvYtoepyd3//Tsvtma6wJuzqxlggpVCcb7Pm9IKXiUCXMnbQ4HP6YR/Mk5yt/+1Xl2RwJCve4fII9mo8OG657JyOio8BO+mznGJiZ5fCpQwtt3J8nlhnP5fDumNiggfhh2pzzZmSRp/LlsmVyJvWwweNskbX69F1g6r2NvEmiFedLjAg2jeOcRI7YiOsNsXwn4F/87RG6rOKw469MIcH74Td6oYYgSjaOMnLz07ZqL8YNxTwXhH7kc1MB92J+0GYX0iM38WpJFr30o/fu+wJzitKBSiFB2fQ1av1akBGFrBMX1GQIAj6USMQSpcIvVcCzCNcSS9cNUqTw7gI=" 55 | on: 56 | tags: true 57 | all_branches: true 58 | condition: "$DEPLOY_TO_FORGE = yes" 59 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "puppet.puppet-vscode", 4 | "Shopify.ruby-lsp" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.yardopts: -------------------------------------------------------------------------------- 1 | --markup markdown 2 | -------------------------------------------------------------------------------- /DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Development Tips & Tricks 2 | 3 | ## Quick start 4 | 5 | ```shell 6 | mkdir site-modules && ln -s ../ site-modules/patching 7 | vagrant up centos 8 | bolt task run facts --targets vagrant_centos 9 | ``` 10 | 11 | ## Ideology 12 | 13 | We've setup this repo to be used with the following: 14 | * bolt 15 | * vagrant 16 | 17 | The repo itself is a `boltdir`, so you should be able to run command such as `bolt task show` 18 | and see a list of tasks. This works by having a `bolt.yaml` in the root directory pointing 19 | at `site-modules` which has a symlink back to this repo's directory. 
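If `bolt task show` comes back empty, the `site-modules/patching` symlink from the Quick start above is usually missing. A quick sanity check (paths assumed from the Quick start, not required by bolt itself):

```shell
# verify the boltdir layout described above
ls -l site-modules/patching     # should be a symlink back to this repo's root
bolt task show | grep patching  # the patching:: tasks should now be listed
```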
20 | 21 | To test on some servers, you can use Vagrant to spin up new boxes: 22 | 23 | ```shell 24 | # spin up a CentOS 7 box 25 | BOX=centos/7 vagrant up centos 26 | 27 | # spin up a CentOS 8 box 28 | BOX=generic/centos8 vagrant up centos 29 | 30 | # spin up a Ubuntu 16.04 box 31 | BOX=generic/ubuntu1604 vagrant up ubuntu 32 | 33 | # spin up a Ubuntu 18.04 box 34 | BOX=generic/ubuntu1804 vagrant up ubuntu 35 | ``` 36 | 37 | The `bolt/inventory.yaml` file then contains host entries for both the `centos` and `ubuntu` 38 | vagrant boxes, along with paths to the proper Vagrant SSH keys. 39 | 40 | You can test out bolt tasks/plans on them by doing something like: 41 | ```shell 42 | bolt task run facts --targets vagrant_centos 43 | bolt task run facts --targets vagrant_ubuntu 44 | ``` 45 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source ENV['GEM_SOURCE'] || 'https://rubygems.org' 2 | 3 | def location_for(place_or_version, fake_version = nil) 4 | git_url_regex = %r{\A(?(https?|git)[:@][^#]*)(#(?.*))?} 5 | file_url_regex = %r{\Afile:\/\/(?.*)} 6 | 7 | if place_or_version && (git_url = place_or_version.match(git_url_regex)) 8 | [fake_version, { git: git_url[:url], branch: git_url[:branch], require: false }].compact 9 | elsif place_or_version && (file_url = place_or_version.match(file_url_regex)) 10 | ['>= 0', { path: File.expand_path(file_url[:path]), require: false }] 11 | else 12 | [place_or_version, { require: false }] 13 | end 14 | end 15 | 16 | group :development do 17 | gem "json", '= 2.1.0', require: false if Gem::Requirement.create(['>= 2.5.0', '< 2.7.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 18 | gem "json", '= 2.3.0', require: false if Gem::Requirement.create(['>= 2.7.0', '< 3.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 19 | gem "json", '= 2.5.1', require: false if Gem::Requirement.create(['>= 3.0.0', '< 3.0.5']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 20 | gem "json", '= 2.6.1', require: false if Gem::Requirement.create(['>= 3.1.0', '< 3.1.3']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 21 | gem "json", '= 2.6.3', require: false if Gem::Requirement.create(['>= 3.2.0', '< 4.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 22 | gem "racc", '~> 1.4.0', require: false if Gem::Requirement.create(['>= 2.7.0', '< 3.0.0']).satisfied_by?(Gem::Version.new(RUBY_VERSION.dup)) 23 | gem "deep_merge", '~> 1.0', require: false 24 | gem "voxpupuli-puppet-lint-plugins", '~> 5.0', require: false 25 | gem "facterdb", '~> 1.26', require: false 26 | gem "metadata-json-lint", '~> 4.0', require: false 27 | gem "rspec-puppet-facts", '~> 3.0', require: false 28 | gem "dependency_checker", '~> 1.0.0', require: false 29 | gem "parallel_tests", '= 3.12.1', require: false 30 | gem "pry", '~> 0.10', require: false 31 | gem "simplecov-console", '~> 0.9', require: false 32 | gem "puppet-debugger", '~> 1.0', require: false 33 | gem "rubocop", '~> 1.50.0', require: false 34 | gem "rubocop-performance", '= 1.16.0', require: false 35 | gem "rubocop-rspec", '= 2.19.0', require: false 36 | gem "rb-readline", '= 0.5.5', require: false, platforms: [:mswin, :mingw, :x64_mingw] 37 | gem "rexml", '>= 3.0.0', '< 3.2.7', require: false 38 | gem "puppet-blacksmith", '>= 5.0.0', require: false 39 | gem "r10k", '>= 3.0.0', require: false 40 | gem "cri", '2.15.12', require: false 41 | gem "yaml-lint", '>= 0.0.10', require: false 42 | end 43 | group 
:development, :release_prep do 44 | gem "puppet-strings", '~> 4.0', require: false 45 | gem "puppetlabs_spec_helper", '~> 7.3', require: false 46 | end 47 | group :system_tests do 48 | gem "puppet_litmus", '~> 1.0', require: false, platforms: [:ruby, :x64_mingw] 49 | gem "CFPropertyList", '< 3.0.7', require: false, platforms: [:mswin, :mingw, :x64_mingw] 50 | gem "serverspec", '~> 2.41', require: false 51 | end 52 | 53 | puppet_version = ENV['PUPPET_GEM_VERSION'] 54 | facter_version = ENV['FACTER_GEM_VERSION'] 55 | hiera_version = ENV['HIERA_GEM_VERSION'] 56 | 57 | gems = {} 58 | 59 | gems['puppet'] = location_for(puppet_version) 60 | 61 | # If facter or hiera versions have been specified via the environment 62 | # variables 63 | 64 | gems['facter'] = location_for(facter_version) if facter_version 65 | gems['hiera'] = location_for(hiera_version) if hiera_version 66 | 67 | gems.each do |gem_name, gem_params| 68 | gem gem_name, *gem_params 69 | end 70 | 71 | # Evaluate Gemfile.local and ~/.gemfile if they exist 72 | extra_gemfiles = [ 73 | "#{__FILE__}.local", 74 | File.join(Dir.home, '.gemfile'), 75 | ] 76 | 77 | extra_gemfiles.each do |gemfile| 78 | if File.file?(gemfile) && File.readable?(gemfile) 79 | eval(File.read(gemfile), binding) 80 | end 81 | end 82 | # vim: syntax=ruby 83 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Description: 3 | # Executes validations and tests for this puppet module 4 | # 5 | ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) 6 | PUPPET_VERSION := "~> 6.0" 7 | KITCHEN_GEMFILE ?= $(ROOT_DIR)/build/kitchen/Gemfile 8 | TEST_NAME ?= puppet6 9 | 10 | RBENV_PATH := $(HOME)/.rbenv 11 | export RBENV_VERSION := 2.5.1 12 | 13 | # Run all targets 14 | .PHONY: all 15 | # Not running unit tests since we don't have any 16 | all: setup test 17 | 18 | # list all makefile targets 19 | .PHONY: list 20 | list: 21 | @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs 22 | 23 | # setup CI environment 24 | .PHONY: setup 25 | setup: .setup .rbenv 26 | 27 | # setup CI environment 28 | .PHONY: test 29 | test: .setup .test-setup .test 30 | 31 | # cleanup CI environment 32 | .PHONY: clean 33 | clean: .clean 34 | 35 | # runs kitchen unit tests 36 | .PHONY: kitchen 37 | kitchen: setup .kitchen 38 | 39 | ################################################################################ 40 | 41 | .PHONY: .rbenv 42 | .rbenv: 43 | @echo 44 | @echo "==================== rbenv ====================" 45 | @echo 46 | if [ ! 
-d "$(RBENV_PATH)" ]; then \ 47 | git clone https://github.com/rbenv/rbenv.git $(RBENV_PATH); \ 48 | cd $(RBENV_PATH) && src/configure && make -C src; \ 49 | echo 'export PATH="$$HOME/.rbenv/bin:$$PATH"' >> ~/.bashrc; \ 50 | echo 'eval "$$(rbenv init -)"' >> ~/.bashrc; \ 51 | git clone https://github.com/rbenv/ruby-build.git $(RBENV_PATH)/plugins/ruby-build; \ 52 | fi; 53 | 54 | .PHONY: .setup 55 | .setup: .rbenv 56 | @echo 57 | @echo "==================== setup ====================" 58 | @echo 59 | # TODO install bundler (yum -y install rubygem-bundler) 60 | # TODO install rake (yum -y install rubygem-rake) 61 | # TODO install ruby-devel (yum -y install ruby-devel) 62 | # TODO install docker (yum -y install docker) 63 | whoami 64 | echo $(HOME) 65 | echo $(SHELL) 66 | echo $(PATH) 67 | # https://github.com/rbenv/ruby-build/wiki#suggested-build-environment 68 | #yum install -y gcc bzip2 openssl-devel libyaml-devel libffi-devel readline-devel zlib-devel gdbm-devel ncurses-devel 69 | rbenv install --skip-existing $(RBENV_VERSION) 70 | ruby --version 71 | rbenv local $(RBENV_VERSION) 72 | ruby --version 73 | 74 | .PHONY: .test-setup 75 | .test-setup: 76 | @echo 77 | @echo "==================== test-setup ====================" 78 | @echo 79 | bundle -v 80 | gem install bundler 81 | bundle -v 82 | rm -f $(ROOT_DIR)/Gemfile.lock 83 | gem --version 84 | PUPPET_GEM_VERSION=$(PUPPET_VERSION) bundle install --without system_tests --path="$${BUNDLE_PATH:-$(ROOT_DIR)/vendor/bundle}" 85 | 86 | .PHONY: .test 87 | .test: 88 | @echo 89 | @echo "==================== test ====================" 90 | @echo 91 | PUPPET_GEM_VERSION=$(PUPPET_VERSION) bundle exec rake syntax lint metadata_lint check:symlinks check:git_ignore check:dot_underscore check:test_file rubocop parallel_spec 92 | 93 | .PHONY: .clean 94 | .clean: 95 | @echo 96 | @echo "==================== clean ====================" 97 | @echo 98 | rm -rf $(ROOT_DIR)/.bundle 99 | rm -rf $(ROOT_DIR)/vendor 100 | rm -f $(ROOT_DIR)/Gemfile.lock 101 | find "$(ROOT_DIR)" -type d -name '.kitchen' | xargs -r -t -n1 rm -rf 102 | find "$(ROOT_DIR)" -type d -name '.librarian' -or -type d -name '.tmp' | xargs -r -t -n1 rm -rf 103 | rm -rf $(ROOT_DIR)/build/kitchen/.bundle 104 | rm -rf $(ROOT_DIR)/build/kitchen/vendor 105 | rm -rf $(ROOT_DIR)/spec/fixtures 106 | 107 | .PHONY: .kitchen 108 | .kitchen: 109 | @echo 110 | @echo "==================== kitchen ====================" 111 | @echo 112 | BUNDLE_GEMFILE=$(KITCHEN_GEMFILE) bundle install 113 | BUNDLE_GEMFILE=$(KITCHEN_GEMFILE) bundle exec kitchen test --debug $(TEST_NAME) 114 | -------------------------------------------------------------------------------- /Puppetfile: -------------------------------------------------------------------------------- 1 | forge "http://forge.puppetlabs.com" 2 | 3 | mod "puppetlabs/stdlib" 4 | -------------------------------------------------------------------------------- /Rakefile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'bundler' 4 | require 'puppet_litmus/rake_tasks' if Gem.loaded_specs.key? 'puppet_litmus' 5 | require 'puppetlabs_spec_helper/rake_tasks' 6 | require 'puppet-syntax/tasks/puppet-syntax' 7 | require 'puppet-strings/tasks' if Gem.loaded_specs.key? 
'puppet-strings' 8 | 9 | PuppetLint.configuration.send('disable_relative') 10 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # Enhancements 2 | - put chocolatey test into a function in test utils 3 | - refactor update.ps1 4 | - update.ps1 support updating specific packages 5 | - Migrate VMware credentials to a `remote` transport 6 | 7 | # New features 8 | - Promote content 9 | - save promoted patches so we can migrate between WSUS environments 10 | - Monitoring enable/disable 11 | - Notifications 12 | - slack 13 | - email 14 | - ServiceNow change integration 15 | - Inventory plugins 16 | - Satellite/Foreman 17 | - WSUS 18 | - IPA 19 | - AD 20 | - VMware 21 | - ServiceNow 22 | - Reverse clustered workflow 23 | - Network patching 24 | - VMware patching 25 | - Should we add 'facts' to describe end-node's customizations that mirror vars in bolt plans? 26 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # options: 5 | # 'virtualbox' 6 | # 'libvirt' 7 | provider = ENV['PROVIDER'] ? ENV['PROVIDER'] : 'libvirt' 8 | provider = provider.to_sym 9 | # virtualbox: 10 | # - centos/6 11 | # - centos/7 12 | # - generic/centos8 13 | # - 'ubuntu/trusty64' 14 | # - 'ubuntu/xenial64' 15 | # libvirt 16 | # - centos/6 17 | # - centos/7 18 | # - generic/centos8 19 | # - generic/ubuntu1404 20 | # - generic/ubuntu1604 21 | box_ubuntu = ENV['BOX'] ? ENV['BOX'] : 'generic/ubuntu1604' 22 | box_centos = ENV['BOX'] ? ENV['BOX'] : 'generic/centos8' 23 | 24 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
25 | VAGRANTFILE_API_VERSION = "2" 26 | 27 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 28 | config.vm.define "centos" do |centos| 29 | hostname = "patching-centos.localdomain" 30 | # Box details 31 | centos.vm.box = box_centos 32 | centos.vm.hostname = hostname 33 | centos.vm.network :private_network, ip: "192.168.121.100" 34 | centos.vm.synced_folder '.', '/vagrant', disabled: true 35 | 36 | # Box Specifications 37 | if provider == :virtualbox 38 | centos.vm.provider :virtualbox do |vb| 39 | vb.name = hostname 40 | vb.memory = 2048 41 | vb.cpus = 2 42 | vb.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 43 | end 44 | elsif provider == :libvirt 45 | centos.vm.provider :libvirt do |lv| 46 | lv.host = hostname 47 | lv.memory = 2048 48 | lv.cpus = 2 49 | lv.uri = "qemu:///system" 50 | lv.storage_pool_name = "images" 51 | end 52 | else 53 | raise RuntimeError.new("Unsupported provider: #{provider}") 54 | end 55 | end 56 | 57 | config.vm.define "ubuntu" do |ubuntu| 58 | hostname = "patching-ubuntu.localdomain" 59 | # Box details 60 | ubuntu.vm.box = box_ubuntu 61 | # older version so we have some updates 62 | ubuntu.vm.box_version = '1.9.18' 63 | ubuntu.vm.hostname = hostname 64 | ubuntu.vm.network :private_network, ip: "192.168.121.101" 65 | ubuntu.vm.synced_folder '.', '/vagrant', disabled: true 66 | 67 | # Box Specifications 68 | if provider == :virtualbox 69 | ubuntu.vm.provider :virtualbox do |vb| 70 | vb.name = hostname 71 | vb.memory = 2048 72 | vb.cpus = 2 73 | vb.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 74 | end 75 | elsif provider == :libvirt 76 | ubuntu.vm.provider :libvirt do |lv| 77 | lv.host = hostname 78 | lv.memory = 2048 79 | lv.cpus = 2 80 | lv.uri = "qemu:///system" 81 | lv.storage_pool_name = "images" 82 | end 83 | else 84 | raise RuntimeError.new("Unsupported provider: #{provider}") 85 | end 86 | end 87 | end 88 | -------------------------------------------------------------------------------- /WINDOWS_UPDATE_PROBLEMS_AND_RESOLUTIONS.md: -------------------------------------------------------------------------------- 1 | # Windows update HRESULT problems and fixes 2 | 3 | - hresult: 0x8024402C 4 | possibilities: 5 | - problem: a proxy setting in the system that is broken 6 | solutions: 7 | - fix the proxy config by going to IE -> Internet Options -> Connections 8 | - either set proxy to "auto detect" or fix the proxy server 9 | - problem: DNS is broken on the server and can't ping / access the WSUS server by name 10 | troubleshooting: try to ping the WSUS server by hostname 11 | solutions: 12 | - fix DNS so you can ping the WSUS server 13 | - could be bad DNS on the NIC 14 | - could be bad firewall policy on the host 15 | - could be bad firewall policy on the network 16 | - hresult: 0x8024000E 17 | possibilities: 18 | - problem: windows update cache is corrupt 19 | solutions: 20 | - clean the windows update cache: bolt task run patching::cache_remove 21 | 22 | -------------------------------------------------------------------------------- /bolt-project.yaml: -------------------------------------------------------------------------------- 1 | # Usage: 2 | # bolt puppetfile install 3 | # bolt plan run patching::available_updates 4 | name: patching 5 | 6 | # paths are relative to this bolt.yaml file 7 | modulepath: "./modules:./site-modules" 8 | inventoryfile: "./bolt/inventory.yaml" 9 | log: 10 | './bolt.log': 11 | level: debug 12 | append: true 13 | console: 14 | level: notice 15 | 
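# Example (illustrative, assumes the plan accepts a 'targets' parameter):
#   bolt plan run patching::available_updates --targets patching
# where 'patching' is the group defined in ./bolt/inventory.yaml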
-------------------------------------------------------------------------------- /bolt/inventory.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | config: 4 | ssh: 5 | host-key-check: false 6 | user: vagrant 7 | run-as: root 8 | 9 | groups: 10 | - name: patching 11 | targets: 12 | - name: vagrant_centos 13 | uri: 192.168.121.100 14 | config: 15 | ssh: 16 | private-key: .vagrant/machines/centos/libvirt/private_key 17 | - name: vagrant_ubuntu 18 | uri: 192.168.121.101 19 | config: 20 | ssh: 21 | private-key: .vagrant/machines/ubuntu/libvirt/private_key 22 | -------------------------------------------------------------------------------- /files/bash/available_updates_deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ## Check for available updates (must have them in our cache already) 4 | PKGS=$(apt list --upgradable 2>/dev/null | awk 'NR>1 {print $0}' | sort) 5 | cat </dev/null | egrep -v "is broken|^Security:|^Loaded plugins" | awk '/^[[:alnum:]]/ {print $0}' | sort) 12 | 13 | # If there are no updates then we need to log the transaction and return an empty list 14 | if [ -z "$PKGS" ]; then 15 | # get the last transaction 16 | LAST_LOG=$(tac "$RESULT_FILE" | sed '/{/q' | tac) 17 | LINE_COUNT=$(echo "$LAST_LOG" | wc -l) 18 | # If the last transaction included updates (line is greater than one), log a new transaction with no updates 19 | # This prevents the last transaction from being repeated in consecutive runs with no updates 20 | if [ "$LINE_COUNT" -gt 1 ]; then 21 | echo '{ "failed": [], "installed": [], "upgraded": [] }' >> $RESULT_FILE 22 | fi 23 | fi 24 | 25 | # return the results in JSON format. an empty list is returned if no updates 26 | cat < 15 | read -d \< ENTITY CONTENT 16 | local ret=$? 17 | TAG_NAME=${ENTITY%% *} 18 | ATTRIBUTES=${ENTITY#* } 19 | return $ret 20 | } 21 | 22 | # parse_attr 23 | # Some the the attributes have illegal variables in them from the DOM (valid in DOM 24 | # invalid in BASH). This replaces '-' with "_" in the attribute name and strips 25 | # all characters from the end if they are outside the last quote. 26 | parse_attr () { 27 | local ATTR_ARRAY=( $ATTRIBUTES ) 28 | ATTRIBUTES="" 29 | 30 | for ATTR in "${ATTR_ARRAY[@]}" 31 | do 32 | LINE=$(echo $ATTR | awk -F = -v OFS== '{gsub(/-/, "_", $1); print}') 33 | LINE=${LINE%\"*} 34 | LINE="${LINE}\"" 35 | ATTRIBUTES="${ATTRIBUTES} ${LINE}" 36 | done 37 | } 38 | 39 | # parse_dom 40 | # There are two interesting segments update and source. An example entry is: 41 | # 42 | # The GCC Preprocessor 43 | # This Package contains just the preprocessor that is used by the X11 packages. 44 | # 45 | # 46 | # 47 | # 48 | # We are parsing the update line into a variable, then when the source line comes along we are dumping it, and the source alias into the JSON output. 
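# (illustrative, with made-up package/repo names) each parsed entry is emitted as:
#   { "name": "gcc", "repo": "SLES-Updates", "version": "4.8.5" }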
49 | parse_dom () { 50 | if [[ $TAG_NAME = "update" ]] ; then 51 | parse_attr 52 | 53 | local $ATTRIBUTES 54 | PKG_NAME=${name} 55 | PKG_VER=${edition} 56 | fi 57 | 58 | if [[ $TAG_NAME = "source" ]] ; then 59 | if [ -n $comma ]; then 60 | echo -n "$comma" 61 | fi 62 | 63 | parse_attr 64 | local $ATTRIBUTES 65 | echo " {" 66 | echo " \"name\": ${PKG_NAME}," 67 | echo " \"repo\": ${alias}," 68 | echo " \"version\": ${PKG_VER}" 69 | 70 | echo -n " }" 71 | comma=',' 72 | fi 73 | } 74 | 75 | ############# 76 | # Main code # 77 | ############# 78 | PKGS=$(zypper -x lu 2>/dev/null) 79 | echo "{" 80 | echo -n ' "updates": [' 81 | 82 | comma='' 83 | while read_dom; do 84 | parse_dom 85 | done <<< "$PKGS" 86 | 87 | echo '' 88 | cat < /dev/null | grep Distributor | awk '{print $3}') 8 | export OS_TEST_RH=$(sed -e "s~\(.*\)release.*~\1~g" /etc/redhat-release 2> /dev/null) 9 | 10 | if [[ -n "$OS_TEST_RH" ]]; then 11 | export OS_RELEASE='RHEL' 12 | elif [[ -n "$OS_TEST_DEB" ]]; then 13 | # treat as Ubuntu, but Debian would also work 14 | export OS_RELEASE='UBUNTU' 15 | else 16 | # default 17 | export OS_RELEASE='UNKNOWN' 18 | fi 19 | fi 20 | -------------------------------------------------------------------------------- /files/bash/reboot_required_deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # default 4 | export REBOOT_REQUIRED="false" 5 | if [[ -f /var/run/reboot-required ]]; then 6 | export REBOOT_REQUIRED="true" 7 | fi 8 | -------------------------------------------------------------------------------- /files/bash/reboot_required_rh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RH_RELEASE=$(sed -r -e 's/^.* release ([0-9]+(\.[0-9]+)?).*$/\1/' /etc/redhat-release) 4 | RH_MAJOR="${RH_RELEASE%%.*}" 5 | 6 | # default 7 | export REBOOT_REQUIRED="false" 8 | 9 | # needs-restarting comes with yum-utils package, sometimes this isn't installed 10 | if [[ -x /usr/bin/needs-restarting ]]; then 11 | if [[ $RH_MAJOR -eq 6 ]]; then 12 | ## needs-restarting on RHEL6 prints things to STDOUT when a restart is needed 13 | ## otherwise it prints nothing, so we check to see if the STDOUT contains data 14 | ## to determine if we need to reboot 15 | check=$(needs-restarting) 16 | if [[ -n $check ]]; then 17 | export REBOOT_REQUIRED="true" 18 | fi 19 | elif [[ $RH_MAJOR -ge 7 ]]; then 20 | ## needs-restarting on RHEL7 returns an exit code of 1 if a reboot is needed, otherwise 21 | ## a reboot is not required 22 | check=$(needs-restarting -r) 23 | EXIT_STATUS=$? 24 | if [[ $EXIT_STATUS -eq 1 ]]; then 25 | export REBOOT_REQUIRED="true" 26 | fi 27 | else 28 | echo "ERROR - Unknown RedHat/CentOS version: RH_RELEASE=${RH_RELEASE} RH_MAJOR=${RH_MAJOR}" >&2 29 | exit 3 30 | fi 31 | else 32 | echo "ERROR - /usr/bin/needs-restarting isn't present on a RedHat/CentOS host. You probably need to install the package: yum-utils" >&2 33 | exit 4 34 | fi 35 | -------------------------------------------------------------------------------- /files/bash/reboot_required_sles.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # default 4 | export REBOOT_REQUIRED="false" 5 | 6 | # from the man page: 7 | # needs-rebooting 8 | # Checks if the reboot-needed flag was set by a previous update or install of a core library or service. + The reboot-needed flag is set when a package from a predefined list (/etc/zypp/needreboot) is updated or installed. 
9 | # Exit code ZYPPER_EXIT_INF_REBOOT_NEEDED indicates that a reboot is needed, otherwise the exit code is set to ZYPPER_EXIT_OK. 10 | # 11 | # 102 - ZYPPER_EXIT_INF_REBOOT_NEEDED 12 | # Returned after a successful installation of a patch which requires reboot of computer. 13 | 14 | check=$(zypper needs-rebooting) 15 | EXIT_STATUS=$? 16 | if [[ $EXIT_STATUS -eq 102 ]]; then 17 | export REBOOT_REQUIRED="true" 18 | fi 19 | -------------------------------------------------------------------------------- /files/bash/update_deb.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Required environment variables 3 | # export PACKAGES - list of packages to update 4 | # export RESULT_FILE - name of the file to write JSON results to 5 | # export LOG_FILE - name of the file to write OS specific patching logs to 6 | 7 | ## Apt package manager 8 | UPDATE_OUTPUT=$(apt-get -y update) 9 | STATUS=$? 10 | 11 | # Write the current date in UTC to the log file 12 | echo "-----$(date -u)-----" &>> "$LOG_FILE" 13 | 14 | # Write the output to the log file 15 | echo "$UPDATE_OUTPUT" &>> "$LOG_FILE" 16 | 17 | if [[ $STATUS -ne 0 ]]; then 18 | echo "apt-get -y update FAILED, you probably forgot to run this as sudo or there is a network error." 19 | exit $STATUS 20 | fi 21 | 22 | # Check for errors in the apt-get update output 23 | if echo "$UPDATE_OUTPUT" | grep -q "Err:"; then 24 | echo "apt-get -y update completed with errors. Check the log file for details." 25 | tee -a "${RESULT_FILE}" <> "$LOG_FILE" 50 | STATUS=$? 51 | 52 | # Check if there are no updates and no errors 53 | if grep -q "0 upgraded, 0 newly installed, 0 to remove\.?$" "$LOG_FILE"; then 54 | tee -a "${RESULT_FILE}" <> "$LOG_FILE" 69 | 70 | # the log has two sections: 71 | # Install: 72 | # Upgrade: 73 | # This pulls the Install: and Upgrade: lines and removes that beginning heading 74 | # leaving us with a single, very long, line of packages 75 | LAST_INSTALL=$(echo -n "$LAST_LOG" | grep '^Install: ' | sed 's/Install: //g') 76 | LAST_UPGRADE=$(echo -n "$LAST_LOG" | grep '^Upgrade: ' | sed 's/Upgrade: //g') 77 | 78 | # Packages are in a long with with the format: 79 | # Install: 80 | # : (, automatic), etc... 81 | # Upgrade: 82 | # : (, ), etc... 83 | # 84 | # We want to split up this long line into individual lines, one for each package 85 | # we do this by splitting on the ), that separates each package 86 | LAST_INSTALL_PACKAGES=$(echo -n "$LAST_INSTALL" | sed 's/), /)\n/g') 87 | LAST_UPGRADE_PACKAGES=$(echo -n "$LAST_UPGRADE" | sed 's/), /)\n/g') 88 | 89 | # Initialize failed packages array 90 | declare -A FAILED_PACKAGES 91 | 92 | # Check for failed packages 93 | if grep -q "dpkg: error processing package" "$LOG_FILE"; then 94 | ERRORS=$(grep "dpkg: error processing package" "$LOG_FILE") 95 | while read -r line; do 96 | package=$(echo "$line" | awk '{print $5}') 97 | # Ensure the package name is valid and not empty 98 | if [[ -n "$package" && "$package" != "?" ]]; then 99 | FAILED_PACKAGES["$package"]="Installation failed - PLEASE SEE $LOG_FILE FOR DETAILS" 100 | fi 101 | done <<< "$ERRORS" 102 | fi 103 | 104 | # print out all Installed packages as JSON 105 | tee -a "${RESULT_FILE}" <: (, automatic), etc... 
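# e.g. (illustrative) a line looks like: libssl1.1:amd64 (1.1.1f-1ubuntu2, automatic)
# field 1 is the package name (optionally suffixed with ":arch"), field 2 is the version in parentheses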
117 | pkg=$(echo "$line" | awk '{print $1}') 118 | # package is: : 119 | name=$(echo "$pkg" | awk -F':' '{print $1}') 120 | 121 | # This gets the version number and removes the '(' and ',' characters 122 | # from the string 123 | version=$(echo "$line" | awk '{print $2}' | sed 's/(\|,//g') 124 | 125 | if [ -n "$comma" ]; then 126 | echo "$comma" | tee -a "${RESULT_FILE}" 127 | fi 128 | echo " {" | tee -a "${RESULT_FILE}" 129 | echo " \"name\": \"${name}\"," | tee -a "${RESULT_FILE}" 130 | echo " \"version\": \"${version}\"" | tee -a "${RESULT_FILE}" 131 | echo -n " }" | tee -a "${RESULT_FILE}" 132 | comma=',' 133 | done <<< "$LAST_INSTALL_PACKAGES" 134 | tee -a "${RESULT_FILE}" <: (, ), etc... 151 | pkg=$(echo "$line" | awk '{print $1}') 152 | # package is: : 153 | name=$(echo "$pkg" | awk -F':' '{print $1}') 154 | 155 | # This gets the old version number and removes the '(' and ',' characters 156 | # from the string 157 | version_old=$(echo "$line" | awk '{print $2}' | sed 's/(\|,//g') 158 | # Get the new version and remove the ')' from the string 159 | version=$(echo "$line" | awk '{print $3}' | sed 's/)//g') 160 | 161 | if [ -n "$comma" ]; then 162 | echo "$comma" | tee -a "${RESULT_FILE}" 163 | fi 164 | echo " {" | tee -a "${RESULT_FILE}" 165 | echo " \"name\": \"${name}\"," | tee -a "${RESULT_FILE}" 166 | echo " \"version\": \"${version}\"," | tee -a "${RESULT_FILE}" 167 | echo " \"version_old\": \"${version_old}\"" | tee -a "${RESULT_FILE}" 168 | echo -n " }" | tee -a "${RESULT_FILE}" 169 | comma=',' 170 | done <<< "$LAST_UPGRADE_PACKAGES" 171 | tee -a "${RESULT_FILE}" <> "$LOG_FILE") 9 | STATUS=$? 10 | case $STATUS in 11 | [1-4] | 6) 12 | echo "zypper --non-interactive update FAILED with an error. Please investigate." 13 | exit $STATUS 14 | ;; 15 | 5) 16 | echo "zypper --non-interactive update FAILED with insufficient privelidges. You probably forgot to run this as sudo." 17 | exit $STATUS 18 | ;; 19 | 7) 20 | echo "zypper --non-interactive update FAILED due to conflicting zypper run. Please try again once zypper is not running." 21 | exit $STATUS 22 | ;; 23 | 8) 24 | echo "zypper --non-interactive update FAILED due to dependency errors. Please investigate." 25 | exit $STATUS 26 | ;; 27 | *) 28 | # all other exit codes are a form of success. 29 | ;; 30 | esac 31 | 32 | # The zypp/history file logs commands as well as results. 33 | # Look for the last "|'zypper' 'up'|" line and print out everything in the file after that 34 | # to get our previous transaction. 35 | LAST_LOG=$(tac /var/log/zypp/history | sed "/|'zypper' 'up'|/q" | tac) 36 | echo "$LAST_LOG" >> "$LOG_FILE" 37 | 38 | # The log file contains install as well as other information items. 
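# (illustrative) an install entry in /var/log/zypp/history looks roughly like:
#   2021-03-01 12:00:00|install|zlib|1.2.11-3.24.1|x86_64|root@host|SLE-Updates|checksum
# i.e. date|action|name|version|arch|requested-by|repo|checksum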
39 | # We are only interested in the lines containing "|install|" 40 | LAST_INSTALL=$(echo -n "$LAST_LOG" | grep '|install|') 41 | 42 | # print out all Installed packages as JSON 43 | tee -a "${RESULT_FILE}" <|install||||||| 55 | name=$(echo "$line" | awk -F '|' '{print $3}') 56 | version=$(echo "$line" | awk -F '|' '{print $4}') 57 | arch=$(echo "$line" | awk -F '|' '{print $5}') 58 | repo=$(echo "$line" | awk -F '|' '{print $7}') 59 | 60 | if [ -n $comma ]; then 61 | echo "$comma" | tee -a "${RESULT_FILE}" 62 | fi 63 | echo " {" | tee -a "${RESULT_FILE}" 64 | echo " \"name\": \"${name}\"," | tee -a "${RESULT_FILE}" 65 | echo " \"version\": \"${version}\"" | tee -a "${RESULT_FILE}" 66 | echo " \"arch\": \"${arch}\"," | tee -a "${RESULT_FILE}" 67 | echo " \"repo\": \"${repo}\"" | tee -a "${RESULT_FILE}" 68 | echo -n " }" | tee -a "${RESULT_FILE}" 69 | comma=',' 70 | done <<< "$LAST_INSTALL" 71 | tee -a "${RESULT_FILE}" < 'patching::cache_update', 'type' => 'task', 'params' => { '_noop' => $noop, '_catch_errors' => true } }, 19 | # Check for available updates 20 | # { 'name' => 'patching::available_updates', 'type' => 'plan', 'params' => { 21 | # 'provider' => $update_provider_group, 22 | # 'format' => 'pretty', 23 | # 'noop' => $noop 24 | # } }, 25 | # Disable monitoring 26 | # { 'name' => $monitoring_plan_group, 'type' => 'plan', 'params' => { 27 | # 'action' => 'disable', 28 | # 'noop' => $noop 29 | # } }, 30 | # Create VM snapshots 31 | # { 'name' => $snapshot_plan_group, 'type' => 'plan', 'params' => { 32 | # 'action' => 'create', 33 | # 'noop' => $noop 34 | # } }, 35 | # Run pre-patching script 36 | # { 'name' => $pre_update_plan_group, 'type' => 'plan', 'params' => { 'noop' => $noop } }, 37 | # Run package updates 38 | # { 'name' => 'patching::update', 'type' => 'task', 'params' => { 39 | # 'provider' => $update_provider_group, 40 | # '_catch_errors' => true, 41 | # 'noop' => $noop 42 | # } }, 43 | # Run post-patching script 44 | # { 'name' => $post_update_plan_group, 'type' => 'plan', 'params' => { 'noop' => $noop } }, 45 | # Check if reboot required 46 | # { 'name' => 'patching::reboot_required', 'type' => 'plan', 'params' => { 47 | # 'strategy' => $reboot_strategy_group, 48 | # 'message' => $reboot_message_group, 49 | # 'wait' => $reboot_wait_group, 50 | # 'disconnect_wait' => $disconnect_wait_group, 51 | # 'noop' => $noop 52 | # } }, 53 | # Remove VM snapshots 54 | # { 'name' => $snapshot_plan_group, 'type' => 'plan', 'params' => { 55 | # 'action' => 'delete', 56 | # 'noop' => $noop 57 | # } }, 58 | # Enable monitoring 59 | # { 'name' => $monitoring_plan_group, 'type' => 'plan', 'params' => { 60 | # 'action' => 'enable', 61 | # 'noop' => $noop 62 | # } }, 63 | # ] 64 | function patching::build_workflow( 65 | Optional[String] $update_provider_group, 66 | Optional[Boolean] $monitoring_enabled_group, 67 | Optional[String] $monitoring_plan_group, 68 | Optional[Boolean] $snapshot_create_group, 69 | Optional[Boolean] $snapshot_delete_group, 70 | Optional[String] $snapshot_plan_group, 71 | Optional[String] $pre_update_plan_group, 72 | Optional[String] $post_update_plan_group, 73 | Optional[Enum['only_required', 'never', 'always']] $reboot_strategy_group, 74 | Optional[String] $reboot_message_group, 75 | Optional[Integer] $reboot_wait_group, 76 | Optional[Integer] $disconnect_wait_group, 77 | Boolean $noop 78 | ) >> Array[Hash] { 79 | # Initialize an array with tasks/plans that are always included 80 | $initial = [ 81 | { 82 | 'name' => 'patching::cache_update', 83 | 'type' => 'task', 84 
| 'params' => { 85 | '_noop' => $noop, 86 | '_catch_errors' => true, 87 | } 88 | }, 89 | { 90 | 'name' => 'patching::available_updates', 91 | 'type' => 'plan', 92 | 'params' => { 93 | 'provider' => $update_provider_group, 94 | 'format' => 'pretty', 95 | 'noop' => $noop, 96 | } 97 | }, 98 | ] 99 | 100 | # Determine if monitoring should be disabled 101 | if $monitoring_enabled_group and $monitoring_plan_group and $monitoring_plan_group != 'disabled' { 102 | $monitoring_plan = [ 103 | { 104 | 'name' => $monitoring_plan_group, 105 | 'type' => 'plan', 106 | 'params' => { 107 | 'action' => 'disable', 108 | 'noop' => $noop, 109 | } 110 | }, 111 | ] 112 | $monitoring_reenable_group = true 113 | } else { 114 | $monitoring_plan = [] 115 | } 116 | 117 | # Determine if snapshots should be created 118 | if $snapshot_create_group and $snapshot_plan_group and $snapshot_plan_group != 'disabled' { 119 | $snapshot_plan = [ 120 | { 121 | 'name' => $snapshot_plan_group, 122 | 'type' => 'plan', 123 | 'params' => { 124 | 'action' => 'create', 125 | 'noop' => $noop, 126 | } 127 | }, 128 | ] 129 | } else { 130 | $snapshot_plan = [] 131 | } 132 | 133 | # Continue adding the rest of the tasks/plans in order 134 | $update = [ 135 | { 136 | 'name' => $pre_update_plan_group, 137 | 'type' => 'plan', 138 | 'params' => { 139 | 'noop' => $noop, 140 | } 141 | }, 142 | { 143 | 'name' => 'patching::update', 144 | 'type' => 'task', 145 | 'params' => { 146 | 'provider' => $update_provider_group, 147 | '_catch_errors' => true, 148 | '_noop' => $noop, 149 | } 150 | }, 151 | { 152 | 'name' => $post_update_plan_group, 153 | 'type' => 'plan', 154 | 'params' => { 155 | 'noop' => $noop, 156 | } 157 | }, 158 | { 159 | 'name' => 'patching::reboot_required', 160 | 'type' => 'plan', 161 | 'params' => { 162 | 'strategy' => $reboot_strategy_group, 163 | 'message' => $reboot_message_group, 164 | 'wait' => $reboot_wait_group, 165 | 'disconnect_wait' => $disconnect_wait_group, 166 | 'noop' => $noop, 167 | } 168 | }, 169 | ] 170 | 171 | # Conditionally append the remove VM snapshots and enable monitoring plans at the end 172 | if $snapshot_delete_group and $snapshot_plan_group and $snapshot_plan_group != 'disabled' { 173 | $snapshot_delete = [ 174 | { 175 | 'name' => $snapshot_plan_group, 176 | 'type' => 'plan', 177 | 'params' => { 178 | 'action' => 'delete', 179 | 'noop' => $noop, 180 | } 181 | }, 182 | ] 183 | } else { 184 | $snapshot_delete = [] 185 | } 186 | 187 | if $monitoring_reenable_group and $monitoring_plan_group and $monitoring_plan_group != 'disabled' { 188 | $monitoring_reenable = [ 189 | { 190 | 'name' => $monitoring_plan_group, 191 | 'type' => 'plan', 192 | 'params' => { 193 | 'action' => 'enable', 194 | 'noop' => $noop, 195 | } 196 | }, 197 | ] 198 | } else { 199 | $monitoring_reenable = [] 200 | } 201 | 202 | # Return the complete workflow array 203 | return $initial + $monitoring_plan + $snapshot_plan + $update + $snapshot_delete + $monitoring_reenable 204 | } 205 | -------------------------------------------------------------------------------- /functions/filter_results.pp: -------------------------------------------------------------------------------- 1 | # Function to abstract the processing/error handling of patching results 2 | function patching::filter_results( 3 | Variant[ResultSet, Hash, Error] $results, 4 | String $task_plan_name 5 | ) >> Hash { 6 | if $results =~ Error { 7 | $_results = $results.details['result_set'] 8 | } else { 9 | $_results = $results 10 | } 11 | 12 | if $_results =~ Hash { 13 | if 
$_results['failed_results'].empty { 14 | $failed_results = {} 15 | } else { 16 | $failed_results = $_results['failed_results'].reduce({}) |$memo, $entry| { 17 | $name = $entry[0] 18 | $message = $entry[1] 19 | $details = { 20 | 'plan_or_task_name' => $task_plan_name, 21 | 'message' => $message, 22 | } 23 | $memo + { $name => $details } 24 | } 25 | } 26 | if $task_plan_name == 'patching::available_updates' { 27 | $result = { 28 | 'ok_targets' => $_results['has_updates'], 29 | 'failed_results' => $failed_results, 30 | 'no_updates' => $_results['no_updates'], 31 | } 32 | return $result 33 | } else { 34 | $result = { 35 | 'ok_targets' => $_results['ok_targets'], 36 | 'failed_results' => $failed_results, 37 | } 38 | return $result 39 | } 40 | } 41 | 42 | $failed_results = if !$_results.error_set.empty { 43 | # Return the result of iterating over the error_set to populate the failed_results hash 44 | $_results.error_set.reduce({}) |$memo, $error| { 45 | $name = $error.target.name 46 | if $error.value['_output'] { 47 | $message = $error.value['_output'] 48 | } else { 49 | $message = $error.error.message 50 | } 51 | $details = { 52 | 'plan_or_task_name' => $task_plan_name, 53 | 'message' => $message, 54 | } 55 | $memo + { $name => $details } 56 | } 57 | } else { 58 | {} 59 | } 60 | 61 | # Log the failed targets if any 62 | if !$_results.error_set.empty { 63 | alert("The following hosts failed during ${task_plan_name}:") 64 | alert($failed_results.keys.join("\n")) 65 | log::info($failed_results) 66 | } 67 | 68 | # Extract the list of targets that succeeded 69 | $ok_targets = $_results.ok_set.targets.map |$target| { $target.name } 70 | 71 | $result_set = { 72 | 'ok_targets' => $ok_targets, 73 | 'failed_results' => $failed_results, 74 | } 75 | 76 | # Return a hash containing the ok_targets and failed_results 77 | return $result_set 78 | } 79 | -------------------------------------------------------------------------------- /functions/process_errors.pp: -------------------------------------------------------------------------------- 1 | # Function to process patching results with targets that failed 2 | function patching::process_errors( 3 | Hash $patching_results, 4 | ) >> String { 5 | $failed_results = $patching_results['failed_results'] 6 | $start = ['Patching failed for the following hosts:'] 7 | $error_messages = $failed_results.reduce($start) |$acc, $result| { 8 | $host = $result[0] 9 | $task = $result[1]['plan_or_task_name'] 10 | $message = $result[1]['message'] 11 | log::info("${host} - ${task}: ${message}") 12 | $acc + ["Host: ${host} - Task: ${task}"] 13 | } 14 | 15 | return $error_messages.join("\n") 16 | } 17 | -------------------------------------------------------------------------------- /functions/target_names.pp: -------------------------------------------------------------------------------- 1 | # @summary Returns an array of names, one for each target, based on the $name_property 2 | # 3 | # @param [TargetSpec] targets 4 | # List of targets to extract the name from 5 | # 6 | # @param [Enum['hostname', 'name', 'uri']] name_property 7 | # Property in the Target to use as the name 8 | # 9 | # @return [Array[String]] Array of names, one for each target 10 | function patching::target_names( 11 | TargetSpec $targets, 12 | Enum['hostname', 'name', 'uri'] $name_property, 13 | ) >> Array[String] { 14 | $targets.map |$n| { 15 | case $name_property { 16 | 'hostname': { 17 | regsubst($n.uri, '^([^.]+).*','\1') 18 | } 19 | 'name': { 20 | $n.name 21 | } 22 | 'uri': { 23 | $n.uri 24 | } 25 | 
default: { 26 | fail_plan("Unsupported patching_target_name_property: ${name_property}") 27 | } 28 | } 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /images/patching_architecture_bolt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EncoreTechnologies/puppet-patching/bdd3077a45297ed2a9bd91d1cd5ff07ad5ea4098/images/patching_architecture_bolt.png -------------------------------------------------------------------------------- /lib/puppet/functions/patching/snapshot_vmware.rb: -------------------------------------------------------------------------------- 1 | require 'rbvmomi' 2 | 3 | # Creates/deletes snapshots on VMs using the VMware vSphere API. 4 | Puppet::Functions.create_function(:'patching::snapshot_vmware') do 5 | # @param vm_names Array of VM names to create/delete snapshots on 6 | # @param snapshot_name Name of the snapshot to create/delete 7 | # @param vsphere_host Hostname/IP of the vSphere server 8 | # @param vsphere_username Username to use for authenticating to vSphere 9 | # @param vsphere_password Password to use for authenticating to vSphere 10 | # @param vsphere_datacenter Datacenter in the vSphere to use when search for VMs 11 | # @param vsphere_insecure Flag to enable HTTPS without SSL verification 12 | # @param snapshot_description Description of the snapshot, when creating. 13 | # @param snapshot_memory Snapshot the VMs memory, when creating. 14 | # @param snapshot_quiesce Quiesce/flush the VMs filesystem when creating the snapshot 15 | # @param action Action to perform on the snapshot, 'create' or 'delete' 16 | # @return Results from the snapshot create/delete tasks 17 | dispatch :snapshot_vmware do 18 | required_param 'Array', :vm_names 19 | required_param 'String', :snapshot_name 20 | required_param 'String', :vsphere_host 21 | required_param 'String', :vsphere_username 22 | required_param 'String', :vsphere_password 23 | required_param 'String', :vsphere_datacenter 24 | optional_param 'Boolean', :vsphere_insecure 25 | optional_param 'String', :snapshot_description 26 | optional_param 'Boolean', :snapshot_memory 27 | optional_param 'Boolean', :snapshot_quiesce 28 | optional_param 'String', :action 29 | return_type 'Hash' 30 | end 31 | 32 | def snapshot_vmware(vm_names, 33 | snapshot_name, 34 | vsphere_host, 35 | vsphere_username, 36 | vsphere_password, 37 | vsphere_datacenter, 38 | vsphere_insecure = true, 39 | snapshot_description = nil, 40 | snapshot_memory = false, 41 | snapshot_quiesce = false, 42 | action = 'create') 43 | begin 44 | # Check to make sure a valid action was chosen 45 | available_actions = ['create', 'delete'] 46 | unless available_actions.include? action 47 | raise "#{action} is an invalid action. 
Please choose from create or delete" 48 | end 49 | 50 | # Compose vsphere credentials 51 | credentials = { 52 | host: vsphere_host, 53 | user: vsphere_username, 54 | password: vsphere_password, 55 | insecure: vsphere_insecure, 56 | } 57 | 58 | # Establish a connection to vsphere 59 | vim = RbVmomi::VIM.connect credentials 60 | 61 | # Get the vsphere Datacenter that we are interested in 62 | dc = vim.serviceInstance.find_datacenter(vsphere_datacenter) 63 | 64 | unless dc 65 | raise "Could not find datacenter with name: #{vsphere_datacenter}" 66 | end 67 | 68 | # Get all the VMs in the datacenter 69 | view_hash = { 70 | container: dc.vmFolder, 71 | type: ['VirtualMachine'], 72 | recursive: true, 73 | } 74 | all_vms = vim.serviceContent.viewManager.CreateContainerView(view_hash).view 75 | 76 | # Create a snapshot for each VM 77 | snapshot_error = [] 78 | snapshot_tasks = [] 79 | successful_vms = [] 80 | vm_names.each do |vm_name| 81 | snapshot_error_hash = { vm_name => '' } 82 | begin 83 | task_return = nil 84 | if action == 'create' 85 | task_return = create_snapshot_on_vm(all_vms, vm_name, snapshot_name, snapshot_description, snapshot_memory, snapshot_quiesce) 86 | elsif action == 'delete' 87 | task_return = delete_snapshot_from_vm(all_vms, vm_name, snapshot_name) 88 | end 89 | 90 | if task_return 91 | snapshot_tasks.push(task_return) 92 | successful_vms.push(vm_name) 93 | else 94 | snapshot_error_hash[vm_name] = 'Could not find vm' 95 | end 96 | rescue RbVmomi::Fault => err 97 | snapshot_error_hash[vm_name] = "RbVmomi::Fault: #{err.message}" 98 | rescue => err 99 | snapshot_error_hash[vm_name] = "General Error: #{err.message}" 100 | end 101 | 102 | if snapshot_error_hash[vm_name] != '' 103 | snapshot_error.push(snapshot_error_hash) 104 | end 105 | end 106 | 107 | # Wait for all snapshot tasks to finish 108 | completion_errors = wait_for_completion(snapshot_tasks) 109 | 110 | # Combine any errors that are present 111 | vmware_error_return = combine_errors(snapshot_error + completion_errors) 112 | 113 | # Return the results 114 | { 115 | 'ok_targets' => successful_vms, 116 | 'failed_results' => vmware_error_return 117 | } 118 | rescue RbVmomi::Fault => err 119 | raise "RbVmomi::Fault: #{err.message}" 120 | rescue => err 121 | raise "General Error: #{err.message}" 122 | end 123 | end 124 | 125 | def combine_errors(error_list) 126 | # Combine error lists. If there are multiple errors we will 127 | # combine the details information 128 | error_return = {} 129 | error_list.each do |error| 130 | error.each do |vm_name, details| 131 | if error_return.key?(vm_name) 132 | error_return[vm_name] += ", #{details}" 133 | else 134 | error_return[vm_name] = details 135 | end 136 | end 137 | end 138 | error_return 139 | end 140 | 141 | def create_snapshot_on_vm(all_vms, vm_name, snapshot_name, snapshot_description, snapshot_memory, snapshot_quiesce) 142 | # Find the VM and create the snapshot. Wait for the snapshot to be created. 
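    # (added descriptive note on the logic that follows)
    # - scans all_vms for a VM whose name matches vm_name
    # - if a snapshot with the requested name already exists it is deleted first;
    #   if more than one snapshot carries that name, an error is raised instead
    # - returns the CreateSnapshot_Task object on success, or false when no
    #   matching VM is found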
143 | return_value = false 144 | all_vms.each do |vm| 145 | next unless vm.name == vm_name 146 | 147 | if vm.snapshot 148 | snapshots = find_snapshot(vm.snapshot.rootSnapshotList, snapshot_name) 149 | 150 | if snapshots.length == 1 151 | delete_snapshot_from_vm(all_vms, vm_name, snapshot_name) 152 | elsif snapshots.length > 1 153 | raise "There are #{snapshots.length} snapshots with the name #{snapshot_name} please remediate this or choose a different name before continuing" 154 | end 155 | end 156 | 157 | begin 158 | # Create the Snapshot 159 | snapshot_task = vm.CreateSnapshot_Task(name: snapshot_name, description: snapshot_description, memory: snapshot_memory, quiesce: snapshot_quiesce) 160 | return_value = snapshot_task 161 | break 162 | rescue => err 163 | raise "Creating snapshot failed with error: #{err}" 164 | end 165 | end 166 | return_value 167 | end 168 | 169 | def delete_snapshot_from_vm(all_vms, vm_name, snapshot_name) 170 | # Find the VM and delete the snapshot. Wait for the snapshot delete to finish before continuing 171 | return_value = false 172 | all_vms.each do |vm| 173 | next unless vm.name == vm_name 174 | 175 | # If the VM doesn't have snapshots then exit 176 | unless vm.snapshot 177 | return_value = true 178 | break 179 | end 180 | 181 | # Find the snapshot to delete 182 | snapshots = find_snapshot(vm.snapshot.rootSnapshotList, snapshot_name) 183 | if snapshots.empty? 184 | return_value = true 185 | break 186 | end 187 | 188 | # Delete the last Snapshot 189 | begin 190 | snapshot_task = snapshots[-1].RemoveSnapshot_Task(removeChildren: false) 191 | return_value = snapshot_task 192 | break 193 | rescue => err 194 | raise "Deleting snapshot failed with error: #{err}" 195 | end 196 | end 197 | 198 | return_value 199 | end 200 | 201 | def find_snapshot(snapshot_list, snapshot_name) 202 | # Find snapshot by name from the list of snapshots on the VM 203 | snapshot_return = [] 204 | snapshot_list.each do |vm_snapshot| 205 | if vm_snapshot.name == snapshot_name 206 | snapshot_return.push(vm_snapshot.snapshot) 207 | end 208 | 209 | unless vm_snapshot.childSnapshotList.empty? 
210 | # If snapshot has child snapshots then search those also 211 | snapshot_return += find_snapshot(vm_snapshot.childSnapshotList, snapshot_name) 212 | end 213 | end 214 | snapshot_return 215 | end 216 | 217 | def wait_for_completion(snapshot_task_list) 218 | # Make sure all snapshot tasks have been completed 219 | completion_errors = [] 220 | snapshot_task_list.each do |snapshot_task| 221 | next unless snapshot_task.is_a?(RbVmomi::VIM::Task) 222 | 223 | begin 224 | snapshot_task.wait_for_completion 225 | rescue RbVmomi::Fault => err 226 | completion_errors_hash = { 227 | snapshot_task.info.entity.name => "RbVmomi::Fault: #{err.message}" 228 | } 229 | completion_errors.push(completion_errors_hash) 230 | rescue => err 231 | completion_errors_hash = { 232 | snapshot_task.info.entity.name => "General Error: #{err.message}" 233 | } 234 | completion_errors.push(completion_errors_hash) 235 | end 236 | end 237 | completion_errors 238 | end 239 | end -------------------------------------------------------------------------------- /lib/puppet_x/encore/patching/http_helper.rb: -------------------------------------------------------------------------------- 1 | require 'net/https' 2 | require 'ipaddr' 3 | require 'puppet_x' 4 | 5 | module PuppetX::Patching 6 | # Helper class for HTTP calls 7 | class HTTPHelper 8 | def initialize(username: nil, 9 | password: nil, 10 | ssl: false, 11 | ca_file: nil, 12 | redirect_limit: 10, 13 | headers: {}) 14 | @username = username 15 | @password = password 16 | @ssl = ssl 17 | @ca_file = ca_file 18 | @redirect_limit = redirect_limit 19 | @headers = headers 20 | end 21 | 22 | def execute(method, url, body: nil, headers: {}, redirect_limit: @redirect_limit) 23 | raise ArgumentError, 'HTTP redirect too deep' if redirect_limit.zero? 
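      # (added descriptive note) the request is assembled below and, on a 3xx
      # response, execute() recurses with redirect_limit - 1, so the guard above
      # bounds how many redirects are followed. When @ssl is enabled the
      # connection verifies the peer (VERIFY_PEER), optionally against @ca_file.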
24 | 25 | # setup our HTTP class 26 | uri = URI.parse(url) 27 | http = Net::HTTP.new(uri.host, uri.port) 28 | http.use_ssl = @ssl 29 | 30 | # Configure SSL context if SSL is enabled 31 | if @ssl 32 | if @ca_file 33 | http.cert_store = OpenSSL::X509::Store.new 34 | http.cert_store.set_default_paths 35 | http.cert_store.add_file(@ca_file) 36 | end 37 | 38 | http.verify_mode = OpenSSL::SSL::VERIFY_PEER 39 | end 40 | 41 | # create our request 42 | req = net_http_request_class(method).new(uri) 43 | req.basic_auth(@username, @password) if @username && @password 44 | 45 | # copy headers into the request 46 | headers.each { |k, v| req[k] = v } 47 | # set the body in the request 48 | req.body = body if body 49 | 50 | # execute 51 | resp = http.request(req) 52 | 53 | # check response for success, redirect or error 54 | case resp 55 | when Net::HTTPSuccess then 56 | resp 57 | when Net::HTTPRedirection then 58 | execute(method, resp['location'], 59 | body: body, headers: headers, 60 | redirect_limit: redirect_limit - 1) 61 | else 62 | message = 'code=' + resp.code 63 | message += ' message=' + resp.message 64 | message += ' body=' + resp.body 65 | raise resp.error_type.new(message, resp) 66 | end 67 | end 68 | 69 | def net_http_request_class(method) 70 | Net::HTTP.const_get(method.capitalize, false) 71 | end 72 | 73 | def ip?(str) 74 | IPAddr.new(str) 75 | true 76 | rescue 77 | false 78 | end 79 | 80 | def get(url, body: nil, headers: @headers) 81 | execute('get', url, body: body, headers: headers, redirect_limit: @redirect_limit) 82 | end 83 | 84 | def post(url, body: nil, headers: @headers) 85 | execute('post', url, body: body, headers: headers, redirect_limit: @redirect_limit) 86 | end 87 | 88 | def delete(url, body: nil, headers: @headers) 89 | execute('delete', url, body: body, headers: headers, redirect_limit: @redirect_limit) 90 | end 91 | end 92 | end 93 | -------------------------------------------------------------------------------- /lib/puppet_x/encore/patching/orion_client.rb: -------------------------------------------------------------------------------- 1 | require 'puppet_x' 2 | require 'puppet_x/encore/patching/http_helper' 3 | 4 | module PuppetX::Patching 5 | # Abstraction of the SolarWinds Orion API 6 | class OrionClient < HTTPHelper 7 | def initialize(server, 8 | port: 17_778, 9 | username: nil, 10 | password: nil, 11 | ssl: true, 12 | ssl_verify: OpenSSL::SSL::VERIFY_NONE, 13 | redirect_limit: 10, 14 | headers: { 15 | 'Content-Type' => 'application/json', 16 | }) 17 | super(username: username, 18 | password: password, 19 | ssl: ssl, 20 | ssl_verify: ssl_verify, 21 | redirect_limit: redirect_limit, 22 | headers: headers) 23 | @server = server 24 | @port = port 25 | @scheme = ssl ? 'https' : 'http' 26 | end 27 | 28 | def make_url(endpoint) 29 | "#{@scheme}://#{@server}:#{@port}/SolarWinds/InformationService/v3/Json/#{endpoint}" 30 | end 31 | 32 | def query(query, params) 33 | body = { 34 | 'query' => query, 35 | 'parameters' => params, 36 | } 37 | resp = post(make_url('Query'), body: body.to_json) 38 | data = JSON.parse(resp.body) 39 | if data['results'] 40 | data['results'] 41 | else 42 | [] 43 | end 44 | end 45 | 46 | def invoke(entity, verb, body: nil) 47 | resp = post(make_url("Invoke/#{entity}/#{verb}"), body: body) 48 | JSON.parse(resp.body) 49 | end 50 | 51 | def get_node(hostname_or_ip, name_property: 'DNS') 52 | field = ip?(hostname_or_ip) ? 
'IPAddress' : name_property 53 | field_list = ['NodeID', 'Uri', 'IPAddress', name_property].uniq 54 | q = "SELECT #{field_list.join(',')} FROM Orion.Nodes WHERE #{field}=@query_on" 55 | params = { 56 | 'query_on' => hostname_or_ip, 57 | } 58 | query(q, params) 59 | end 60 | 61 | def suppress_alerts(uri_array) 62 | body = [uri_array].to_json 63 | invoke('Orion.AlertSuppression', 'SuppressAlerts', body: body) 64 | end 65 | 66 | def resume_alerts(uri_array) 67 | body = [uri_array].to_json 68 | invoke('Orion.AlertSuppression', 'ResumeAlerts', body: body) 69 | end 70 | end 71 | end 72 | -------------------------------------------------------------------------------- /manifests/init.pp: -------------------------------------------------------------------------------- 1 | # @summary allows global customization of the patching resources 2 | # 3 | # @param patching_dir 4 | # Global directory as the base for `bin_dir` and `log_dir` 5 | # 6 | # @param bin_dir 7 | # Global directory where the scripts will be installed 8 | # 9 | # @param log_dir 10 | # Directory where log files will be written during patching 11 | # 12 | # @param owner 13 | # Default owner of installed scripts 14 | # 15 | # @param group 16 | # Default group of installed scripts 17 | # 18 | # @param mode 19 | # Default file mode of installed scripts 20 | # 21 | # @param scripts 22 | # Hash of script resources to instantiate. Useful for declaring script installs from hiera. 23 | # 24 | # @example Basic usage 25 | # include patching 26 | # 27 | # @example Customizing script location 28 | # class {'patching': 29 | # bin_dir => '/my/custom/patching/scripts', 30 | # } 31 | # 32 | # @example Customizing the owner/group/mode of the scripts 33 | # class {'patching': 34 | # owner => 'svc_patching', 35 | # group => 'svc_patching', 36 | # mode => '0700', 37 | # } 38 | # 39 | # @example Customizing from hiera 40 | # patching::bin_dir: '/my/custom/app/patching/dir' 41 | # patching::owner: 'svc_patching' 42 | # patching::group: 'svc_patching' 43 | # patching::mode: '0700' 44 | # 45 | # @example Deploying scripts from hiera 46 | # patching::scripts: 47 | # custom_app_pre_patch.sh: 48 | # source: 'puppet:///mymodule/patching/custom_app_pre_patch.sh' 49 | # custom_app_post_patch.sh: 50 | # source: 'puppet:///mymodule/patching/custom_app_post_patch.sh' 51 | # 52 | class patching ( 53 | $patching_dir = $patching::params::patching_dir, 54 | $bin_dir = $patching::params::bin_dir, 55 | $log_dir = $patching::params::log_dir, 56 | $owner = $patching::params::owner, 57 | $group = $patching::params::group, 58 | $mode = $patching::params::mode, 59 | Optional[Hash] $scripts = undef, 60 | ) inherits patching::params { 61 | if $patching_dir { 62 | ensure_resource('file', $patching_dir, { 63 | ensure => directory, 64 | owner => $owner, 65 | group => $group, 66 | }) 67 | } 68 | 69 | if $bin_dir { 70 | ensure_resource('file', $bin_dir, { 71 | ensure => directory, 72 | owner => $owner, 73 | group => $group, 74 | }) 75 | } 76 | 77 | if $log_dir { 78 | ensure_resource('file', $log_dir, { 79 | ensure => directory, 80 | owner => $owner, 81 | group => $group, 82 | }) 83 | } 84 | 85 | if $scripts { 86 | $defaults = { 87 | bin_dir => $bin_dir, 88 | owner => $owner, 89 | group => $group, 90 | mode => $mode, 91 | } 92 | ensure_resources('patching::script', $scripts, $defaults) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /manifests/params.pp: -------------------------------------------------------------------------------- 1 | 
# @summary params for the patching module resources 2 | class patching::params { 3 | case $facts['os']['family'] { 4 | 'Windows': { 5 | $patching_dir = 'C:/ProgramData/patching' 6 | $bin_dir = "${patching_dir}/bin" 7 | $log_dir = "${patching_dir}/log" 8 | $owner = 'Administrator' 9 | $group = 'Administrator' 10 | $mode = '0770' 11 | } 12 | 'RedHat': { 13 | $patching_dir = '/opt/patching' 14 | $bin_dir = "${patching_dir}/bin" 15 | $log_dir = "${patching_dir}/log" 16 | $owner = 'root' 17 | $group = 'root' 18 | $mode = '0770' 19 | } 20 | 'Debian': { 21 | $patching_dir = '/opt/patching' 22 | $bin_dir = "${patching_dir}/bin" 23 | $log_dir = "${patching_dir}/log" 24 | $owner = 'root' 25 | $group = 'root' 26 | $mode = '0770' 27 | } 28 | 'Suse': { 29 | $patching_dir = '/opt/patching' 30 | $bin_dir = "${patching_dir}/bin" 31 | $log_dir = "${patching_dir}/log" 32 | $owner = 'root' 33 | $group = 'root' 34 | $mode = '0770' 35 | } 36 | default: { 37 | fail("Unsupported OS family: ${facts['os']['family']}") 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /manifests/script.pp: -------------------------------------------------------------------------------- 1 | # @summary manages a script for custom patching actions 2 | # 3 | # @param source 4 | # Source (puppet path) for the `file` resource of the script. 5 | # Either `source` our `content` must be specified. If neither are specified an error will be thrown. 6 | # 7 | # @param content 8 | # Content (raw string, result of `template()`, etc) for the `file` resource of the script. 9 | # Either `source` our `content` must be specified. If neither are specified an error will be thrown. 10 | # 11 | # @param bin_dir 12 | # Directory where the script will be installed 13 | # 14 | # @param owner 15 | # Owner of the script file 16 | # 17 | # @param group 18 | # Group of the script file 19 | # 20 | # @param mode 21 | # File mode to set on the script 22 | # 23 | # @example Basic usage from static file 24 | # include patching 25 | # patching::script { 'pre_patch.sh': 26 | # source => 'puppet://mymodule/patching/custom_app_pre_patch.sh', 27 | # } 28 | # 29 | # @example Basic usage from template 30 | # include patching 31 | # patching::script { 'pre_patch.sh': 32 | # content => template('mymodule/patching/custom_app_pre_patch.sh'), 33 | # } 34 | # 35 | # @example Installing the script into a different path with a different name 36 | # include patching 37 | # patching::script { 'custom_app_pre_patch.sh': 38 | # content => template('mymodule/patching/custom_app_pre_patch.sh'), 39 | # bin_dir => '/my/custom/app/patching/dir', 40 | # } 41 | # 42 | # @example Installing multiple scripts into a different path 43 | # class {'patching': 44 | # bin_dir => '/my/custom/app/patching/dir', 45 | # } 46 | # 47 | # # we don't have to override bin_dir on each of these because 48 | # # we configured it gobally in the patching class above 49 | # patching::script { 'custom_app_pre_patch.sh': 50 | # content => template('mymodule/patching/custom_app_pre_patch.sh'), 51 | # } 52 | # patching::script { 'custom_app_post_patch.sh': 53 | # content => template('mymodule/patching/custom_app_post_patch.sh'), 54 | # } 55 | # 56 | # @example From hiera 57 | # patching::bin_dir: '/my/custom/app/patching/dir' 58 | # patching::scripts: 59 | # custom_app_pre_patch.sh: 60 | # source: 'puppet:///mymodule/patching/custom_app_pre_patch.sh' 61 | # custom_app_post_patch.sh: 62 | # source: 'puppet:///mymodule/patching/custom_app_post_patch.sh' 63 | # 64 
| define patching::script ( 65 | $source = undef, 66 | $content = undef, 67 | $bin_dir = $patching::bin_dir, 68 | $owner = $patching::owner, 69 | $group = $patching::group, 70 | $mode = $patching::mode, 71 | ) { 72 | if $source { 73 | file { "${bin_dir}/${name}": 74 | ensure => file, 75 | source => $source, 76 | owner => $owner, 77 | group => $group, 78 | mode => $mode, 79 | } 80 | } 81 | elsif $content { 82 | file { "${bin_dir}/${name}": 83 | ensure => file, 84 | content => $content, 85 | owner => $owner, 86 | group => $group, 87 | mode => $mode, 88 | } 89 | } 90 | else { 91 | fail("Must specify either 'source' or 'content', we received 'undef' for both.") 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "encore-patching", 3 | "version": "1.8.1", 4 | "author": "Encore Technologies", 5 | "summary": "Implements OS patching workflows using Bolt tasks and plans.", 6 | "license": "Apache-2.0", 7 | "source": "https://github.com/EncoreTechnologies/puppet-patching", 8 | "project_page": "https://github.com/EncoreTechnologies/puppet-patching", 9 | "issues_url": "https://github.com/EncoreTechnologies/puppet-patching/issues", 10 | "dependencies": [ 11 | { 12 | "name": "puppetlabs/puppet_agent", 13 | "version_requirement": ">= 2.2.0 < 5.0.0" 14 | }, 15 | { 16 | "name": "puppetlabs/reboot", 17 | "version_requirement": ">= 3.0.0 < 5.0.0" 18 | }, 19 | { 20 | "name": "puppetlabs/stdlib", 21 | "version_requirement": ">= 4.13.1 < 10.0.0" 22 | } 23 | ], 24 | "operatingsystem_support": [ 25 | { 26 | "operatingsystem": "SLES", 27 | "operatingsystemrelease": [ 28 | "12", 29 | "15" 30 | ] 31 | }, 32 | { 33 | "operatingsystem": "CentOS", 34 | "operatingsystemrelease": [ 35 | "7", 36 | "8" 37 | ] 38 | }, 39 | { 40 | "operatingsystem": "OracleLinux", 41 | "operatingsystemrelease": [ 42 | "7", 43 | "8" 44 | ] 45 | }, 46 | { 47 | "operatingsystem": "RedHat", 48 | "operatingsystemrelease": [ 49 | "7", 50 | "8" 51 | ] 52 | }, 53 | { 54 | "operatingsystem": "Scientific", 55 | "operatingsystemrelease": [ 56 | "7" 57 | ] 58 | }, 59 | { 60 | "operatingsystem": "Debian", 61 | "operatingsystemrelease": [ 62 | "8" 63 | ] 64 | }, 65 | { 66 | "operatingsystem": "Ubuntu", 67 | "operatingsystemrelease": [ 68 | "16.04", 69 | "18.04" 70 | ] 71 | }, 72 | { 73 | "operatingsystem": "windows", 74 | "operatingsystemrelease": [ 75 | "2008 R2", 76 | "2012 R2", 77 | "2016", 78 | "2019", 79 | "10" 80 | ] 81 | } 82 | ], 83 | "requirements": [ 84 | { 85 | "name": "puppet", 86 | "version_requirement": ">= 4.10.0 < 8.0.0" 87 | } 88 | ], 89 | "pdk-version": "3.2.0", 90 | "template-url": "https://github.com/puppetlabs/pdk-templates#3.2.0.1", 91 | "template-ref": "tags/3.2.0.1-0-g52b3cba" 92 | } 93 | -------------------------------------------------------------------------------- /pdk.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: [] 3 | -------------------------------------------------------------------------------- /plans/available_updates.pp: -------------------------------------------------------------------------------- 1 | # @summary Checks all targets for available updates reported by their Operating System. 2 | # 3 | # This uses the patching::available_updates task to query each Target's 4 | # Operating System for available updates. 
The results from the OS are parsed and formatted 5 | # into easy to consume JSON data, such that further code can be written against the 6 | # output. 7 | # 8 | # - RHEL: This ultimately performs a yum check-update. 9 | # - Ubuntu: This ultimately performs a apt upgrade --simulate. 10 | # - Windows: 11 | # - Windows Update API: Queries the WUA for updates. This is the standard update mechanism 12 | # for Windows. 13 | # - Chocolatey: If installed, runs choco outdated. 14 | # If not installed, Chocolatey is ignored. 15 | # 16 | # @param [TargetSpec] targets 17 | # Set of targets to run against. 18 | # @param [Enum['none', 'pretty', 'csv']] format 19 | # Output format for printing user-friendly information during the plan run. 20 | # This also determines the format of the information returned from this plan. 21 | # 22 | # - 'none' : Prints no data to the screen. Returns the raw ResultSet from 23 | # the patching::available_updates task 24 | # - 'pretty' : Prints the data out in a easy to consume format, one line per host, 25 | # showing the number of available updates per host. Returns a Hash containing 26 | # two keys: 'has_updates' - an array of TargetSpec that have updates available, 27 | # 'no_updates' - an array of hosts that have no updates available. 28 | # - 'csv' : Prints and returns CSV formatted data, one row for each update of each host. 29 | # @param [Boolean] noop 30 | # Run this plan in noop mode, meaning no changes will be made to end systems. 31 | # In this case, noop mode has no effect. 32 | # @param [Optional[String]] provider 33 | # What update provider to use. For Linux (RHEL, Debian, SUSE, etc.) this parameter 34 | # is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' 35 | # (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' 36 | # is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. 37 | # If 'chocolatey' is passed and Chocolatey isn't installed, then this will error. 
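# The example below is an added, illustrative sketch of the provider parameter
# described above (it is not one of the module's original examples); it assumes
# a $windows_hosts variable holding Windows targets with Chocolatey installed.
#
# @example Plan - Only check Chocolatey packages on Windows targets
#   run_plan('patching::available_updates', $windows_hosts,
#     provider => 'chocolatey')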
38 | # 39 | # @example CLI - Basic Usage 40 | # bolt plan run patching::available_updates --targets linux_hosts 41 | # 42 | # @example CLI - Get available update information in CSV format for creating reports 43 | # bolt plan run patching::available_updates --targets linux_hosts format=csv 44 | # 45 | # @example Plan - Basic Usage 46 | # run_plan('patching::available_updates', $linux_hosts) 47 | # 48 | # @example Plan - Get available update information in CSV format for creating reports 49 | # run_plan('patching::available_updates', $linux_hosts, 50 | # format => 'csv') 51 | # 52 | plan patching::available_updates ( 53 | TargetSpec $targets, 54 | # TODO JSON 55 | Enum['none', 'pretty', 'csv'] $format = 'pretty', 56 | Boolean $noop = false, 57 | Optional[String] $provider = undef, 58 | ) { 59 | $available_results = run_task('patching::available_updates', $targets, 60 | provider => $provider, 61 | _noop => $noop, 62 | _catch_errors => true) 63 | case $format { 64 | 'none': { 65 | return($available_results) 66 | } 67 | 'pretty': { 68 | out::message("Host update status: ('+' has available update; '-' no update) [num updates]") 69 | $has_updates = $available_results.filter_set|$res| { !$res['updates'].empty() }.targets 70 | $no_updates = $available_results.filter_set|$res| { $res['updates'].empty() }.targets 71 | $filtered_results = patching::filter_results($available_results, 'patching::available_updates') 72 | $available_results.each|$res| { 73 | if $res.value['updates'] { 74 | $num_updates = $res['updates'].size 75 | $symbol = ($num_updates > 0) ? { true => '+' , default => '-' } 76 | out::message(" ${symbol} ${res.target.name} [${num_updates}]") 77 | } 78 | } 79 | return({ 80 | 'has_updates' => $has_updates, 81 | 'no_updates' => $no_updates, 82 | 'failed_results' => $filtered_results['failed_results'], 83 | }) 84 | } 85 | 'csv': { 86 | $csv_header = "hostname,num_updates,name,version (linux only),kbs (windows only)\n" 87 | $csv = $available_results.reduce($csv_header) |$res_memo, $res| { 88 | $hostname = $res.target.name 89 | $num_updates = $res['updates'].length 90 | $host_updates = $res['updates'].reduce('') |$up_memo, $up| { 91 | $name = $up['name'] 92 | $version = ('version' in $up) ? { 93 | true => $up['version'], 94 | default => '', 95 | } 96 | $kb_ids = ('kb_ids' in $up) ? { 97 | true => $up['kb_ids'].join(','), 98 | default => '', 99 | } 100 | $csv_line = "${hostname},${num_updates},${name},${version},${kb_ids}" 101 | out::message($csv_line) 102 | "${up_memo}${csv_line}\n" 103 | } 104 | "${res_memo}${host_updates}" 105 | } 106 | file::write('available_updates.csv', $csv) 107 | out::message($csv) 108 | return($csv) 109 | } 110 | default: { 111 | fail_plan("unknown format: ${format}") 112 | } 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /plans/check_online.pp: -------------------------------------------------------------------------------- 1 | # @summary Checks each node to see they're online. 2 | # 3 | # Online checks are done querying for the node's Puppet version using the 4 | # puppet_agent::version task. 5 | # This plan is designed to be used ad-hoc as a quick health check of your inventory. 6 | # It is the intention of this plan to be used as "first pass" when onboarding new targets 7 | # into a Bolt rotation. 8 | # One would build their inventory file of all targets from their trusted data sources. 9 | # Then take the inventory files and run this plan against them to isolate problem targets 10 | # and remediate them. 
11 | # Once this plan runs successfuly on your inventory, you know that Bolt can connect 12 | # and can begin the patching proces. 13 | # 14 | # There are no results returned by this plan, instead data is pretty-printed to the screen in 15 | # two lists: 16 | # 17 | # 1. List of targets that failed to connect. This list is a YAML list where each line 18 | # is the name of a Target that failed to connect. 19 | # The intention here is that you can use this YAML list to modify your inventory 20 | # and remove these problem hosts from your groups. 21 | # 2. Details for each failed target. This provides details about the error that 22 | # occured when connecting. Failures can occur for many reasons, host being offline 23 | # host not listening on the right port, firewall blocking, invalid credentials, etc. 24 | # The idea here is to give the end-user a easily digestible summary so that action 25 | # can be taken to remediate these hosts. 26 | # 27 | # @param [TargetSpec] targets 28 | # Set of targets to run against. 29 | # 30 | # @example CLI - Basic usage 31 | # bolt plan run patching::check_online 32 | # 33 | plan patching::check_online ( 34 | TargetSpec $targets, 35 | ) { 36 | $_targets = get_targets($targets) 37 | ## This will check all targets to verify online by checking their Puppet agent version 38 | $targets_version = run_task('puppet_agent::version', $_targets, 39 | _catch_errors => true) 40 | # if we're filtering out offline targets, then only accept the ok_set from the task above 41 | if !$targets_version.error_set.empty() { 42 | $errors_array = Array($targets_version.error_set) 43 | $sorted_errors = $errors_array.sort|$a, $b| { 44 | compare($a.target.name, $b.target.name) 45 | } 46 | out::message('###################################') 47 | out::message('List of targets that failed to connect') 48 | $sorted_errors.each |$res| { 49 | $name = $res.target.name 50 | out::message("- ${name}") 51 | } 52 | out::message('###################################') 53 | out::message('Details for each failed target') 54 | $sorted_errors.each |$res| { 55 | $name = $res.target.name 56 | $issue_code = $res.error.issue_code 57 | $msg = $res.error.msg 58 | out::message("- name: ${name}") 59 | out::message(" error: [${issue_code}] ${msg}") 60 | } 61 | fail_plan('Unable to connect to the targets above!') 62 | } 63 | else { 64 | out::message('All targets succeeded!') 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /plans/check_puppet.pp: -------------------------------------------------------------------------------- 1 | # @summary Checks each node to see if Puppet is installed, then gather Facts on all targets. 2 | # 3 | # Executes the puppet_agent::version task to check if Puppet is installed 4 | # on all of the targets. Once finished, the result is split into two groups: 5 | # 6 | # 1. Targets with puppet 7 | # 2. Targets with no puppet 8 | # 9 | # The targets with puppet are queried for facts using the patching::puppet_facts plan. 10 | # Targets without puppet are queried for facts using the simpler facts plan. 11 | # 12 | # This plan is designed to be the first plan executed in a patching workflow. 13 | # It can be used to stop the patching process if any hosts are offline by setting 14 | # filter_offline_targets=false (default). It can also be used 15 | # to patch any hosts that are currently available and ignoring any offline targets 16 | # by setting filter_offline_targets=true. 
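# Because facts are gathered as part of this plan, callers can branch on them
# immediately afterwards. A minimal illustrative sketch (the variable names are
# placeholders):
#
#   $results = run_plan('patching::check_puppet', $targets)
#   $windows = $results['all'].filter |$t| { facts($t)['os']['family'] == 'windows' }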
17 | # 18 | # @param [TargetSpec] targets 19 | # Set of targets to run against. 20 | # @param [Boolean] filter_offline_targets 21 | # Flag to determine if offline targets should be filtered out of the list of targets 22 | # returned by this plan. If true, when running the puppet_agent::version 23 | # check, any targets that return an error will be filtered out and ignored. 24 | # Those targets will not be returned in any of the data structures in the result of 25 | # this plan. If false, then any targets that are offline will cause this plan to error 26 | # immediately when performing the online check. This will result in a halt of the 27 | # patching process. 28 | # 29 | # @return [Struct[{has_puppet => Array[TargetSpec], 30 | # no_puppet => Array[TargetSpec], 31 | # all => Array[TargetSpec]}]] 32 | # 33 | # @example CLI - Basic usage (error if any targets are offline) 34 | # bolt plan run patching::check_puppet --targets linux_hosts 35 | # 36 | # @example CLI - Filter offline targets (only return online targets) 37 | # bolt plan run patching::check_puppet --targets linux_hosts filter_offline_targets=true 38 | # 39 | # @example Plan - Basic usage (error if any targets are offline) 40 | # $results = run_plan('patching::check_puppet', $linux_hosts) 41 | # $targets_has_puppet = $results['has_puppet'] 42 | # $targets_no_puppet = $results['no_puppet'] 43 | # $targets_all = $results['all'] 44 | # 45 | # @example Plan - Filter offline targets (only return online targets) 46 | # $results = run_plan('patching::check_puppet', $linux_hosts, 47 | # filter_offline_targets => true) 48 | # $targets_online_has_puppet = $results['has_puppet'] 49 | # $targets_online_no_puppet = $results['no_puppet'] 50 | # $targets_online = $results['all'] 51 | # 52 | plan patching::check_puppet ( 53 | TargetSpec $targets, 54 | Boolean $filter_offline_targets = false, 55 | ) { 56 | $_targets = get_targets($targets) 57 | ## This will check all targets to verify online by checking their Puppet agent version 58 | $targets_version = run_task('puppet_agent::version', $_targets, 59 | _catch_errors => $filter_offline_targets) 60 | # if we're filtering out offline targets, then only accept the ok_set from the task above 61 | if $filter_offline_targets { 62 | out::message("Removing the following offline targets: ${$targets_version.error_set.map |$t| { $t.target.name }}") 63 | log::info($targets_version.error_set.map |$t| { $t.error.message }) 64 | $targets_filtered = $targets_version.ok_set 65 | } 66 | else { 67 | $targets_filtered = $targets_version 68 | } 69 | # targets without puppet will return a value {'verison' => undef} 70 | $targets_with_puppet = $targets_filtered.filter_set |$res| { $res['version'] != undef }.targets 71 | $targets_no_puppet = $targets_filtered.filter_set |$res| { $res['version'] == undef }.targets 72 | 73 | ## get facts from each node 74 | if !$targets_with_puppet.empty() { 75 | # run `puppet facts` on targets with Puppet because it returns a more complete 76 | # set of facts than just running `facter` 77 | run_plan('patching::puppet_facts', $targets_with_puppet) 78 | } 79 | if !$targets_no_puppet.empty() { 80 | # run `facter` if it's available otherwise get basic facts 81 | run_plan('facts', $targets_no_puppet) 82 | } 83 | 84 | return({ 85 | 'has_puppet' => $targets_with_puppet, 86 | 'no_puppet' => $targets_no_puppet, 87 | 'all' => $targets_with_puppet + $targets_no_puppet, 88 | }) 89 | } 90 | -------------------------------------------------------------------------------- /plans/deploy_scripts.pp: 
-------------------------------------------------------------------------------- 1 | # @summary Plan to deploy scripts from a bolt control node to a bunch of hosts using Puppet. 2 | # 3 | # TODO support deploying without Puppet on the end node? 4 | # 5 | # @param [TargetSpec] targets 6 | # Set of targets to run against. 7 | # 8 | # @param [Hash] scripts 9 | # Scripts hash that each represent patching::script reasources for deploying our scripts 10 | # 11 | # @param [Optional[String]] patching_dir 12 | # Global directory as the base for `bin_dir` and `log_dir` 13 | # 14 | # @param [Optional[String]] bin_dir 15 | # Global directory where the scripts will be installed 16 | # 17 | # @param [Optional[String]] bin_dir 18 | # Directory where log files will be written during patching 19 | 20 | # @param [Optional[String]] owner 21 | # Default owner of installed scripts 22 | # 23 | # @param [Optional[String]] group 24 | # Default group of installed scripts 25 | # 26 | # @param [Optional[String]] mode 27 | # Default file mode of installed scripts 28 | # 29 | # @example CLI deploy a pre patching script 30 | # bolt plan run patching::deploy_scripts scripts='{"pre_patch.sh": {"source": "puppet:///modules/test/patching/pre_patch.sh"}}' 31 | # 32 | # @example CLI deploy a pre and post patching script 33 | # bolt plan run patching::deploy_scripts scripts='{"pre_patch.sh": {"source": "puppet:///modules/test/patching/pre_patch.sh"}, "post_patch.sh": {"source": "puppet:///modules/test/patching/post_patch.sh"}}' 34 | plan patching::deploy_scripts( 35 | TargetSpec $targets, 36 | Hash $scripts, 37 | Optional[String] $patching_dir = undef, 38 | Optional[String] $bin_dir = undef, 39 | Optional[String] $log_dir = undef, 40 | Optional[String] $owner = undef, 41 | Optional[String] $group = undef, 42 | Optional[String] $mode = undef, 43 | ) { 44 | $_targets = run_plan('patching::get_targets', $targets) 45 | return apply($_targets) { 46 | include patching::params 47 | class { 'patching': 48 | scripts => $scripts, 49 | patching_dir => pick($patching_dir, $patching::params::patching_dir), 50 | bin_dir => pick($bin_dir, $patching::params::bin_dir), 51 | log_dir => pick($log_dir, $patching::params::log_dir), 52 | owner => pick($owner, $patching::params::owner), 53 | group => pick($group, $patching::params::group), 54 | mode => pick($mode, $patching::params::mode), 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /plans/get_facts.pp: -------------------------------------------------------------------------------- 1 | # @summary Sets patching facts on targets 2 | # 3 | # @param [TargetSpec] targets 4 | # Set of targets to run against. 
5 | # 6 | # @param [Variant[String, Array[String]]] names 7 | # Name or list of fact names to retrieve from the targets 8 | # 9 | # @example Get the patching_group fact (default) 10 | # bolt plan run patching::get_facts --targets xxx 11 | # 12 | # @example Get different facts 13 | # bolt plan run patching::get_facts --targets xxx names='["fact1", "fact2"]' 14 | # 15 | plan patching::get_facts ( 16 | TargetSpec $targets, 17 | Variant[String, Array[String]] $names = ['patching_group'], 18 | ) { 19 | # this will set all of the facts on the targets if they have Puppet or not 20 | $_targets = run_plan('patching::get_targets', $targets) 21 | 22 | # make sure facts is an array so we can treat it consistently 23 | if $names =~ Array { 24 | $_names = $names 25 | } 26 | else { 27 | $_names = [$names] 28 | } 29 | 30 | $_results = $_targets.map |$t| { 31 | $target_facts = $_names.reduce({}) |$memo, $n| { 32 | $memo + { $n => facts($t)[$n] } 33 | } 34 | Result($t, $target_facts) 35 | } 36 | return ResultSet($_results) 37 | } 38 | -------------------------------------------------------------------------------- /plans/get_targets.pp: -------------------------------------------------------------------------------- 1 | # @summary get_targets() except it also performs online checks and gathers facts in one step. 2 | # 3 | # A very common requirement when running individual plans from the commandline is that 4 | # each plan would need to perform the following steps: 5 | # - Convert the TargetSpec from a string into an Array[Target] using get_targets($targets) 6 | # - Check for targets that are online (calls plan patching::check_puppet 7 | # - Gather facts about the targets 8 | # 9 | # This plan combines all of that into one so that it can be reused in all of the other 10 | # plans within this module. It also adds some smart checking so that, if multiple plans 11 | # invoke each other, each of which call this plan. The online check and facts gathering 12 | # only hapens once. 13 | # 14 | # @param [TargetSpec] targets 15 | # Set of targets to run against. 16 | # 17 | # @return [Array[Target]] Targets converted to an array for use later in the calling plan 18 | # 19 | # @example Plan - Basic usage 20 | # plan mymodule::myplan ( 21 | # TargetSpec $targets 22 | # ) { 23 | # $targets = run_plan('patching::get_targets', $targets) 24 | # # do normal stuff with your $targets 25 | # } 26 | plan patching::get_targets ( 27 | TargetSpec $targets, 28 | ) { 29 | $_targets = get_targets($targets) 30 | $target_first_facts = facts($_targets[0]) 31 | if !$target_first_facts['os'] or !$target_first_facts['os']['family'] { 32 | run_plan('patching::check_puppet', $_targets) 33 | } 34 | return $_targets 35 | } 36 | -------------------------------------------------------------------------------- /plans/monitoring_multiple.pp: -------------------------------------------------------------------------------- 1 | # @summary Disable monitoring for targets in multiple services 2 | # 3 | # @param [TargetSpec] targets 4 | # Set of targets to run against. 5 | # 6 | # @param [Enum['enable', 'disable']] action 7 | # What action to perform on the monitored targets: 8 | # 9 | # - `enable` Resumes monitoring alerts 10 | # - 'disable' Supresses monitoring alerts 11 | # 12 | # @param [Boolean] noop 13 | # Flag to enable noop mode. 
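# The CLI example below is an added illustration (the target group name
# app_hosts is a placeholder); it assumes the inventory vars shown in the
# remote target example that follows.
#
# @example CLI - Disable monitoring in every configured service before patching
#   bolt plan run patching::monitoring_multiple --targets app_hosts action=disable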
14 | # 15 | # @example Remote target definition for $monitoring_target 16 | # vars: 17 | # patching_monitoring_plan: 'patching::monitoring_multiple' 18 | # patching_monitoring_plan_multiple: 19 | # - plan: 'patching::monitoring_solarwinds' 20 | # target: 'solarwinds' 21 | # - plan: 'patching::monitoring_prometheus' 22 | # target: 'prometheus' 23 | # 24 | # groups: 25 | # - name: solarwinds 26 | # config: 27 | # transport: remote 28 | # remote: 29 | # port: 17778 30 | # username: 'domain\svc_bolt_sw' 31 | # password: 32 | # _plugin: pkcs7 33 | # encrypted_value: > 34 | # ENC[PKCS7,xxx] 35 | # targets: 36 | # - solarwinds.domain.tld 37 | # 38 | # - name: prometheus 39 | # config: 40 | # transport: remote 41 | # remote: 42 | # username: 'domain\prom_user' 43 | # password: 44 | # _plugin: pkcs7 45 | # encrypted_value: > 46 | # ENC[PKCS7,xxx] 47 | # targets: 48 | # - prometheus.domain.tld 49 | # 50 | plan patching::monitoring_multiple ( 51 | TargetSpec $targets, 52 | Enum['enable', 'disable'] $action, 53 | Array[Hash] $monitoring_plans = get_targets($targets)[0].vars['patching_monitoring_plan_multiple'], 54 | Boolean $noop = false, 55 | ) { 56 | # Loop over and run each monitoring plan 57 | $monitoring_plans.each |Hash $plan_hash| { 58 | if $plan_hash['target'] { 59 | run_plan($plan_hash['plan'], $targets, 60 | action => $action, 61 | monitoring_target => $plan_hash['target'], 62 | noop => $noop) 63 | } 64 | else { 65 | run_plan($plan_hash['plan'], $targets, 66 | action => $action, 67 | noop => $noop) 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /plans/monitoring_prometheus.pp: -------------------------------------------------------------------------------- 1 | # @summary Create or remove alert silences for hosts in Prometheus. 2 | # 3 | # @param [TargetSpec] targets 4 | # Set of targets to run against. 5 | # 6 | # @param [Enum['enable', 'disable']] action 7 | # What action to perform on the monitored targets: 8 | # 9 | # - `enable` Resumes monitoring alerts 10 | # - 'disable' Supresses monitoring alerts 11 | # 12 | # @param [Optional[Integer]] monitoring_silence_duration 13 | # How long the alert silence will be alive for 14 | # 15 | # @param [Optional[Enum['minutes', 'hours', 'days', 'weeks']]] monitoring_silence_units 16 | # Goes with the silence duration to determine how long the alert silence will be alive for 17 | # 18 | # @param [TargetSpec] monitoring_target 19 | # Name or reference to the remote transport target of the Prometheus server. 20 | # The remote transport should have the following properties: 21 | # - [String] username 22 | # Username for authenticating with Prometheus 23 | # - [Password] password 24 | # Password for authenticating with Prometheus 25 | # 26 | # @param [Boolean] noop 27 | # Flag to enable noop mode. When noop mode is enabled no snapshots will be created or deleted. 
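# The CLI example below is an added illustration (the target group name
# app_hosts is a placeholder); duration and units fall back to the
# patching_monitoring_silence_* inventory vars shown below when not passed.
#
# @example CLI - Silence alerts for 4 hours while patching
#   bolt plan run patching::monitoring_prometheus --targets app_hosts action=disable monitoring_silence_duration=4 monitoring_silence_units=hours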
28 | # 29 | # @example Remote target definition for $monitoring_target 30 | # vars: 31 | # patching_monitoring_target: 'prometheus' 32 | # patching_monitoring_silence_duration: 24 33 | # patching_monitoring_silence_units: 'hours' 34 | # 35 | # groups: 36 | # - name: prometheus 37 | # config: 38 | # transport: remote 39 | # remote: 40 | # username: 'domain\prom_user' 41 | # password: 42 | # _plugin: pkcs7 43 | # encrypted_value: > 44 | # ENC[PKCS7,xxx] 45 | # targets: 46 | # - prometheus.domain.tld 47 | # 48 | plan patching::monitoring_prometheus ( 49 | TargetSpec $targets, 50 | Enum['enable', 'disable'] $action, 51 | Optional[Integer] $monitoring_silence_duration = undef, 52 | Optional[Enum['minutes', 'hours', 'days', 'weeks']] $monitoring_silence_units = undef, 53 | Optional[TargetSpec] $monitoring_target = undef, 54 | Boolean $noop = false, 55 | Boolean $ssl_verify = get_targets($targets)[0].vars['patching_monitoring_ssl'], 56 | String $ssl_cert = get_targets($targets)[0].vars['patching_monitoring_ssl_cert'], 57 | ) { 58 | $_targets = run_plan('patching::get_targets', $targets) 59 | $group_vars = $_targets[0].vars 60 | 61 | # Set the silence to last for 2 hours by default 62 | $_monitoring_silence_duration = pick($monitoring_silence_duration, 63 | $group_vars['patching_monitoring_silence_duration'], 64 | 2) 65 | $_monitoring_silence_units = pick($monitoring_silence_units, 66 | $group_vars['patching_monitoring_silence_units'], 67 | 'hours') 68 | $_monitoring_target = pick($monitoring_target, 69 | $group_vars['patching_monitoring_target'], 70 | 'prometheus') 71 | 72 | # Create array of node names 73 | $target_names = patching::target_names($_targets, 'name') 74 | 75 | # Display status message 76 | case $action { 77 | 'enable': { 78 | out::message('Enabling monitoring for:') 79 | $target_names.each |$n| { 80 | out::message(" + ${n}") 81 | } 82 | } 83 | 'disable': { 84 | out::message('Disabling monitoring for:') 85 | $target_names.each |$n| { 86 | out::message(" - ${n}") 87 | } 88 | } 89 | default: { 90 | fail_plan("Unknown action: ${action}") 91 | } 92 | } 93 | 94 | if !$noop { 95 | $result = run_task('patching::monitoring_prometheus', $_monitoring_target, 96 | targets => $target_names, 97 | action => $action, 98 | prometheus_server => get_target($_monitoring_target).uri, 99 | silence_duration => $_monitoring_silence_duration, 100 | silence_units => $_monitoring_silence_units, 101 | ssl_verify => $ssl_verify, 102 | ssl_cert => $ssl_cert, 103 | _catch_errors => true, 104 | ) 105 | # Target is monitoring host so extract targets from the result of the task 106 | return $result[0].value 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /plans/monitoring_solarwinds.pp: -------------------------------------------------------------------------------- 1 | # @summary Enable or disable monitoring alerts on hosts in SolarWinds. 2 | # 3 | # TODO config variables 4 | # 5 | # @param [TargetSpec] targets 6 | # Set of targets to run against. 7 | # 8 | # @param [Enum['enable', 'disable']] action 9 | # What action to perform on the monitored targets: 10 | # 11 | # - `enable` Resumes monitoring alerts 12 | # - 'disable' Supresses monitoring alerts 13 | # 14 | # @param [Optional[Enum['name', 'uri']]] target_name_property 15 | # Determines what property on the Target object will be used as the name when 16 | # mapping the Target to a Node in SolarWinds. 17 | # 18 | # - `uri` : use the `uri` property on the Target. 
This is preferred because 19 | # If you specify a list of Targets in the inventory file, the value shown in that 20 | # list is set as the `uri` and not the `name`, in this case `name` will be `undef`. 21 | # - `name` : use the `name` property on the Target, this is not preferred because 22 | # `name` is usually a short name or nickname. 23 | # 24 | # @param [TargetSpec] monitoring_target 25 | # Name or reference to the remote transport target of the Monitoring server. 26 | # This will be used when to determine how to communicate with the SolarWinds API. 27 | # The remote transport should have the following properties: 28 | # - [Integer] port 29 | # Port to use when communicating with SolarWinds API (default: 17778) 30 | # - [String] username 31 | # Username for authenticating with the SolarWinds API 32 | # - [Password] password 33 | # Password for authenticating with the SolarWinds API 34 | # 35 | # @param [Optional[String[1]]] monitoring_name_property 36 | # Determines what property to match in SolarWinds when looking up targets. 37 | # By default we determine if the target's name is an IP address, if it is then we 38 | # use the 'IPAddress' property, otherwise we use whatever property this is set to. 39 | # Available options that we've seen used are 'DNS' if the target's name is a DNS FQDN, 40 | # or 'Caption' if you're looking up by a nick-name for the target. 41 | # This can really be any field on the Orion.Nodes table. 42 | # 43 | # @param [Boolean] noop 44 | # Flag to enable noop mode. When noop mode is enabled no snapshots will be created or deleted. 45 | # 46 | # @example Remote target definition for $monitoring_target 47 | # vars: 48 | # patching_monitoring_plan: 'patching::monitoring_solarwinds' 49 | # patching_monitoring_target: 'solarwinds' 50 | # 51 | # groups: 52 | # - name: solarwinds 53 | # config: 54 | # transport: remote 55 | # remote: 56 | # port: 17778 57 | # username: 'domain\svc_bolt_sw' 58 | # password: 59 | # _plugin: pkcs7 60 | # encrypted_value: > 61 | # ENC[PKCS7,xxx] 62 | # targets: 63 | # - solarwinds.domain.tld 64 | # 65 | plan patching::monitoring_solarwinds ( 66 | TargetSpec $targets, 67 | Enum['enable', 'disable'] $action, 68 | Optional[Enum['name', 'uri']] $target_name_property = undef, 69 | Optional[TargetSpec] $monitoring_target = undef, 70 | Optional[String[1]] $monitoring_name_property = undef, 71 | Boolean $noop = false, 72 | ) { 73 | $_targets = run_plan('patching::get_targets', $targets) 74 | $group_vars = $_targets[0].vars 75 | $_target_name_property = pick($target_name_property, 76 | $group_vars['patching_monitoring_target_name_property'], 77 | 'uri') 78 | $_monitoring_name_property = pick($monitoring_name_property, 79 | $group_vars['patching_monitoring_name_property'], 80 | 'DNS') 81 | $_monitoring_target = pick($monitoring_target, 82 | $group_vars['patching_monitoring_target'], 83 | 'solarwinds') 84 | 85 | # Create array of node names 86 | $target_names = patching::target_names($_targets, $_target_name_property) 87 | 88 | # Display status message 89 | case $action { 90 | 'enable': { 91 | out::message('Enabling monitoring for:') 92 | $target_names.each |$n| { 93 | out::message(" + ${n}") 94 | } 95 | } 96 | 'disable': { 97 | out::message('Disabling monitoring for:') 98 | $target_names.each |$n| { 99 | out::message(" - ${n}") 100 | } 101 | } 102 | default: { 103 | fail_plan("Unknown action: ${action}") 104 | } 105 | } 106 | 107 | if !$noop { 108 | return run_task('patching::monitoring_solarwinds', $_monitoring_target, 109 | targets => 
$target_names, 110 | action => $action, 111 | name_property => $_monitoring_name_property) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /plans/ordered_groups.pp: -------------------------------------------------------------------------------- 1 | # @summary Takes a set of targets then groups and sorts them by the patching_order var set on the target. 2 | # 3 | # When patching hosts it is common that you don't want to patch them all at the same time, 4 | # for obvious reasons. To facilitate this we devised the concept of a "patching order". 5 | # Patching order is a mechanism to allow targets to be organized into groups and 6 | # then sorted so that a custom order can be defined for your specific usecase. 7 | # 8 | # The way one assigns a patching order to a target or group is using vars 9 | # in the Bolt inventory file. 10 | # 11 | # Example: 12 | # 13 | # ```yaml 14 | # --- 15 | # groups: 16 | # - name: primary_nodes 17 | # vars: 18 | # patching_order: 1 19 | # targets: 20 | # - sql01.domain.tld 21 | # 22 | # - name: backup_nodes 23 | # vars: 24 | # patching_order: 2 25 | # targets: 26 | # - sql02.domain.tld 27 | # ``` 28 | # 29 | # When the patching_order is defined at the group level, it is inherited 30 | # by all targets within that group. 31 | # 32 | # The reason this plan exists is that there is no concept of a "group" in the bolt 33 | # runtime, so we need to artificially recreate them using our patching_order 34 | # vars paradigm. 35 | # 36 | # An added benefit to this paradigm is that you may have grouped your targets logically 37 | # on a different dimension, say by application. If it's OK that multiple applications be 38 | # patched at the same time, we can assign the same patching order to multiple groups 39 | # in the inventory. Then, when run through this plan, they will be aggregated together 40 | # into one large group of targets that will all be patched concurrently. 41 | # 42 | # Example, app_xxx and app_zzz both can be patched at the same time, but app_yyy needs to go 43 | # later in the process: 44 | # 45 | # ```yaml 46 | # --- 47 | # groups: 48 | # - name: app_xxx 49 | # vars: 50 | # patching_order: 1 51 | # targets: 52 | # - xxx 53 | # 54 | # - name: app_yyy 55 | # vars: 56 | # patching_order: 2 57 | # targets: 58 | # - yyy 59 | # 60 | # - name: app_zzz 61 | # vars: 62 | # patching_order: 1 63 | # targets: 64 | # - zzz 65 | # ``` 66 | # 67 | # @param [TargetSpec] targets 68 | # Set of targets to created ordered groups of. 69 | # 70 | # @return [Array[Struct[{'order' => Data, 'targets' => Array[Target]}]]] 71 | # An array of hashes, each hash containing two properties: 72 | # 73 | # - 'order' : This is the value of the patching_order defined 74 | # in the inventory. This can be any datatype you wish, as long as it's 75 | # comparable with the sort() function. 76 | # - 'targets' : An array of targets in this group. 77 | # 78 | # This is returned as an Array, because an Array has a defined order when 79 | # you iterate over it using .each. Ordering is important in patching 80 | # so we wanted this to be very concrete. 
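# Note (an added observation based on the implementation below): the
# patching_order values are converted to strings and sorted lexically, so an
# order of '10' sorts before '2'. If you need more than nine groups, zero-pad
# the values (for example '01', '02', '10') to keep the ordering numeric-like.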
81 | # 82 | # @example Basic usage 83 | # $ordered_groups = run_plan('patching::ordered_groups', $targets) 84 | # $ordered_groups.each |$group_hash| { 85 | # $group_order = $group_hash['order'] 86 | # $group_targets = $group_hash['targets'] 87 | # # run your patching process for the group 88 | # } 89 | # 90 | # 91 | plan patching::ordered_groups ( 92 | TargetSpec $targets, 93 | ) { 94 | $_targets = get_targets($targets) 95 | 96 | ## The following parses the targets for their patching_order variable. 97 | ## patching_order will dictate the order the systems are processed. 98 | $ordered_hash = $_targets.reduce({}) |$memo, $t| { 99 | $order_unknown_type = vars($t)['patching_order'] 100 | $order = String($order_unknown_type) 101 | if $order in $memo { 102 | $ordered_array = $memo[$order] << $t 103 | $memo + { $order => $ordered_array } 104 | } 105 | else { 106 | $memo + { $order => [$t] } 107 | } 108 | } 109 | 110 | # when iterating over a hash, it isn't guaranteed to iterate in the sorted order 111 | # of the keys, this pulls the keys out of the hash and sorts them 112 | # $ordered_hash = {'2' => ['b'], '3' => ['c'], '1' => ['a']} 113 | # 114 | # $ordered_keys = ['1', '2', '3'] 115 | $ordered_keys = sort(keys($ordered_hash)) 116 | out::message("Groups = ${ordered_keys}") 117 | $ordered_groups = $ordered_keys.map |$o| { 118 | $ordered_targets = $ordered_hash[$o].map |$t| { $t.name } 119 | out::message("Group '${o}' targets = ${ordered_targets}") 120 | # trust me, we have to assign to a variable here, it's a detail of the puppet 121 | # language parser that gets mad, but only because there is the loop above 122 | $group = { 'order' => $o, 'targets' => $ordered_hash[$o] } 123 | $group 124 | } 125 | 126 | return $ordered_groups 127 | } 128 | -------------------------------------------------------------------------------- /plans/post_update.pp: -------------------------------------------------------------------------------- 1 | # @summary Executes a custom post-update script on each node. 2 | # 3 | # Often in patching it is necessary to run custom commands before/after updates are 4 | # applied to a host. This plan allows for that customization to occur. 5 | # 6 | # By default it executes a Shell script on Linux and a PowerShell script on Windows hosts. 7 | # The default script paths are: 8 | # - Linux: `/opt/patching/bin/post_update.sh` 9 | # - Windows: `C:\ProgramData\patching\bin\post_update.ps1` 10 | # 11 | # One can customize the script paths by overriding them on the CLI, or when calling the plan 12 | # using the `script_linux` and `script_windows` parameters. 13 | # 14 | # The script paths can also be customzied in the inventory configuration `vars`: 15 | # Example: 16 | # 17 | # ``` yaml 18 | # vars: 19 | # patching_post_update_script_windows: C:\scripts\patching.ps1 20 | # patching_post_update_script_linux: /usr/local/bin/mysweetpatchingscript.sh 21 | # 22 | # groups: 23 | # # these targets will use the pre patching script defined in the vars above 24 | # - name: regular_nodes 25 | # targets: 26 | # - tomcat01.domain.tld 27 | # 28 | # # these targets will use the customized patching script set for this group 29 | # - name: sql_nodes 30 | # vars: 31 | # patching_post_update_script_linux: /bin/sqlpatching.sh 32 | # targets: 33 | # - sql01.domain.tld 34 | # ``` 35 | # 36 | # @param [TargetSpec] targets 37 | # Set of targets to run against. 38 | # @param [String[1]] script_linux 39 | # Path to the script that will be executed on Linux targets. 
40 | # @param [String[1]] script_windows 41 | # Path to the script that will be executed on Windows targets. 42 | # @param [Boolean] noop 43 | # Flag to enable noop mode for the underlying plans and tasks. 44 | # 45 | # @return [ResultSet] 46 | # Returns the ResultSet from the underlying `run_task('patching::post_update')` 47 | # 48 | # @example CLI - Basic usage 49 | # bolt plan run patching::post_update --targets all_hosts 50 | # 51 | # @example CLI - Custom scripts 52 | # bolt plan run patching::post_update --targets all_hosts script_linux='/my/sweet/script.sh' script_windows='C:\my\sweet\script.ps1' 53 | # 54 | # @example Plan - Basic usage 55 | # run_plan('patching::post_update', $all_hosts) 56 | # 57 | # @example Plan - Custom scripts 58 | # run_plan('patching::post_update', $all_hosts, 59 | # script_linux => '/my/sweet/script.sh', 60 | # script_windows => 'C:\my\sweet\script.ps1') 61 | # 62 | plan patching::post_update ( 63 | TargetSpec $targets, 64 | String[1] $script_linux = '/opt/patching/bin/post_update.sh', 65 | String[1] $script_windows = 'C:\ProgramData\patching\bin\post_update.ps1', 66 | Boolean $noop = false, 67 | ) { 68 | $_targets = run_plan('patching::get_targets', $targets) 69 | $group_vars = $_targets[0].vars 70 | $_script_linux = pick($group_vars['patching_post_update_script_linux'], $script_linux) 71 | $_script_windows = pick($group_vars['patching_post_update_script_windows'], $script_windows) 72 | 73 | $result = run_plan('patching::pre_post_update', $_targets, 74 | task => 'patching::post_update', 75 | script_linux => $_script_linux, 76 | script_windows => $_script_windows, 77 | noop => $noop, 78 | update_phase => 'post',) 79 | 80 | $filtered_results = patching::filter_results($result, 'patching::post_update') 81 | 82 | if !$filtered_results['ok_targets'].empty { 83 | $filtered_results['ok_targets'].each |$target| { 84 | log::info("Post-update script ran successfully on ${target}") 85 | } 86 | } 87 | 88 | return $filtered_results 89 | } 90 | -------------------------------------------------------------------------------- /plans/pre_post_update.pp: -------------------------------------------------------------------------------- 1 | # @summary Common entry point for executing the pre/post update custom scripts 2 | # 3 | # @see patching::pre_update 4 | # @see patching::post_update 5 | # 6 | # @param [TargetSpec] targets 7 | # Set of targets to run against. 8 | # 9 | # @param [String[1]] task 10 | # Name of the pre/post update task to execute. 11 | # 12 | # @param [Optional[String[1]]] script_linux 13 | # Path to the script that will be executed on Linux targets. 14 | # 15 | # @param [Optional[String[1]]] script_windows 16 | # Path to the script that will be executed on Windows targets. 17 | # 18 | # @param [Boolean] noop 19 | # Flag to enable noop mode for the underlying plans and tasks. 20 | # 21 | # @param [String[1]] update_phase 22 | # Indicates whether the task is a pre-update or post-update task. 23 | # 24 | # @return [ResultSet] Returns the ResultSet from the execution of `task`. 
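# The example below is an added, illustrative sketch; this plan is normally
# invoked on your behalf by patching::pre_update and patching::post_update
# rather than called directly.
#
# @example Plan - Running the pre-update task through this common entry point
#   run_plan('patching::pre_post_update', $targets,
#     task         => 'patching::pre_update',
#     script_linux => '/opt/patching/bin/pre_update.sh',
#     update_phase => 'pre')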
25 | plan patching::pre_post_update ( 26 | TargetSpec $targets, 27 | String[1] $task, 28 | Optional[String[1]] $script_linux = undef, 29 | Optional[String[1]] $script_windows = undef, 30 | Boolean $noop = false, 31 | Enum['pre', 'post'] $update_phase, 32 | ) { 33 | out::message("pre_post_update - noop = ${noop}, update_phase = ${update_phase}, task = ${task}") 34 | $_targets = run_plan('patching::get_targets', $targets) 35 | 36 | # Log the update phase and task name 37 | log::info("Running ${update_phase} update script: ${task}") 38 | 39 | # split into linux vs Windows 40 | $targets_linux = $_targets.filter |$t| { facts($t)['os']['family'] != 'windows' } 41 | $targets_windows = $_targets.filter |$t| { facts($t)['os']['family'] == 'windows' } 42 | 43 | # run pre-patch scripts 44 | if !$targets_linux.empty() { 45 | $results_linux = run_task($task, $targets_linux, 46 | script => $script_linux, 47 | _noop => $noop, 48 | _catch_errors => true).results 49 | } 50 | else { 51 | $results_linux = [] 52 | } 53 | if !$targets_windows.empty() { 54 | $results_windows = run_task($task, $targets_windows, 55 | script => $script_windows, 56 | _noop => $noop, 57 | _catch_errors => true).results 58 | } 59 | else { 60 | $results_windows = [] 61 | } 62 | 63 | # TODO pretty print any scripts that were run 64 | 65 | # return ResultSet($results_linux + $results_windows) 66 | 67 | $result_set = ResultSet($results_linux + $results_windows) 68 | 69 | $filtered_results = patching::filter_results($result_set, $task) 70 | 71 | return $filtered_results 72 | } 73 | -------------------------------------------------------------------------------- /plans/pre_update.pp: -------------------------------------------------------------------------------- 1 | # @summary Executes a custom pre-update script on each node. 2 | # 3 | # Often in patching it is necessary to run custom commands before/after updates are 4 | # applied to a host. This plan allows for that customization to occur. 5 | # 6 | # By default it executes a Shell script on Linux and a PowerShell script on Windows hosts. 7 | # The default script paths are: 8 | # - Linux: `/opt/patching/bin/pre_update.sh` 9 | # - Windows: `C:\ProgramData\patching\bin\pre_update.ps1` 10 | # 11 | # One can customize the script paths by overriding them on the CLI, or when calling the plan 12 | # using the `script_linux` and `script_windows` parameters. 13 | # 14 | # The script paths can also be customzied in the inventory configuration `vars`: 15 | # Example: 16 | # 17 | # ``` yaml 18 | # vars: 19 | # patching_pre_update_script_windows: C:\scripts\patching.ps1 20 | # patching_pre_update_script_linux: /usr/local/bin/mysweetpatchingscript.sh 21 | # 22 | # groups: 23 | # # these targets will use the pre patching script defined in the vars above 24 | # - name: regular_nodes 25 | # targets: 26 | # - tomcat01.domain.tld 27 | # 28 | # # these targets will use the customized patching script set for this group 29 | # - name: sql_nodes 30 | # vars: 31 | # patching_pre_update_script_linux: /bin/sqlpatching.sh 32 | # targets: 33 | # - sql01.domain.tld 34 | # ``` 35 | # 36 | # @param [TargetSpec] targets 37 | # Set of targets to run against. 38 | # @param [String[1]] script_linux 39 | # Path to the script that will be executed on Linux targets. 40 | # @param [String[1]] script_windows 41 | # Path to the script that will be executed on Windows targets. 42 | # @param [Boolean] noop 43 | # Flag to enable noop mode for the underlying plans and tasks. 
44 | # 45 | # @return [ResultSet] 46 | # Returns the ResultSet from the underlying `run_task('patching::pre_update')` 47 | # 48 | # @example CLI - Basic usage 49 | # bolt plan run patching::pre_update --targets all_hosts 50 | # 51 | # @example CLI - Custom scripts 52 | # bolt plan run patching::pre_update --targets all_hosts script_linux='/my/sweet/script.sh' script_windows='C:\my\sweet\script.ps1' 53 | # 54 | # @example Plan - Basic usage 55 | # run_plan('patching::pre_update', $all_hosts) 56 | # 57 | # @example Plan - Custom scripts 58 | # run_plan('patching::pre_update', $all_hosts, 59 | # script_linux => '/my/sweet/script.sh', 60 | # script_windows => 'C:\my\sweet\script.ps1') 61 | # 62 | plan patching::pre_update ( 63 | TargetSpec $targets, 64 | String[1] $script_linux = '/opt/patching/bin/pre_update.sh', 65 | String[1] $script_windows = 'C:\ProgramData\patching\bin\pre_update.ps1', 66 | Boolean $noop = false, 67 | ) { 68 | $_targets = run_plan('patching::get_targets', $targets) 69 | $group_vars = $_targets[0].vars 70 | $_script_linux = pick($group_vars['patching_pre_update_script_linux'], $script_linux) 71 | $_script_windows = pick($group_vars['patching_pre_update_script_windows'], $script_windows) 72 | 73 | $result = run_plan('patching::pre_post_update', $_targets, 74 | task => 'patching::pre_update', 75 | script_linux => $_script_linux, 76 | script_windows => $_script_windows, 77 | noop => $noop, 78 | update_phase => 'pre') 79 | 80 | $filtered_results = patching::filter_results($result, 'patching::pre_update') 81 | 82 | if !$filtered_results['ok_targets'].empty { 83 | $filtered_results['ok_targets'].each |$target| { 84 | log::info("Pre-update script ran successfully on ${target}") 85 | } 86 | } 87 | 88 | return $filtered_results 89 | } 90 | -------------------------------------------------------------------------------- /plans/puppet_facts.pp: -------------------------------------------------------------------------------- 1 | # @summary Plan that runs 'puppet facts' on the targets and sets them as facts on the Target objects. 2 | # 3 | # This is inspired by: https://github.com/puppetlabs/puppetlabs-facts/blob/master/plans/init.pp 4 | # Except instead of just running `facter` it runs `puppet facts` to set additional 5 | # facts that are only present when running in the context of puppet. 6 | # 7 | # Under the hood it is executing the `patching::puppet_facts` task. 8 | # 9 | # @param [TargetSpec] targets 10 | # Set of targets to run against. 11 | # 12 | # @return [ResultSet] The results from the execution of the `patching::puppet_facts` task.
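#
# @example Plan - gather facts, then branch on an OS fact (a minimal sketch; `$my_targets` is a placeholder for your own TargetSpec)
#   run_plan('patching::puppet_facts', $my_targets)
#   $windows_targets = get_targets($my_targets).filter |$t| { facts($t)['os']['family'] == 'windows' }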
13 | # 14 | plan patching::puppet_facts( 15 | TargetSpec $targets 16 | ) { 17 | $result_set = run_task('patching::puppet_facts', $targets) 18 | # puppet facts returns a structure like: 19 | # name: mynodename.domain.tld 20 | # values: 21 | # fact1: abc 22 | # fact2: def 23 | # 24 | # We only want to set the "values" as facts on the node 25 | $result_set.each |$result| { 26 | # Debian systems do not have a 'values' param; the facts are just in the value return 27 | if 'values' in $result.value { 28 | $target_fact_values = $result.value['values'] 29 | } else { 30 | $target_fact_values = $result.value 31 | } 32 | 33 | add_facts($result.target, $target_fact_values) 34 | } 35 | return $result_set 36 | } 37 | -------------------------------------------------------------------------------- /plans/reboot_required.pp: -------------------------------------------------------------------------------- 1 | # @summary Queries a target's operating system to determine if a reboot is required and then reboots the targets that require rebooting. 2 | # 3 | # Patching in different environments comes with various unique requirements, one of those 4 | # is rebooting hosts. Sometimes hosts always need to be rebooted, other times never rebooted. 5 | # 6 | # To provide this flexibility we created this plan that wraps the `reboot` plan with 7 | # a `strategy` that is controllable as a parameter. This provides flexibility in 8 | # rebooting specific targets in certain ways (by group), along with the power to expand 9 | # our strategy offerings in the future. 10 | # 11 | # @param [TargetSpec] targets 12 | # Set of targets to run against. 13 | # @param [Enum['only_required', 'never', 'always']] strategy 14 | # Determines the reboot strategy for the run. 15 | # 16 | # - 'only_required' only reboots hosts that require it based on info reported from the OS 17 | # - 'never' never reboots the hosts 18 | # - 'always' will reboot the host no matter what 19 | # 20 | # @param [String] message 21 | # Message displayed to the user prior to the system rebooting 22 | # 23 | # @param [Integer] wait 24 | # Time in seconds that the plan waits before continuing after a reboot. This is necessary in case one 25 | # of the groups affects the availability of a previous group. 26 | # Two use cases here: 27 | # 1. A later group is a hypervisor. In this instance the hypervisor will reboot causing the 28 | # VMs to go offline and we need to wait for those child VMs to come back up before 29 | # collecting history metrics. 30 | # 2. A later group is a linux router. In this instance maybe the patching of the linux router 31 | # affects the reachability of previous hosts. 32 | # 33 | # @param [Integer] disconnect_wait How long (in seconds) to wait before checking whether the server has rebooted. Defaults to 10. 34 | # 35 | # @param [Boolean] noop 36 | # Flag to determine if this should be a noop operation or not. 37 | # If this is a noop, no hosts will ever be rebooted, however the "reboot required" information 38 | # will still be queried and returned.
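#
# The reboot behavior can also be driven from inventory `vars` on the target or group
# (a hedged sketch; the variable names below are the ones this plan reads, shown with their default values):
#
# ``` yaml
# vars:
#   patching_reboot_strategy: only_required
#   patching_reboot_message: 'NOTICE: This system is currently being updated.'
#   patching_reboot_wait: 300
#   patching_disconnect_wait: 10
# ```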
39 | # 40 | # @return [Struct[{'required' => Array[TargetSpec], 'not_required' => Array[TargetSpec], 'attempted' => Array[TargetSpec], 'resultset' => ResultSet}]] 41 | # 42 | # - `required` : array of targets whose host OS reported a reboot is required 43 | # - `not_required` : array of targets whose host OS did not report a reboot being required 44 | # - `attempted` : array of targets where a reboot was attempted (potentially empty array) 45 | # - `resultset` : results from the `reboot` plan for the attempted hosts (potentially an empty `ResultSet`) 46 | # 47 | plan patching::reboot_required ( 48 | TargetSpec $targets, 49 | Enum['only_required', 'never', 'always'] $strategy = undef, 50 | String $message = undef, 51 | Integer $wait = undef, 52 | Integer $disconnect_wait = undef, 53 | Boolean $noop = false, 54 | ) { 55 | $_targets = run_plan('patching::get_targets', $targets) 56 | $group_vars = $_targets[0].vars 57 | $_strategy = pick($strategy, 58 | $group_vars['patching_reboot_strategy'], 59 | 'only_required') 60 | $_message = pick($message, 61 | $group_vars['patching_reboot_message'], 62 | 'NOTICE: This system is currently being updated.') 63 | $_wait = pick($wait, 64 | $group_vars['patching_reboot_wait'], 65 | 300) 66 | $_disconnect_wait = pick($disconnect_wait, 67 | $group_vars['patching_disconnect_wait'], 68 | 10) 69 | 70 | ## Check if reboot required. 71 | $reboot_results = run_task('patching::reboot_required', $_targets, _catch_errors => true) 72 | 73 | ## Check for errors during reboot check 74 | $check_filtered = patching::filter_results($reboot_results, 'patching::reboot_required') 75 | 76 | # print out pretty message 77 | out::message("Reboot strategy: ${_strategy}") 78 | out::message("Host reboot required status: ('+' reboot required; '-' reboot NOT required)") 79 | $targets_reboot_required = $reboot_results.filter_set|$res| { $res['reboot_required'] }.targets 80 | $targets_reboot_not_required = $reboot_results.filter_set|$res| { !$res['reboot_required'] }.targets 81 | $reboot_results.each|$res| { 82 | $symbol = ($res['reboot_required']) ? 
{ true => '+' , default => '-' } 83 | out::message(" ${symbol} ${res.target.name}") 84 | } 85 | 86 | ## Reboot the hosts that require it 87 | ## skip if we're in noop mode (the reboot plan doesn't support $noop) 88 | if !$noop { 89 | case $_strategy { 90 | 'only_required': { 91 | if !$targets_reboot_required.empty() { 92 | $targets_reboot_attempted = $targets_reboot_required 93 | $reboot_resultset = run_plan('reboot', $targets_reboot_required, 94 | reconnect_timeout => $_wait, 95 | disconnect_wait => $_disconnect_wait, 96 | message => $_message, 97 | _catch_errors => true) 98 | } 99 | else { 100 | $targets_reboot_attempted = [] 101 | $reboot_resultset = ResultSet([]) 102 | } 103 | } 104 | 'always': { 105 | $targets_reboot_attempted = $targets 106 | $reboot_resultset = run_plan('reboot', $targets, 107 | reconnect_timeout => $_wait, 108 | disconnect_wait => $_disconnect_wait, 109 | message => $_message, 110 | _catch_errors => true) 111 | } 112 | 'never': { 113 | $targets_reboot_attempted = [] 114 | $reboot_resultset = ResultSet([]) 115 | } 116 | default: { 117 | fail_plan("Invalid strategy: ${_strategy}") 118 | } 119 | } 120 | } 121 | else { 122 | out::message('Noop specified, skipping all reboots.') 123 | $targets_reboot_attempted = [] 124 | $reboot_resultset = ResultSet([]) 125 | } 126 | 127 | ## Check for errors during reboot 128 | $reboot_filtered = patching::filter_results($reboot_resultset, 'reboot') 129 | 130 | ## Merge the failed results from both the check and the reboot 131 | $failed_results = $check_filtered['failed_results'] + $reboot_filtered['failed_results'] 132 | 133 | ## Return both sets of failures and successes (reboot not required/successfully rebooted) 134 | return({ 135 | 'ok_targets' => $_targets - $failed_results.keys, 136 | 'failed_results' => $failed_results, 137 | }) 138 | } 139 | -------------------------------------------------------------------------------- /plans/set_facts.pp: -------------------------------------------------------------------------------- 1 | # @summary Sets patching facts on targets 2 | # 3 | # For Linux targets the facts will be written to /etc/facter/facts.d/patching.yaml. 4 | # For Windows targets the facts will be written to 'C:/ProgramData/PuppetLabs/facter/facts.d/patching.yaml'. 5 | # 6 | # The contents of the patching.yaml file will be overwritten by this plan. 7 | # TODO: Provide an option to merge with existing facts. 8 | # 9 | # Once the facts are written, by default, the facts will be ran and uploaded to PuppetDB. 10 | # If you wish to disable this, simply set upload=false 11 | # 12 | # @param [TargetSpec] targets 13 | # Set of targets to run against. 14 | # 15 | # @param [Optional[String]] patching_group 16 | # Name of the patching group that the targets are a member of. This will be the value for the 17 | # patching_group fact. 18 | # 19 | # @param [Hash] custom_facts 20 | # Hash of custom facts that will be set on these targets. This can be anything you like 21 | # and will merged with the other facts above. 22 | # 23 | # @param [Boolean] upload 24 | # After setting the facts, perform a puppet facts upload so the new 25 | # facts are stored in PuppetDB. 
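#
# For illustration (a hedged sketch using the example values from the @example tags below), running with
# `patching_group=tuesday_night` and `custom_facts='{"fact1": "blah"}'` writes a facts.d/patching.yaml roughly like:
#
# ``` yaml
# fact1: blah
# patching_group: tuesday_night
# ```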
26 | # 27 | # @example Set the patching_group fact 28 | # bolt plan run patching::set_facts --targets xxx patching_group=tuesday_night 29 | # 30 | # @example Set the custom facts 31 | # bolt plan run patching::set_facts --targets xxx custom_facts='{"fact1": "blah"}' 32 | # 33 | # @example Don't upload facts to PuppetDB 34 | # bolt plan run patching::set_facts --targets xxx patching_group=tuesday_night upload=false 35 | # 36 | plan patching::set_facts ( 37 | TargetSpec $targets, 38 | Optional[String] $patching_group = undef, 39 | Hash $custom_facts = {}, 40 | Boolean $upload = true, 41 | ) { 42 | # this will set all of the facts on the targets if they have Puppet or not 43 | $_targets = run_plan('patching::get_targets', $targets) 44 | 45 | # split by linux vs windows because of the different paths for custom facts 46 | $targets_linux = $_targets.filter |$t| { facts($t)['os']['family'] != 'windows' } 47 | $targets_windows = $_targets.filter |$t| { facts($t)['os']['family'] == 'windows' } 48 | 49 | # merge our facts 50 | # the explicitly defined facts always win 51 | $_facts = $custom_facts + { 'patching_group' => $patching_group } 52 | $_facts_yaml = stdlib::to_yaml($_facts) 53 | out::message('============= writing facts.d/patching.yaml =============') 54 | out::message($_facts_yaml) 55 | 56 | if !$targets_linux.empty() { 57 | write_file($_facts_yaml, 58 | '/etc/facter/facts.d/patching.yaml', 59 | $targets_linux) 60 | $results_linux = run_command('/opt/puppetlabs/bin/puppet facts upload', $targets_linux) 61 | } 62 | else { 63 | $results_linux = ResultSet([]) 64 | } 65 | 66 | if !$targets_windows.empty() { 67 | write_file($_facts_yaml, 68 | 'C:/ProgramData/PuppetLabs/facter/facts.d/patching.yaml', 69 | $targets_windows) 70 | $results_windows = run_command("& 'C:/Program Files/Puppet Labs/Puppet/bin/puppet' facts upload", $targets_windows) 71 | } 72 | else { 73 | $results_windows = ResultSet([]) 74 | } 75 | return ResultSet($results_linux.results + $results_windows.results) 76 | } 77 | -------------------------------------------------------------------------------- /plans/snapshot_kvm.pp: -------------------------------------------------------------------------------- 1 | # @summary Creates or deletes VM snapshots on targets in KVM/Libvirt. 2 | # 3 | # Runs commands on the CLI of the KVM/Libvirt hypervisor host. 4 | # 5 | # @param [TargetSpec] targets 6 | # Set of targets to run against. 7 | # 8 | # @param [Enum['create', 'delete']] action 9 | # What action to perform on the snapshots: 10 | # 11 | # - `create` creates a new snapshot 12 | # - 'delete' deletes snapshots by matching the `snapshot_name` passed in. 13 | # 14 | # @param [Optional[Enum['hostname', 'name', 'uri']]] target_name_property 15 | # Determines what property on the Target object will be used as the VM name when 16 | # mapping the Target to a VM in vSphere. 17 | # 18 | # - `uri` : use the `uri` property on the Target. This is preferred because 19 | # If you specify a list of Targets in the inventory file, the value shown in that 20 | # list is set as the `uri` and not the `name`, in this case `name` will be `undef`. 21 | # - `name` : use the `name` property on the Target, this is not preferred because 22 | # `name` is usually a short name or nickname. 
23 | # - `hostname`: use the `hostname` value to use host component of `uri` property on the Target 24 | # this can be useful if VM name doesn't include domain name 25 | ## 26 | # @param [Optional[String[1]]] snapshot_name 27 | # Name of the snapshot 28 | # 29 | # @param [Optional[String]] snapshot_description 30 | # Description of the snapshot 31 | # 32 | # @param [Optional[Boolean]] snapshot_memory 33 | # Capture the VMs memory in the snapshot 34 | # 35 | # @param [Optional[Boolean]] snapshot_quiesce 36 | # Quiesce/flush the filesystem when snapshotting the VM. This requires VMware tools be installed 37 | # in the guest OS to work properly. 38 | # 39 | # @param [TargetSpec] hypervisor_targets 40 | # Name or reference to the targets of the KVM hypervisors. 41 | # We will login to this host an run the snapshot tasks so that the local CLI can be used. 42 | # Default target name is "kvm_hypervisors", this can be a group of targets too! 43 | # 44 | # @param [Boolean] noop 45 | # Flag to enable noop mode. When noop mode is enabled no snapshots will be created or deleted. 46 | # 47 | plan patching::snapshot_kvm ( 48 | TargetSpec $targets, 49 | Enum['create', 'delete'] $action, 50 | Optional[Enum['hostname', 'name', 'uri']] $target_name_property = undef, 51 | Optional[String[1]] $snapshot_name = undef, 52 | Optional[String] $snapshot_description = undef, 53 | Optional[Boolean] $snapshot_memory = undef, 54 | Optional[Boolean] $snapshot_quiesce = undef, 55 | Optional[TargetSpec] $hypervisor_targets = undef, 56 | Boolean $noop = false, 57 | ) { 58 | $_targets = run_plan('patching::get_targets', $targets) 59 | $group_vars = $_targets[0].vars 60 | # Order: CLI > Config > Default 61 | $_target_name_property = pick($target_name_property, 62 | $group_vars['patching_snapshot_target_name_property'], 63 | 'uri') 64 | $_snapshot_name = pick($snapshot_name, 65 | $group_vars['patching_snapshot_name'], 66 | 'Bolt Patching Snapshot') 67 | $_snapshot_description = pick_default($snapshot_description, 68 | $group_vars['patching_snapshot_description'], 69 | 'Bolt Patching Snapshot') 70 | $_snapshot_memory = pick($snapshot_memory, 71 | $group_vars['patching_snapshot_memory'], 72 | false) 73 | $_snapshot_quiesce = pick($snapshot_quiesce, 74 | $group_vars['patching_snapshot_quiesce'], 75 | false) 76 | $_hypervisor_targets = pick($hypervisor_targets, 77 | $group_vars['patching_snapshot_kvm_hypervisor_targets'], 78 | 'kvm_hypervisors') 79 | 80 | # Create array of node names 81 | $vm_names = patching::target_names($_targets, $_target_name_property) 82 | 83 | # Display status message 84 | if $action == 'create' { 85 | out::message("Creating VM snapshot '${_snapshot_name}' for:") 86 | $vm_names.each |$n| { 87 | out::message(" + ${n}") 88 | } 89 | } else { 90 | out::message("Deleting VM snapshot '${_snapshot_name}' for:") 91 | $vm_names.each |$n| { 92 | out::message(" - ${n}") 93 | } 94 | } 95 | 96 | if !$noop { 97 | $results = (run_task('patching::snapshot_kvm', $_hypervisor_targets, 98 | vm_names => $vm_names, 99 | snapshot_name => $_snapshot_name, 100 | snapshot_description => $_snapshot_description, 101 | snapshot_memory => $_snapshot_memory, 102 | snapshot_quiesce => $_snapshot_quiesce, 103 | action => $action, 104 | _catch_errors => true)) 105 | 106 | $filtered_results = patching::filter_results($results, 'patching::snapshot_kvm') 107 | 108 | return $filtered_results 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /plans/snapshot_vmware.pp: 
-------------------------------------------------------------------------------- 1 | # @summary Creates or deletes VM snapshots on targets in VMware. 2 | # 3 | # Communicates to the vSphere API from the local Bolt control node using 4 | # the [rbvmomi](https://github.com/vmware/rbvmomi) Ruby gem. 5 | # 6 | # To install the rbvmomi gem on the bolt control node: 7 | # ```shell 8 | # /opt/puppetlabs/bolt/bin/gem install --user-install rbvmomi 9 | # ``` 10 | # 11 | # TODO config variables 12 | # 13 | # @param [TargetSpec] targets 14 | # Set of targets to run against. 15 | # 16 | # @param [Enum['create', 'delete']] action 17 | # What action to perform on the snapshots: 18 | # 19 | # - `create` creates a new snapshot 20 | # - 'delete' deletes snapshots by matching the `snapshot_name` passed in. 21 | # 22 | # @param [Optional[Enum['hostname', 'name', 'uri']]] target_name_property 23 | # Determines what property on the Target object will be used as the VM name when 24 | # mapping the Target to a VM in vSphere. 25 | # 26 | # - `uri` : use the `uri` property on the Target. This is preferred because 27 | # If you specify a list of Targets in the inventory file, the value shown in that 28 | # list is set as the `uri` and not the `name`, in this case `name` will be `undef`. 29 | # - `name` : use the `name` property on the Target, this is not preferred because 30 | # `name` is usually a short name or nickname. 31 | # - `hostname`: use the `hostname` value to use host component of `uri` property on the Target 32 | # this can be useful if VM name doesn't include domain name 33 | # 34 | # @param [String[1]] vsphere_host 35 | # Hostname of the vSphere server that we're going to use to create snapshots via the API. 36 | # 37 | # @param [String[1]] vsphere_username 38 | # Username to use when authenticating with the vSphere API. 39 | # 40 | # @param [String[1]] vsphere_password 41 | # Password to use when authenticating with the vSphere API. 42 | # 43 | # @param [String[1]] vsphere_datacenter 44 | # Name of the vSphere datacenter to search for VMs under. 45 | # 46 | # @param [Boolean] vsphere_insecure 47 | # Flag to enable insecure HTTPS connections by disabling SSL server certificate verification. 48 | # 49 | # @param [Optional[String[1]]] snapshot_name 50 | # Name of the snapshot 51 | # 52 | # @param [Optional[String]] snapshot_description 53 | # Description of the snapshot 54 | # 55 | # @param [Optional[Boolean]] snapshot_memory 56 | # Capture the VMs memory in the snapshot 57 | # 58 | # @param [Optional[Boolean]] snapshot_quiesce 59 | # Quiesce/flush the filesystem when snapshotting the VM. This requires VMware tools be installed 60 | # in the guest OS to work properly. 61 | # 62 | # @param [Boolean] noop 63 | # Flag to enable noop mode. When noop mode is enabled no snapshots will be created or deleted. 
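#
# The vSphere connection settings are normally supplied as inventory `vars` on the targets, since the
# parameter defaults below read them from the first target's vars
# (a hedged example; the host, credential, and datacenter values are placeholders):
#
# ``` yaml
# vars:
#   vsphere_host: vcenter.domain.tld
#   vsphere_username: patching_service_account
#   vsphere_password: SuperSecretPassword
#   vsphere_datacenter: dc01
#   vsphere_insecure: true
# ```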
64 | # 65 | plan patching::snapshot_vmware ( 66 | TargetSpec $targets, 67 | Enum['create', 'delete'] $action, 68 | Optional[Enum['hostname', 'name', 'uri']] $target_name_property = undef, 69 | String[1] $vsphere_host = get_targets($targets)[0].vars['vsphere_host'], 70 | String[1] $vsphere_username = get_targets($targets)[0].vars['vsphere_username'], 71 | String[1] $vsphere_password = get_targets($targets)[0].vars['vsphere_password'], 72 | String[1] $vsphere_datacenter = get_targets($targets)[0].vars['vsphere_datacenter'], 73 | Boolean $vsphere_insecure = get_targets($targets)[0].vars['vsphere_insecure'], 74 | Optional[String[1]] $snapshot_name = undef, 75 | Optional[String] $snapshot_description = undef, 76 | Optional[Boolean] $snapshot_memory = undef, 77 | Optional[Boolean] $snapshot_quiesce = undef, 78 | Boolean $noop = false, 79 | ) { 80 | $_targets = run_plan('patching::get_targets', $targets) 81 | $group_vars = $_targets[0].vars 82 | # Order: CLI > Config > Default 83 | $_target_name_property = pick($target_name_property, 84 | $group_vars['patching_snapshot_target_name_property'], 85 | 'uri') 86 | $_snapshot_name = pick($snapshot_name, 87 | $group_vars['patching_snapshot_name'], 88 | 'Bolt Patching Snapshot') 89 | $_snapshot_description = pick_default($snapshot_description, 90 | $group_vars['patching_snapshot_description'], 91 | '') 92 | $_snapshot_memory = pick($snapshot_memory, 93 | $group_vars['patching_snapshot_memory'], 94 | false) 95 | $_snapshot_quiesce = pick($snapshot_quiesce, 96 | $group_vars['patching_snapshot_quiesce'], 97 | true) 98 | 99 | # Create array of node names 100 | $vm_names = patching::target_names($_targets, $_target_name_property) 101 | 102 | # Display status message 103 | if $action == 'create' { 104 | out::message("Creating VM snapshot '${_snapshot_name}' for:") 105 | $vm_names.each |$n| { 106 | out::message(" + ${n}") 107 | } 108 | } else { 109 | out::message("Deleting VM snapshot '${_snapshot_name}' for:") 110 | $vm_names.each |$n| { 111 | out::message(" - ${n}") 112 | } 113 | } 114 | 115 | if !$noop { 116 | $results = patching::snapshot_vmware($vm_names, 117 | $_snapshot_name, 118 | $vsphere_host, 119 | $vsphere_username, 120 | $vsphere_password, 121 | $vsphere_datacenter, 122 | $vsphere_insecure, 123 | $_snapshot_description, 124 | $_snapshot_memory, 125 | $_snapshot_quiesce, 126 | $action) 127 | 128 | $filtered_results = patching::filter_results($results, 'patching::snapshot_vmware') 129 | 130 | return $filtered_results 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /plans/update_history.pp: -------------------------------------------------------------------------------- 1 | # @summary Collect update history from the results JSON file on the targets 2 | # 3 | # When executing the `patching::update` task, the data that is returned to Bolt 4 | # is also written into a "results" file. This plan reads the last JSON document 5 | # from that results file, then formats the results in various ways. 6 | # 7 | # This is useful for gather patching report data on a fleet of servers. 8 | # 9 | # If you're using this in a larger workflow and you've run `patching::update` inline. 10 | # You can pass the ResultSet from that task into the `history` parameter of this 11 | # plan and we will skip retrieving the history from the targets and simply use 12 | # that data. 13 | # 14 | # By default the report is also written to a file `patching_report.csv`. 
15 | # If you would like to disable this you can pass in `undef` or `'disabled'` to 16 | # `report_file` parameter. You can also customize this as by specifying the 17 | # `patching_report_file` var on the target or group. 18 | # 19 | # Patching format can also be customized using the inventory var `patching_report_format` 20 | # on the target or group. 21 | # 22 | # @param [TargetSpec] targets 23 | # Set of targets to run against. 24 | # 25 | # @param [Optional[ResultSet]] history 26 | # Optional ResultSet from the `patching::update` or `patching::update_history` tasks 27 | # that contains update result data to be formatted. 28 | # 29 | # @param [Optional[String]] report_file 30 | # Optional filename to save the formatted repot into. 31 | # If `undef` or `'disabled'` are passed, then no report file will be written. 32 | # NOTE: If you're running PE, then you'll need to disable writing reports because it will 33 | # fail when running from the console. 34 | # 35 | # @param [Enum['none', 'pretty', 'csv']] format 36 | # The method of formatting to use for the data. 37 | # 38 | # @return [String] Data string formatting in the method requested 39 | # 40 | plan patching::update_history ( 41 | TargetSpec $targets, 42 | Optional[ResultSet] $history = undef, 43 | Optional[String] $report_file = 'patching_report.csv', 44 | # TODO JSON outputs 45 | Enum['none', 'pretty', 'csv'] $format = 'pretty', 46 | ) { 47 | $_targets = run_plan('patching::get_targets', $targets) 48 | $group_vars = $_targets[0].vars 49 | $_format = pick($group_vars['patching_report_format'], $format) 50 | $_report_file = pick($group_vars['patching_report_file'], $report_file) 51 | 52 | ## Collect update history 53 | if $history { 54 | $_history = $history 55 | } 56 | else { 57 | $_history = run_task('patching::update_history', $_targets, _catch_errors => true) 58 | } 59 | 60 | ## Format the report 61 | case $_format { 62 | 'none': { 63 | return($_history) 64 | } 65 | 'pretty': { 66 | $row_format = '%-30s | %-8s | %-8s | %-8s' 67 | $header = sprintf($row_format, 'host', 'upgraded', 'installed', 'failed') 68 | $divider = '--------------------------------------------------------------' 69 | $output = $_history.map|$hist| { 70 | # in case history doesn't contain any updates 71 | $upgraded = pick($hist['upgraded'], []) 72 | $installed = pick($hist['installed'], []) 73 | $failed = pick($hist['failed'], []) 74 | $num_upgraded = $upgraded.size 75 | $num_installed = $installed.size 76 | $num_failed = $failed.size 77 | $row_format = '%-30s | %-8s | %-8s | %-8s' 78 | $message = sprintf($row_format, $hist.target.name, $num_upgraded, $num_installed, $num_failed) 79 | $message 80 | } 81 | 82 | ## Build report 83 | $report = join([$header, $divider] + $output + [''], "\n") 84 | } 85 | 'csv': { 86 | $csv_header = "host,action,name,version,kb (windows only)\n" 87 | $report = $_history.reduce($csv_header) |$res_memo, $res| { 88 | $hostname = $res.target.name 89 | # in case history doesn't contain any updates 90 | $upgraded = pick($res['upgraded'], []) 91 | $num_updates = $upgraded.length 92 | $host_updates = $upgraded.reduce('') |$up_memo, $up| { 93 | $name = $up['name'] 94 | $version = ('version' in $up) ? { 95 | true => $up['version'], 96 | default => '', 97 | } 98 | # if this is windows we want to print KB articles (one per line?) 99 | if 'kb_ids' in $up { 100 | # TODO: provider? 
- need a custom tab for windows vs linux 101 | 102 | # create a new line for each KB article 103 | $csv_line = $up['kb_ids'].reduce('') |$kb_memo, $kb| { 104 | $kb_line = "${hostname},upgraded,\"${name}\",\"${version}\",\"${kb}\"" 105 | "${kb_memo}${kb_line}\n" 106 | } 107 | } 108 | else { 109 | # TODO version old? - need a custom tab for windows vs linux 110 | 111 | # create one line per update/upgrade 112 | $csv_line = "${hostname},upgraded,\"${name}\",\"${version}\",\n" 113 | } 114 | "${up_memo}${csv_line}" 115 | } 116 | "${res_memo}${host_updates}" 117 | } 118 | } 119 | default: { 120 | fail_plan("unknown format: ${_format}") 121 | } 122 | } 123 | 124 | out::message($report) 125 | 126 | ## Write report to file 127 | if $_report_file and $_report_file != 'disabled' { 128 | file::write($_report_file, $report) 129 | } 130 | return($report) 131 | } 132 | -------------------------------------------------------------------------------- /spec/default_facts.yml: -------------------------------------------------------------------------------- 1 | # Use default_module_facts.yml for module specific facts. 2 | # 3 | # Facts specified here will override the values provided by rspec-puppet-facts. 4 | --- 5 | networking: 6 | ip: "172.16.254.254" 7 | ip6: "FE80:0000:0000:0000:AAAA:AAAA:AAAA" 8 | mac: "AA:AA:AA:AA:AA:AA" 9 | is_pe: false 10 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | RSpec.configure do |c| 4 | c.mock_with :rspec 5 | end 6 | 7 | require 'puppetlabs_spec_helper/module_spec_helper' 8 | require 'rspec-puppet-facts' 9 | 10 | require 'spec_helper_local' if File.file?(File.join(File.dirname(__FILE__), 'spec_helper_local.rb')) 11 | 12 | include RspecPuppetFacts 13 | 14 | default_facts = { 15 | puppetversion: Puppet.version, 16 | facterversion: Facter.version, 17 | } 18 | 19 | default_fact_files = [ 20 | File.expand_path(File.join(File.dirname(__FILE__), 'default_facts.yml')), 21 | File.expand_path(File.join(File.dirname(__FILE__), 'default_module_facts.yml')), 22 | ] 23 | 24 | default_fact_files.each do |f| 25 | next unless File.exist?(f) && File.readable?(f) && File.size?(f) 26 | 27 | begin 28 | require 'deep_merge' 29 | default_facts.deep_merge!(YAML.safe_load(File.read(f), permitted_classes: [], permitted_symbols: [], aliases: true)) 30 | rescue StandardError => e 31 | RSpec.configuration.reporter.message "WARNING: Unable to load #{f}: #{e}" 32 | end 33 | end 34 | 35 | # read default_facts and merge them over what is provided by facterdb 36 | default_facts.each do |fact, value| 37 | add_custom_fact fact, value, merge_facts: true 38 | end 39 | 40 | RSpec.configure do |c| 41 | c.default_facts = default_facts 42 | c.before :each do 43 | # set to strictest setting for testing 44 | # by default Puppet runs at warning level 45 | Puppet.settings[:strict] = :warning 46 | Puppet.settings[:strict_variables] = true 47 | end 48 | c.filter_run_excluding(bolt: true) unless ENV['GEM_BOLT'] 49 | c.after(:suite) do 50 | RSpec::Puppet::Coverage.report!(0) 51 | end 52 | 53 | # Filter backtrace noise 54 | backtrace_exclusion_patterns = [ 55 | %r{spec_helper}, 56 | %r{gems}, 57 | ] 58 | 59 | if c.respond_to?(:backtrace_exclusion_patterns) 60 | c.backtrace_exclusion_patterns = backtrace_exclusion_patterns 61 | elsif c.respond_to?(:backtrace_clean_patterns) 62 | c.backtrace_clean_patterns = backtrace_exclusion_patterns 63 | end 64 | 
end 65 | 66 | # Ensures that a module is defined 67 | # @param module_name Name of the module 68 | def ensure_module_defined(module_name) 69 | module_name.split('::').reduce(Object) do |last_module, next_module| 70 | last_module.const_set(next_module, Module.new) unless last_module.const_defined?(next_module, false) 71 | last_module.const_get(next_module, false) 72 | end 73 | end 74 | 75 | # 'spec_overrides' from sync.yml will appear below this line 76 | -------------------------------------------------------------------------------- /tasks/available_updates.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Collects information about available updates on a target system", 3 | "supports_noop": true, 4 | "implementations": [ 5 | { 6 | "name": "available_updates_linux.sh", 7 | "requirements": ["shell"], 8 | "files": [ 9 | "patching/files/bash/os_test.sh", 10 | "patching/files/bash/available_updates_rh.sh", 11 | "patching/files/bash/available_updates_deb.sh", 12 | "patching/files/bash/available_updates_sles.sh" 13 | ] 14 | }, 15 | { 16 | "name": "available_updates_windows.ps1", 17 | "requirements": ["powershell"], 18 | "files": ["patching/files/powershell/TaskUtils.ps1"] 19 | } 20 | ], 21 | "parameters": { 22 | "provider": { 23 | "description": "What update provider to use. For Linux (RHEL, Debian, SUSE, etc.) this parameter is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 24 | "type": "Optional[String]" 25 | } 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /tasks/available_updates_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Collects information about available updates on a target system", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | { 7 | "name": "available_updates_linux.sh", 8 | "requirements": ["shell"], 9 | "files": [ 10 | "patching/files/bash/os_test.sh", 11 | "patching/files/bash/available_updates_rh.sh", 12 | "patching/files/bash/available_updates_deb.sh" 13 | ] 14 | } 15 | ], 16 | "parameters": { 17 | "provider": { 18 | "description": "What update provider to use. For Linux (RHEL, Debian, etc) this parameter is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 19 | "type": "Optional[String[1]]" 20 | } 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /tasks/available_updates_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RESULT_FILE="$PT_result_file" 4 | if [[ -z "$RESULT_FILE" ]]; then 5 | export RESULT_FILE="/var/log/patching.json" 6 | fi 7 | if [[ ! 
-e "$RESULT_FILE" ]]; then 8 | touch "$RESULT_FILE" 9 | fi 10 | 11 | # Run our OS tests, export OS_RELEASE 12 | source "${PT__installdir}/patching/files/bash/os_test.sh" 13 | 14 | case $OS_RELEASE in 15 | ################################################################################ 16 | RHEL | CENTOS | FEDORA | ROCKY | OL | ALMALINUX) 17 | # RedHat variant 18 | source "${PT__installdir}/patching/files/bash/available_updates_rh.sh" 19 | ;; 20 | ################################################################################ 21 | DEBIAN | UBUNTU) 22 | # Debian variant 23 | source "${PT__installdir}/patching/files/bash/available_updates_deb.sh" 24 | ;; 25 | ################################################################################ 26 | SLES) 27 | # SUSE variant 28 | source "${PT__installdir}/patching/files/bash/available_updates_sles.sh" 29 | ;; 30 | ################################################################################ 31 | *) 32 | echo "Unknown Operating System: ${OS_RELEASE}" 33 | exit 2 34 | ;; 35 | esac 36 | -------------------------------------------------------------------------------- /tasks/available_updates_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Collects information about available updates on a target system", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | { 7 | "name": "available_updates_windows.ps1", 8 | "requirements": ["powershell"], 9 | "files": ["patching/files/powershell/TaskUtils.ps1"] 10 | } 11 | ], 12 | "parameters": { 13 | "provider": { 14 | "description": "What update provider to use. For Linux (RHEL, Debian, etc) this parameter is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 15 | "type": "Optional[String[1]]" 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tasks/available_updates_windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | Param( 3 | # Mandatory is set to false. 
If Set to $True then a dialog box appears to get the missing information 4 | # https://blogs.technet.microsoft.com/heyscriptingguy/2011/05/22/use-powershell-to-make-mandatory-parameters/ 5 | [Parameter(Mandatory = $False)] 6 | [String]$provider, 7 | [String]$_installdir 8 | ) 9 | 10 | Import-Module "$_installdir\patching\files\powershell\TaskUtils.ps1" 11 | 12 | Set-StrictMode -Version Latest 13 | $ErrorActionPreference = 'Stop' 14 | $ProgressPreference = 'SilentlyContinue' 15 | 16 | function AvailableUpdates-Windows() { 17 | $exit_code = 0 18 | $updateSession = Create-WindowsUpdateSession 19 | $updateList = Search-WindowsUpdate -session $updateSession 20 | $availableUpdateList = @() 21 | 22 | # for each update, collect information about it 23 | foreach ($updateAndServer in $updateList) { 24 | $serverSelection = $updateAndServer['server_selection'] 25 | $update = $updateAndServer['update'] 26 | #Write-Host "update = $update" 27 | $updateId = $update.Identity.UpdateID 28 | $kbIds = @() 29 | foreach ($kb in $update.KBArticleIDs) { 30 | $kbIds += $kb 31 | } 32 | $availableUpdateList += @{ 33 | 'name' = $update.Title; 34 | 'id' = $updateId; 35 | 'version' = $update.Identity.RevisionNumber; 36 | 'kb_ids' = $kbIds; 37 | 'server_selection' = $serverSelection; 38 | 'provider' = 'windows'; 39 | } 40 | } 41 | return @{ 42 | 'result' = @($availableUpdateList | Sort-Object); 43 | 'exit_code' = $exit_code; 44 | } 45 | } 46 | 47 | # function AvailableUpdates-Windows() { 48 | # $exit_code = 1 # Set a non-zero exit code to simulate failure 49 | # $error_message = "Simulated failure when checking for available updates on Windows." 50 | 51 | # return @{ 52 | # 'result' = $error_message; 53 | # 'exit_code' = $exit_code; 54 | # } 55 | # } 56 | 57 | # function AvailableUpdates-Chocolatey() { 58 | # $exit_code = 1 # Set a non-zero exit code to simulate failure 59 | # $error_message = "Simulated failure when checking for available updates on Windows." 60 | 61 | # return @{ 62 | # 'result' = $error_message; 63 | # 'exit_code' = $exit_code; 64 | # } 65 | # } 66 | 67 | function AvailableUpdates-Chocolatey([bool]$choco_required) { 68 | $exit_code = 0 69 | $updateList = @() 70 | if (-not (Test-CommandExists 'choco')) { 71 | if ($choco_required) { 72 | Write-Error "Unable to find required command: choco" 73 | exit 2 74 | } else { 75 | # Write-Error "Unable to find required command: choco" 76 | # exit 2 77 | # TODO make a chocolatey required parameter 78 | # chocolatey wasn't required, simply return an empty list 79 | return @{ 80 | 'result' = @($updateList); 81 | 'exit_code' = $exit_code; 82 | } 83 | } 84 | } 85 | 86 | # determine what chocolatey packages need upgrading 87 | # run command: choco outdated 88 | $output = iex "& choco outdated --limit-output --ignore-unfound" 89 | $exit_code = $LastExitCode 90 | # TODO handle unfound packages more gracefully 91 | 92 | if ($exit_code -eq 0) { 93 | # output is in the format: 94 | # package name|current version|available version|pinned? 95 | foreach ($line in $output) { 96 | $parts = @($line.split('|')) 97 | if ($parts.Length -lt 4) { 98 | return @{ 99 | 'result' = $output; 100 | 'exit_code' = 102; 101 | 'error' = '"choco outdated" command returned data in an unknown format (couldnt find at least 4x "|" characters). Check the "result" parameter for the raw output from the command. 
Guessing there was some unexpected error and "choco outdated" still returned an exit code of 0.'; 102 | } 103 | } 104 | $updateList += @{ 105 | 'name' = $parts[0]; 106 | 'version_old' = $parts[1]; 107 | 'version' = $parts[2]; 108 | 'pinned' = $parts[3]; 109 | 'provider' = 'chocolatey'; 110 | } 111 | } 112 | return @{ 113 | 'result' = @($updateList | Sort-Object); 114 | 'exit_code' = $exit_code; 115 | } 116 | } else { 117 | return @{ 118 | 'result' = $output; 119 | 'exit_code' = $exit_code; 120 | } 121 | } 122 | } 123 | 124 | if ($provider -eq '') { 125 | $provider = 'all' 126 | } 127 | 128 | $exit_code = 0 129 | if ($provider -eq 'windows') { 130 | $data_windows = AvailableUpdates-Windows 131 | $exit_code = $data_windows['exit_code'] 132 | if ($exit_code -eq 0) { 133 | $result = @{"updates" = @($data_windows['result'])} 134 | } 135 | else { 136 | $result = @{'error_windows' = $data_windows} 137 | } 138 | } elseif ($provider -eq 'chocolatey') { 139 | $data_chocolatey = AvailableUpdates-Chocolatey($True) 140 | $exit_code = $data_chocolatey['exit_code'] 141 | if ($exit_code -eq 0) { 142 | $result = @{"updates" = @($data_chocolatey['result'])} 143 | } 144 | else { 145 | $result = @{'error_chocolatey' = $data_chocolatey} 146 | } 147 | } elseif ($provider -eq 'all') { 148 | $result = @{"updates" = @()} 149 | $exit_code = 0 150 | 151 | $data_windows = AvailableUpdates-Windows 152 | $result_windows = $data_windows['result'] 153 | $exit_code_windows = $data_windows['exit_code'] 154 | if ($exit_code_windows -eq 0) { 155 | $result['updates'] += @($result_windows) 156 | } 157 | else { 158 | $result['error_windows'] = $result_windows 159 | $exit_code = $exit_code_windows 160 | } 161 | 162 | $data_chocolatey = AvailableUpdates-Chocolatey($False) 163 | $result_chocolatey = $data_chocolatey['result'] 164 | $exit_code_chocolatey = $data_chocolatey['exit_code'] 165 | if ($exit_code_chocolatey -eq 0) { 166 | $result['updates'] += @($result_chocolatey) 167 | } 168 | else { 169 | $result['error_chocolatey'] = $result_chocolatey 170 | $exit_code = $exit_code_chocolatey 171 | } 172 | } else { 173 | Write-Error "Unknown provider! Expected 'windows', 'chocolatey', 'all'. Got: $provider" 174 | exit 100 175 | } 176 | 177 | ConvertTo-Json -Depth 100 $result 178 | exit $exit_code 179 | -------------------------------------------------------------------------------- /tasks/cache_remove.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Removes/clears the target's update cache. For RHEL/CentOS this means a `yum clean all`. For Debian this means a `apt update`. For Windows this means a Windows Update refresh.", 3 | "implementations": [ 4 | { 5 | "name": "cache_remove_linux.sh", 6 | "requirements": ["shell"], 7 | "files": ["patching/files/bash/os_test.sh"] 8 | }, 9 | { 10 | "name": "cache_remove_windows.ps1", 11 | "requirements": ["powershell"] 12 | } 13 | ], 14 | "parameters": {} 15 | } 16 | 17 | -------------------------------------------------------------------------------- /tasks/cache_remove_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Removes/clears the target's update cache. For RHEL/CentOS this means a `yum clean all`. For Debian this means a `apt update`. 
For Windows this means a Windows Update refresh.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "cache_remove_linux.sh", 7 | "requirements": ["shell"], 8 | "files": ["patching/files/bash/os_test.sh"] 9 | } 10 | ], 11 | "parameters": {} 12 | } 13 | 14 | -------------------------------------------------------------------------------- /tasks/cache_remove_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run our OS tests, export OS_RELEASE 4 | source "${PT__installdir}/patching/files/bash/os_test.sh" 5 | 6 | case $OS_RELEASE in 7 | ################################################################################ 8 | RHEL | CENTOS | FEDORA | ROCKY | OL | ALMALINUX) 9 | # RedHat variant 10 | # clean yum cache 11 | OUTPUT=$(yum clean all 2>&1) 12 | STATUS=$? 13 | if [[ $STATUS -ne 0 ]]; then 14 | ERROR="yum clean all FAILED, you probably forgot to run this as sudo or there is a network error." 15 | fi 16 | ;; 17 | ################################################################################ 18 | DEBIAN | UBUNTU) 19 | # Debian variant 20 | ## Clean apt cache 21 | OUTPUT=$(apt-get clean 2>&1) 22 | STATUS=$? 23 | if [[ $STATUS -ne 0 ]]; then 24 | ERROR="apt-get clean FAILED, you probably forgot to run this as sudo or there is a network error." 25 | fi 26 | ;; 27 | ################################################################################ 28 | SLES) 29 | # SUSE variant 30 | # clean zypper cache 31 | OUTPUT=$(zypper clean 2>&1) 32 | STATUS=$? 33 | if [[ $STATUS -ne 0 ]]; then 34 | ERROR="zypper clean FAILED, you probably forgot to run this as sudo or there is a network error." 35 | fi 36 | ;; 37 | ################################################################################ 38 | *) 39 | ERROR="Unknown Operating System: ${OS_RELEASE}" 40 | STATUS=2 41 | ;; 42 | esac 43 | 44 | if [[ $STATUS -ne 0 ]]; then 45 | echo "ERROR: $ERROR" 46 | echo "Output: $OUTPUT" 47 | fi 48 | exit $STATUS 49 | -------------------------------------------------------------------------------- /tasks/cache_remove_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Removes/clears the target's update cache. For RHEL/CentOS this means a `yum clean all`. For Debian this means a `apt update`. For Windows this means a Windows Update refresh.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "cache_remove_windows.ps1", 7 | "requirements": ["powershell"] 8 | } 9 | ], 10 | "parameters": {} 11 | } 12 | 13 | -------------------------------------------------------------------------------- /tasks/cache_remove_windows.ps1: -------------------------------------------------------------------------------- 1 | Set-StrictMode -Version Latest 2 | $ErrorActionPreference = 'Stop' 3 | $ProgressPreference = 'SilentlyContinue' 4 | 5 | # Procedure for resetting WSUS documented here: 6 | # https://docs.microsoft.com/en-us/windows/deployment/update/windows-update-resources 7 | # https://gallery.technet.microsoft.com/scriptcenter/Reset-WindowsUpdateps1-e0c5eb78 8 | 9 | # Stop the Windows Update service 10 | Write-Host "Stopping service... bits" 11 | Stop-Service -Name bits 12 | Write-Host "Stopping service... wuauserv" 13 | Stop-Service -Name wuauserv 14 | 15 | # Remove the downloaded updates 16 | Write-Host "Removing downloaded updates... 
$env:systemroot\SoftwareDistribution\Download" 17 | Remove-Item "$env:systemroot\SoftwareDistribution\Download" -force -Confirm:$false -Recurse -ErrorAction SilentlyContinue 18 | Write-Host "Removing downloaded updates... $env:allusersprofile\Application Data\Microsoft\Network\Downloader\qmgr*.dat" 19 | Remove-Item "$env:allusersprofile\Application Data\Microsoft\Network\Downloader\qmgr*.dat" -force -Confirm:$false -ErrorAction SilentlyContinue 20 | 21 | # Re-register all WSUS DLLs 22 | Write-Host "Re-registering WSUS DLLs" 23 | Set-Location $env:systemroot\system32 24 | regsvr32.exe /s atl.dll 25 | regsvr32.exe /s urlmon.dll 26 | regsvr32.exe /s mshtml.dll 27 | regsvr32.exe /s shdocvw.dll 28 | regsvr32.exe /s browseui.dll 29 | regsvr32.exe /s jscript.dll 30 | regsvr32.exe /s vbscript.dll 31 | regsvr32.exe /s scrrun.dll 32 | regsvr32.exe /s msxml.dll 33 | regsvr32.exe /s msxml3.dll 34 | regsvr32.exe /s msxml6.dll 35 | regsvr32.exe /s actxprxy.dll 36 | regsvr32.exe /s softpub.dll 37 | regsvr32.exe /s wintrust.dll 38 | regsvr32.exe /s dssenh.dll 39 | regsvr32.exe /s rsaenh.dll 40 | regsvr32.exe /s gpkcsp.dll 41 | regsvr32.exe /s sccbase.dll 42 | regsvr32.exe /s slbcsp.dll 43 | regsvr32.exe /s cryptdlg.dll 44 | regsvr32.exe /s oleaut32.dll 45 | regsvr32.exe /s ole32.dll 46 | regsvr32.exe /s shell32.dll 47 | regsvr32.exe /s initpki.dll 48 | regsvr32.exe /s wuapi.dll 49 | regsvr32.exe /s wuaueng.dll 50 | regsvr32.exe /s wuaueng1.dll 51 | regsvr32.exe /s wucltui.dll 52 | regsvr32.exe /s wups.dll 53 | regsvr32.exe /s wups2.dll 54 | regsvr32.exe /s wuweb.dll 55 | regsvr32.exe /s qmgr.dll 56 | regsvr32.exe /s qmgrprxy.dll 57 | regsvr32.exe /s wucltux.dll 58 | regsvr32.exe /s muweb.dll 59 | regsvr32.exe /s wuwebv.dll 60 | 61 | # Start the Windows Update service 62 | Write-Host "Starting service... bits" 63 | Start-Service -Name bits 64 | Write-Host "Starting service... wuauserv" 65 | Start-Service -Name wuauserv 66 | 67 | # Force WSUS discovery 68 | Write-Host "Forcing WSUS re-auth and discovery..." 69 | wuauclt /resetauthorization /detectnow 70 | 71 | exit 0 72 | -------------------------------------------------------------------------------- /tasks/cache_update.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Updates the targets update cache. For RHEL/CentOS this means a `yum clean expire-cache`. For Debian this means a `apt update`. For Windows this means a Windows Update refresh.", 3 | "supports_noop": true, 4 | "implementations": [ 5 | { 6 | "name": "cache_update_linux.sh", 7 | "requirements": ["shell"], 8 | "files": ["patching/files/bash/os_test.sh"] 9 | }, 10 | { 11 | "name": "cache_update_windows.ps1", 12 | "requirements": ["powershell"], 13 | "files": ["patching/files/powershell/TaskUtils.ps1"] 14 | } 15 | ], 16 | "parameters": {} 17 | } 18 | 19 | -------------------------------------------------------------------------------- /tasks/cache_update_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Updates the targets update cache. For RHEL/CentOS this means a `yum clean expire-cache`. 
For Debian this means a `apt update`.", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | { 7 | "name": "cache_update_linux.sh", 8 | "requirements": ["shell"], 9 | "files": ["patching/files/bash/os_test.sh"] 10 | } 11 | ], 12 | "parameters": {} 13 | } 14 | 15 | -------------------------------------------------------------------------------- /tasks/cache_update_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -n "$PT__noop" && "$PT__noop" == "true" ]]; then 4 | echo '{"message": "noop - cache was not updated"}' 5 | exit 0 6 | fi 7 | 8 | # Run our OS tests, export OS_RELEASE 9 | source "${PT__installdir}/patching/files/bash/os_test.sh" 10 | 11 | case $OS_RELEASE in 12 | ################################################################################ 13 | RHEL | CENTOS | FEDORA | ROCKY | OL | ALMALINUX) 14 | # RedHat variant 15 | # update yum cache 16 | OUTPUT=$(yum clean expire-cache 2>&1) 17 | STATUS=$? 18 | if [[ $STATUS -ne 0 ]]; then 19 | ERROR="yum clean expire-cache FAILED, you probably forgot to run this as sudo or there is a network error." 20 | fi 21 | ;; 22 | ################################################################################ 23 | DEBIAN | UBUNTU) 24 | # Debian variant 25 | ## Update apt cache 26 | OUTPUT=$(apt-get -y update 2>&1) 27 | STATUS=$? 28 | if [[ $STATUS -ne 0 ]]; then 29 | ERROR="apt-get -y update FAILED, you probably forgot to run this as sudo or there is a network error." 30 | fi 31 | ;; 32 | ################################################################################ 33 | SLES) 34 | # SUSE variant 35 | ## Update zypper cache 36 | OUTPUT=$(zypper ref 2>&1) 37 | STATUS=$? 38 | if [[ $STATUS -ne 0 ]]; then 39 | ERROR="zypper ref FAILED, you probably forgot to run this as sudo or there is a network error." 40 | fi 41 | ;; 42 | ################################################################################ 43 | *) 44 | ERROR="Unknown Operating System: ${OS_RELEASE}" 45 | STATUS=2 46 | ;; 47 | esac 48 | 49 | if [[ $STATUS -ne 0 ]]; then 50 | echo "ERROR: $ERROR" 51 | echo "Output: $OUTPUT" 52 | fi 53 | exit $STATUS 54 | -------------------------------------------------------------------------------- /tasks/cache_update_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Updates the targets update cache. For Windows this means a Windows Update refresh.", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | { 7 | "name": "cache_update_windows.ps1", 8 | "requirements": ["powershell"], 9 | "files": ["patching/files/powershell/TaskUtils.ps1"] 10 | } 11 | ], 12 | "parameters": {} 13 | } 14 | 15 | -------------------------------------------------------------------------------- /tasks/cache_update_windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | Param( 3 | # Mandatory is set to false. If Set to $True then a dialog box appears to get the missing information 4 | # We will do a variable check later 5 | # https://blogs.technet.microsoft.com/heyscriptingguy/2011/05/22/use-powershell-to-make-mandatory- parameters/ 6 | [Parameter(Mandatory = $False)] 7 | [String]$_installdir, 8 | [Boolean]$_noop = $false 9 | ) 10 | 11 | # TODO 12 | # - WUA: run wuactl? 13 | # - Chocolatey: ? 
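# Illustrative Bolt CLI invocation of the wrapping task (a hedged sketch; 'windows_hosts' is a placeholder group name):
#   bolt task run patching::cache_update --targets windows_hosts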
14 | 15 | 16 | # if ($_noop) { 17 | # Write-Output '{"message": "noop - cache was not updated"}' 18 | # exit 0 19 | # } 20 | 21 | # Import-Module "$_installdir\patching\files\powershell\TaskUtils.ps1" 22 | 23 | # Set-StrictMode -Version Latest 24 | # $ErrorActionPreference = 'Stop' 25 | # $ProgressPreference = 'SilentlyContinue' 26 | 27 | # # Restart the Windows Update service 28 | # Restart-Service -Name wuauserv 29 | 30 | # $exitStatus = 0 31 | 32 | # # search all windows update servers 33 | # $cacheResultHash = @{"servers" = @()} 34 | # $updateSession = Create-WindowsUpdateSession 35 | # $searchResultHash = Search-WindowsUpdateResults -session $updateSession 36 | # foreach ($serverSelection in ($searchResultHash.keys | Sort-Object)) { 37 | # $value = $searchResultHash[$serverSelection] 38 | # $searchResult = $value['result'] 39 | 40 | # # interpret the result code and have us exit with an error if any of the patches error 41 | # $result = @{ 42 | # 'name' = $value['name']; 43 | # 'server_selection' = $serverSelection; 44 | # 'result_code' = $searchResult.ResultCode; 45 | # } 46 | # switch ($searchResult.ResultCode) 47 | # { 48 | # 0 { $result['result'] = 'Not Started'; break } 49 | # 1 { $result['result'] = 'In Progress'; break } 50 | # 2 { $result['result'] = 'Succeeded'; break } 51 | # 3 { $result['result'] = 'Succeeded With Errors'; break } 52 | # 4 { 53 | # $result['result'] = 'Failed' 54 | # $exitStatus = 2 55 | # break 56 | # } 57 | # 5 { 58 | # $result['result'] = 'Aborted' 59 | # $exitStatus = 2 60 | # break 61 | # } 62 | # default { $result['result'] = 'Unknown'; break } 63 | # } 64 | 65 | # $cacheResultHash['servers'] += $result 66 | # } 67 | 68 | # ConvertTo-Json -Depth 100 $cacheResultHash 69 | # exit $exitStatus 70 | 71 | exit 0 72 | -------------------------------------------------------------------------------- /tasks/history.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Reads the update history from the JSON 'result_file'.", 3 | "implementations": [ 4 | { 5 | "name": "update_history_linux.sh", 6 | "requirements": ["shell"], 7 | "files": ["patching/files/bash/os_test.sh"] 8 | }, 9 | { 10 | "name": "update_history_windows.ps1", 11 | "requirements": ["powershell"], 12 | "files": ["patching/files/powershell/TaskUtils.ps1"] 13 | } 14 | ], 15 | "parameters": { 16 | "result_file": { 17 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. This is data that was written by patching::update. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. If no script name is passed on Windows hosts a default is used: C:/ProgramData/PuppetLabs/patching/patching.json", 18 | "type": "Optional[String[1]]" 19 | } 20 | } 21 | } 22 | 23 | -------------------------------------------------------------------------------- /tasks/monitoring_prometheus.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Create or remove alert silences for hosts in Prometheus.", 3 | "remote": true, 4 | "supports_noop": true, 5 | "files": [ 6 | "patching/lib/puppet_x/encore/patching/http_helper.rb", 7 | "ruby_task_helper/files/task_helper.rb" 8 | ], 9 | "parameters": { 10 | "targets": { 11 | "type": "Variant[String[1], Array[String[1]]]", 12 | "description": "List of hostnames for targets in Prometheus that will have monitoring alerts either enabled or disabled." 
13 | }, 14 | "action": { 15 | "type": "Enum['enable', 'disable']", 16 | "description": "Action to perform on monitored targets. 'enable' will enable monitoring alerts. 'disable' will disable monitoring alerts on targets." 17 | }, 18 | "prometheus_server": { 19 | "type": "String[1]", 20 | "description": "FQDN of the Prometheus server to create an alert silence for" 21 | }, 22 | "silence_duration": { 23 | "type": "Optional[Integer]", 24 | "description": "How long the alert silence will be alive for" 25 | }, 26 | "silence_units": { 27 | "type": "Optional[Enum['minutes', 'hours', 'days', 'weeks']]", 28 | "description": "Goes with the silence duration to determine how long the alert silence will be alive for" 29 | }, 30 | "ssl_cert": { 31 | "type": "Optional[String]", 32 | "description": "Optional CA File to specify for SSL requests" 33 | }, 34 | "ssl_verify": { 35 | "type": "Optional[Boolean]", 36 | "description": "Whether or not to use SSL" 37 | } 38 | } 39 | } 40 | 41 | -------------------------------------------------------------------------------- /tasks/monitoring_prometheus.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require_relative '../../ruby_task_helper/files/task_helper.rb' 3 | require_relative '../lib/puppet_x/encore/patching/http_helper.rb' 4 | require 'time' 5 | require 'json' 6 | require 'open3' 7 | 8 | # Bolt task for enabling/disabling monitoring alerts in Prometheus 9 | class MonitoringPrometheusTask < TaskHelper 10 | def get_end_timestamp(duration, units) 11 | case units 12 | when 'minutes' 13 | offset = 60 14 | when 'hours' 15 | offset = 3600 16 | when 'days' 17 | offset = 86_400 18 | when 'weeks' 19 | offset = 604_800 20 | end 21 | 22 | (Time.now.utc + duration * offset).iso8601 23 | end 24 | 25 | def check_telegraf_service(target, timeout, interval) 26 | url = "http://#{target}:19100/metrics" 27 | end_time = Time.now + timeout 28 | 29 | while Time.now < end_time 30 | command = "curl --silent --head --fail #{url}" 31 | stdout, stderr, status = Open3.capture3(command) 32 | return true if status.success?
33 | 34 | sleep(interval) 35 | end 36 | 37 | false 38 | end 39 | 40 | # Create a silence for every target that starts now and ends after the given duration 41 | def create_silences(targets, duration, units, prometheus_server, http_helper) 42 | silence_ids = [] 43 | ok_targets = [] 44 | failed_targets = {} 45 | 46 | targets.each do |target| 47 | payload = { 48 | matchers: [{ name: 'alias', value: target, isRegex: false }], 49 | startsAt: Time.now.utc.iso8601, 50 | endsAt: get_end_timestamp(duration, units), 51 | comment: "Silencing alerts on #{target} for patching", 52 | createdBy: 'patching', 53 | } 54 | headers = { 'Content-Type' => 'application/json' } 55 | begin 56 | res = http_helper.post("https://#{prometheus_server}:9093/api/v2/silences", 57 | body: payload.to_json, 58 | headers: headers) 59 | 60 | ok_targets.push(target) if res.code == '200' 61 | rescue => e 62 | failed_targets[target] = e.message 63 | end 64 | end 65 | 66 | { ok_targets: ok_targets, failed_targets: failed_targets } 67 | end 68 | 69 | # Remove all silences for targets that were created by 'patching' 70 | def remove_silences(targets, prometheus_server, http_helper, timeout, interval) 71 | ok_targets = [] 72 | failed_targets = {} 73 | res = http_helper.get("https://#{prometheus_server}:9093/api/v2/silences") 74 | silences = res.body 75 | 76 | (JSON.parse silences).each do |silence| 77 | target = silence['matchers'][0]['value'] 78 | # Verify that the current silence is for one of the given targets 79 | # All silences created by this task will have exactly one matcher 80 | next if silence['matchers'][0]['name'] != 'alias' || !targets.include?(silence['matchers'][0]['value']) 81 | # Remove only silences that are active and were created by 'patching' 82 | if silence['status']['state'] == 'active' && silence['createdBy'] == 'patching' 83 | if check_telegraf_service(target, timeout, interval) 84 | begin 85 | res = http_helper.delete("https://#{prometheus_server}:9093/api/v2/silence/#{silence['id']}") 86 | ok_targets.push(target) if res.code == '200' 87 | rescue => e 88 | failed_targets[target] = e.message 89 | end 90 | else 91 | failed_targets[target] = "Telegraf service not up on #{target} after waiting for #{timeout} seconds" 92 | end 93 | end 94 | end 95 | 96 | { ok_targets: ok_targets, failed_targets: failed_targets } 97 | end 98 | 99 | # This will either enable or disable monitoring 100 | def task(targets: nil, 101 | action: nil, 102 | prometheus_server: nil, 103 | silence_duration: nil, 104 | silence_units: nil, 105 | ssl_cert: nil, 106 | ssl_verify: false, 107 | timeout: 60, 108 | interval: 5, 109 | **_kwargs) 110 | # targets can be either an array or a string with a single target 111 | # Check if a single target was given and convert it to an array if it was 112 | if targets.is_a? String 113 | targets = [targets] 114 | end 115 | 116 | http_helper = PuppetX::Patching::HTTPHelper.new(ssl: ssl_verify, ca_file: ssl_verify ? 
ssl_cert : nil) 117 | 118 | if action == 'disable' 119 | silences_result = create_silences(targets, silence_duration, silence_units, prometheus_server, http_helper) 120 | elsif action == 'enable' 121 | silences_result = remove_silences(targets, prometheus_server, http_helper, timeout, interval) 122 | end 123 | 124 | silences_result 125 | end 126 | end 127 | 128 | MonitoringPrometheusTask.run if $PROGRAM_NAME == __FILE__ -------------------------------------------------------------------------------- /tasks/monitoring_solarwinds.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Enable or disable monitoring alerts on hosts in SolarWinds.", 3 | "remote": true, 4 | "supports_noop": true, 5 | "files": [ 6 | "patching/lib/puppet_x/encore/patching/http_helper.rb", 7 | "patching/lib/puppet_x/encore/patching/orion_client.rb", 8 | "ruby_task_helper/files/task_helper.rb" 9 | ], 10 | "parameters": { 11 | "targets": { 12 | "type": "Variant[String[1], Array[String[1]]]", 13 | "description": "List of hostnames or IP addresses for targets in SolarWinds that will have monitoring alerts either enabled or disabled." 14 | }, 15 | "name_property": { 16 | "type": "Optional[String[1]]", 17 | "description": "Property to use when looking up an Orion.Node in SolarWinds from a Bolt::Target. By default we check to see if the node is an IP address, if it is then we use the 'IPAddress' property, otherwise we use 'DNS'. If you want to change what the 'other' property is when the node name isn't an IP address, then specify this property." 18 | }, 19 | "action": { 20 | "type": "Enum['enable', 'disable']", 21 | "description": "Action to perform on monitored targets. 'enable' will enable monitoring alerts. 'disable' will disable monitoring alerts on targets." 22 | } 23 | } 24 | } 25 | 26 | -------------------------------------------------------------------------------- /tasks/monitoring_solarwinds.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | require_relative '../../ruby_task_helper/files/task_helper.rb' 3 | 4 | # Bolt task for enabling/disabling monitoring alerts in SolarWinds 5 | class MonitoringSolarwindsTask < TaskHelper 6 | def add_module_lib_paths(install_dir) 7 | Dir.glob(File.join([install_dir, '*'])).each do |mod| 8 | $LOAD_PATH << File.join([mod, 'lib']) 9 | end 10 | end 11 | 12 | def task(targets: nil, 13 | name_property: nil, 14 | action: nil, 15 | **kwargs) 16 | add_module_lib_paths(kwargs[:_installdir]) 17 | require 'puppet_x/encore/patching/orion_client' 18 | 19 | # targets can be a string (one target) or an array of targets 20 | # if it's a string (single target) convert it to an array so we can treat them the same 21 | targets = [targets] unless targets.is_a?(Array) 22 | 23 | # set name_property to a default of 'DNS' if it isn't specified 24 | name_property = 'DNS' if name_property.nil? 25 | 26 | # this key contains all of the remote configuration from the inventory.yaml 27 | # combined with information for the remote target (SolarWinds server) 28 | remote_target = kwargs[:_target] 29 | 30 | # suppress alerts on a host 31 | orion = PuppetX::Patching::OrionClient.new(remote_target[:host], 32 | username: remote_target[:username], 33 | password: remote_target[:password], 34 | port: remote_target.fetch(:port, 17_778)) 35 | 36 | missing_targets = [] 37 | uri_array = targets.map do |t| 38 | sw_nodes = orion.get_node(t, name_property: name_property) 39 | 40 | if sw_nodes.empty? 
41 | missing_targets << t 42 | next 43 | elsif sw_nodes.length > 1 44 | raise ArgumentError, "Found [#{sw_nodes.length}] targets matching '#{t}': #{sw_nodes.to_json}" 45 | end 46 | 47 | # extract the URI property for our good nodes 48 | sw_nodes.map { |sw_n| sw_n['Uri'] } 49 | end 50 | 51 | # print all of the missing targets at the same time to make debugging easier 52 | unless missing_targets.empty? 53 | missing_pretty = JSON.pretty_generate(missing_targets.sort) 54 | raise ArgumentError, "Unable to find the following targets in SolarWinds using the name property '#{name_property}': #{missing_pretty}" 55 | end 56 | 57 | uri_array.flatten! 58 | case action 59 | when 'disable' 60 | orion.suppress_alerts(uri_array) 61 | when 'enable' 62 | orion.resume_alerts(uri_array) 63 | else 64 | raise ArgumentError, "Unknown action: #{action}" 65 | end 66 | end 67 | end 68 | 69 | MonitoringSolarwindsTask.run if $PROGRAM_NAME == __FILE__ 70 | -------------------------------------------------------------------------------- /tasks/post_update.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Run post-update script on target host(s), only if it exists. If the script doesn't exist or isn't executable, then this task succeeds (this allows us to run this task on all hosts, even if they don't have a post-update script).", 3 | "supports_noop": true, 4 | "implementations": [ 5 | {"name": "pre_post_update_linux.sh", "requirements": ["shell"]}, 6 | {"name": "pre_post_update_windows.ps1", "requirements": ["powershell"]} 7 | ], 8 | "parameters": { 9 | "script": { 10 | "type": "Optional[String[1]]", 11 | "description": "Absolute path of the script to execute. If no script name is passed on Linux hosts a default is used: /opt/patching/bin/post_update.sh. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/bin/post_update.ps1." 12 | } 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /tasks/pre_post_update_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Pre-post-update definition to make bolt not throw a warning. Best to use pre_update or post_update directly.", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | {"name": "pre_post_update_linux.sh", "requirements": ["shell"]} 7 | ], 8 | "parameters": { 9 | "script": { 10 | "type": "Optional[String[1]]", 11 | "description": "Absolute path of the script to execute. If no script name is passed on Linux hosts a default is used: /opt/patching/bin/pre_update.sh. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/bin/pre_update.ps1."
12 | } 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /tasks/pre_post_update_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # when doing a `bolt task run` without a script argument the PT_script variable is not set 4 | # when doing a run_task('patching::pre_update', script => undef) PT_script is set to "null" 5 | if [[ -z "$PT_script" || "$PT_script" == "null" ]]; then 6 | # set our default script, if one wasn't passed in, based on the calling task 7 | if [[ "$PT__task" == "patching::pre_update" ]]; then 8 | PT_script='/opt/patching/bin/pre_update.sh' 9 | elif [[ "$PT__task" == "patching::post_update" ]]; then 10 | PT_script='/opt/patching/bin/post_update.sh' 11 | else 12 | echo "ERROR - 'script' wasn't specified and we were called with an unknown task: ${PT__task}" >&2 13 | exit 2 14 | fi 15 | fi 16 | 17 | if [[ -x "$PT_script" ]]; then 18 | if [[ -n "$PT__noop" && "$PT__noop" == "true" ]]; then 19 | echo "{\"message\": \"noop - would have executed script: ${PT_script}\"}" 20 | exit 0 21 | else 22 | echo "{\"script\": \"${PT_script}\"}" 23 | "$PT_script" 24 | exit $? 25 | fi 26 | else 27 | echo "WARNING: Script doesn't exist or isn't executable: ${PT_script}" 28 | exit 0 29 | fi 30 | -------------------------------------------------------------------------------- /tasks/pre_post_update_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Pre-post-update definition to make bolt not throw a warning. Best to use pre_update or post_update directly.", 3 | "supports_noop": true, 4 | "private": true, 5 | "implementations": [ 6 | {"name": "pre_post_update_windows.ps1", "requirements": ["powershell"]} 7 | ], 8 | "parameters": { 9 | "script": { 10 | "type": "Optional[String[1]]", 11 | "description": "Absolute path of the script to execute. If no script name is passed on Linux hosts a default is used: /opt/patching/bin/pre_update.sh. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/bin/pre_update.ps1." 12 | } 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /tasks/pre_post_update_windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | Param( 3 | # Mandatory is set to false. 
If Set to $True then a dialog box appears to get the missing information 4 | # We will do a variable check later 5 | # https://blogs.technet.microsoft.com/heyscriptingguy/2011/05/22/use-powershell-to-make-mandatory-parameters/ 6 | [Parameter(Mandatory = $False)] 7 | [String]$script, 8 | [String]$_task 9 | ) 10 | 11 | Set-StrictMode -Version Latest 12 | $ErrorActionPreference = 'Stop' 13 | $ProgressPreference = 'SilentlyContinue' 14 | 15 | If (-not $script) { 16 | If ($_task -eq 'patching::pre_update') { 17 | $script = 'C:\ProgramData\patching\bin\pre_update.ps1' 18 | } elseif ($_task -eq 'patching::post_update') { 19 | $script = 'C:\ProgramData\patching\bin\post_update.ps1' 20 | } else { 21 | Write-Error "ERROR - 'script' wasn't specified and we were called with an unknown task: $_task" 22 | exit 2 23 | } 24 | } 25 | 26 | If ($script -and (Test-Path $script -PathType Leaf)) { 27 | & $script 28 | exit $LASTEXITCODE 29 | } else { 30 | Write-Output "WARNING: Script doesn't exist: $script" 31 | exit 0 32 | } 33 | -------------------------------------------------------------------------------- /tasks/pre_update.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Run pre-update script on target host(s), only if it exists. If the script doesn't exist or isn't executable, then this task succeeds (this allows us to run this task on all hosts, even if they don't have a pre-update script).", 3 | "supports_noop": true, 4 | "implementations": [ 5 | {"name": "pre_post_update_linux.sh", "requirements": ["shell"]}, 6 | {"name": "pre_post_update_windows.ps1", "requirements": ["powershell"]} 7 | ], 8 | "parameters": { 9 | "script": { 10 | "type": "Optional[String[1]]", 11 | "description": "Absolute path of the script to execute. If no script name is passed on Linux hosts a default is used: /opt/patching/bin/pre_update.sh. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/bin/pre_update.ps1." 12 | } 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /tasks/puppet_facts.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Gather system facts using 'puppet facts'. Puppet agent MUST be installed for this to work.", 3 | "parameters": {} 4 | } 5 | -------------------------------------------------------------------------------- /tasks/puppet_facts.rb: -------------------------------------------------------------------------------- 1 | #!/opt/puppetlabs/puppet/bin/ruby 2 | # frozen_string_literal: true 3 | 4 | def puppet_executable 5 | if Gem.win_platform? 6 | require 'win32/registry' 7 | installed_dir = 8 | begin 9 | Win32::Registry::HKEY_LOCAL_MACHINE.open('SOFTWARE\Puppet Labs\Puppet') do |reg| 10 | # rubocop:disable Style/RescueModifier 11 | # Rescue missing key 12 | dir = reg['RememberedInstallDir64'] rescue '' 13 | # Both keys may exist, make sure the dir exists 14 | break dir if File.exist?(dir) 15 | 16 | # Rescue missing key 17 | reg['RememberedInstallDir'] rescue '' 18 | # rubocop:enable Style/RescueModifier 19 | end 20 | rescue Win32::Registry::Error 21 | # Rescue missing registry path 22 | '' 23 | end 24 | 25 | puppet = 26 | if installed_dir.empty? 27 | '' 28 | else 29 | File.join(installed_dir, 'bin', 'puppet') 30 | end 31 | else 32 | puppet = '/opt/puppetlabs/bin/puppet' 33 | end 34 | 35 | # Fall back to PATH lookup if puppet-agent isn't installed 36 | File.exist?(puppet) ?
puppet : 'puppet' 37 | end 38 | 39 | # Delegate to puppet facts 40 | puppet_exe = puppet_executable 41 | if puppet_exe != 'puppet' 42 | # If using the standard install of Puppet, then clear out any custom GEM_PATH that may 43 | # be set on the system. Custom GEM_PATHs can cause all sorts of issues when puppet loads 44 | ENV.delete('GEM_PATH') 45 | end 46 | exec(puppet_exe, 'facts', '--render-as', 'json') 47 | -------------------------------------------------------------------------------- /tasks/reboot_required.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Checks if a reboot is pending", 3 | "implementations": [ 4 | { 5 | "name": "reboot_required_linux.sh", 6 | "requirements": ["shell"], 7 | "files": [ 8 | "patching/files/bash/os_test.sh", 9 | "patching/files/bash/reboot_required_rh.sh", 10 | "patching/files/bash/reboot_required_deb.sh", 11 | "patching/files/bash/reboot_required_sles.sh" 12 | ] 13 | }, 14 | { 15 | "name": "reboot_required_windows.ps1", 16 | "requirements": ["powershell"], 17 | "files": ["patching/files/powershell/TaskUtils.ps1"] 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /tasks/reboot_required_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Checks if a reboot is pending", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "reboot_required_linux.sh", 7 | "requirements": ["shell"], 8 | "files": [ 9 | "patching/files/bash/os_test.sh", 10 | "patching/files/bash/reboot_required_rh.sh", 11 | "patching/files/bash/reboot_required_deb.sh" 12 | ] 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tasks/reboot_required_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Run our OS tests, export OS_RELEASE 4 | source "${PT__installdir}/patching/files/bash/os_test.sh" 5 | 6 | # default 7 | export REBOOT_REQUIRED="false" 8 | 9 | case $OS_RELEASE in 10 | ################################################################################ 11 | RHEL | CENTOS | FEDORA | ROCKY | OL | ALMALINUX) 12 | # RedHat variant 13 | source "${PT__installdir}/patching/files/bash/reboot_required_rh.sh" 14 | ;; 15 | ################################################################################ 16 | DEBIAN | UBUNTU) 17 | # Debian variant 18 | source "${PT__installdir}/patching/files/bash/reboot_required_deb.sh" 19 | ;; 20 | ################################################################################ 21 | SLES) 22 | # SUSE variant 23 | source "${PT__installdir}/patching/files/bash/reboot_required_sles.sh" 24 | ;; 25 | ################################################################################ 26 | *) 27 | echo "ERROR - Unknown Operating System: ${OS_RELEASE}" 28 | exit 2 29 | ;; 30 | esac 31 | 32 | echo "{\"reboot_required\": ${REBOOT_REQUIRED} }" 33 | exit 0 34 | 35 | -------------------------------------------------------------------------------- /tasks/reboot_required_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Checks if a reboot is pending", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "reboot_required_windows.ps1", 7 | "requirements": ["powershell"], 8 | "files": ["patching/files/powershell/TaskUtils.ps1"] 9 | } 10 | ] 11 | } 12 | 
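The monitoring_prometheus task above silences alerts by POSTing to the silences endpoint on port 9093 of the configured prometheus_server (Alertmanager's v2 API). A rough equivalent of the request that create_silences() in monitoring_prometheus.rb builds for one target is sketched below; the server name, target hostname, timestamps, and the use of curl -k are illustrative assumptions, not part of the module:

    curl -k -X POST "https://alertmanager.example.com:9093/api/v2/silences" \
      -H 'Content-Type: application/json' \
      -d '{
            "matchers": [{"name": "alias", "value": "web01.example.com", "isRegex": false}],
            "startsAt": "2024-06-01T00:00:00Z",
            "endsAt": "2024-06-01T02:00:00Z",
            "comment": "Silencing alerts on web01.example.com for patching",
            "createdBy": "patching"
          }'

The 'enable' action reverses this: remove_silences() lists active silences from the same endpoint and deletes those whose single 'alias' matcher names one of the given targets and whose createdBy is 'patching', but only after the target's Telegraf endpoint (port 19100) responds again.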
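The reboot_required task above (implemented by reboot_required_linux.sh above and reboot_required_windows.ps1 below) returns a small JSON document of the form {"reboot_required": <bool>}. A minimal sketch of calling it from the Bolt CLI; the inventory group name 'linux_servers' is a placeholder, not something this module defines:

    # Check every target in a hypothetical inventory group for a pending reboot
    bolt task run patching::reboot_required --targets linux_servers
    # Example per-target output, as printed by reboot_required_linux.sh and reboot_required_windows.ps1:
    #   {"reboot_required": false}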
-------------------------------------------------------------------------------- /tasks/reboot_required_windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | Param( 3 | # Mandatory is set to false. If Set to $True then a dialog box appears to get the missing information 4 | # We will do a variable check later 5 | # https://blogs.technet.microsoft.com/heyscriptingguy/2011/05/22/use-powershell-to-make-mandatory-parameters/ 6 | [Parameter(Mandatory = $False)] 7 | [String]$_installdir 8 | ) 9 | 10 | Import-Module "$_installdir\patching\files\powershell\TaskUtils.ps1" 11 | 12 | Set-StrictMode -Version Latest 13 | $ErrorActionPreference = 'Stop' 14 | $ProgressPreference = 'SilentlyContinue' 15 | 16 | $script_block = { 17 | 18 | Set-StrictMode -Version Latest 19 | $ErrorActionPreference = 'Stop' 20 | $ProgressPreference = 'SilentlyContinue' 21 | 22 | function Test-PendingReboot 23 | { 24 | $systemInformation = New-Object -ComObject 'Microsoft.Update.SystemInfo' 25 | if ($systemInformation.RebootRequired) { 26 | return $true 27 | } 28 | 29 | # https://ilovepowershell.com/2015/09/10/how-to-check-if-a-server-needs-a-reboot/ 30 | # Adapted from https://gist.github.com/altrive/5329377 31 | # Based on 32 | if (Get-ChildItem "HKLM:\Software\Microsoft\Windows\CurrentVersion\Component Based Servicing\RebootPending" -EA Ignore) { return $true } 33 | if (Get-Item "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\WindowsUpdate\Auto Update\RebootRequired" -EA Ignore) { return $true } 34 | if (Get-ItemProperty "HKLM:\SYSTEM\CurrentControlSet\Control\Session Manager" -Name PendingFileRenameOperations -EA Ignore) { return $true } 35 | try { 36 | $util = [wmiclass]"\\.\root\ccm\clientsdk:CCM_ClientUtilities" 37 | $status = $util.DetermineIfRebootPending() 38 | if(($status -ne $null) -and $status.RebootPending){ 39 | return $true 40 | } 41 | } catch {} 42 | 43 | return $false 44 | } 45 | 46 | if (Test-PendingReboot) { 47 | exit 1 48 | } 49 | exit 0 50 | } 51 | 52 | $reboot_needed = Invoke-CommandAsLocal -ScriptBlock $script_block 53 | 54 | # Passing back the whole $reboot_needed.CommandOutput results in extra data in the bolt return 55 | # So using exit code to get the neccessary code we need 56 | $return_value = @{"reboot_required" = $false} 57 | if ($reboot_needed.ExitCode -eq 1) { 58 | $return_value.reboot_required = $true 59 | } 60 | 61 | $return_value | ConvertTo-Json 62 | exit 0 63 | -------------------------------------------------------------------------------- /tasks/snapshot_kvm.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Creates or deletes snapshots on a set of KVM/Libvirt hypervisors", 3 | "supports_noop": true, 4 | "implementations": [ 5 | { 6 | "name": "snapshot_kvm.sh", 7 | "requirements": ["shell"] 8 | } 9 | ], 10 | "parameters": { 11 | "vm_names": { 12 | "type": "Variant[String[1], Array[String[1]]]", 13 | "description": "List of VM names, in KVM/Libvirt these are called domains." 
14 | }, 15 | "snapshot_name": { 16 | "type": "Optional[String[1]]", 17 | "default": "Bolt Snapshot", 18 | "description": "Name of the snapshot" 19 | }, 20 | "snapshot_description": { 21 | "type": "Optional[String[1]]", 22 | "description": "Description of the snapshot" 23 | }, 24 | "snapshot_memory": { 25 | "type": "Optional[Boolean]", 26 | "default": false, 27 | "description": "Snapshot the VMs memory" 28 | }, 29 | "snapshot_quiesce": { 30 | "type": "Optional[Boolean]", 31 | "default": false, 32 | "description": "Quiesce the filesystem during the snapshot, can be a PITA." 33 | }, 34 | "action": { 35 | "type": "Enum['create', 'delete']", 36 | "description": "Action to perform on the snapshots. 'create' will create new snapshots on the VMs. 'delete' will delete snapshots on the VMs." 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /tasks/snapshot_kvm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Convert JSON list into spaced list for Bash to be happy with 4 | IFS=$'\n' vm_names=($(echo "$PT_vm_names" | sed -E -e 's/(\["|"\])//g' | sed -e 's/","/'"\n"'/g')) 5 | echo "vm_names = ${vm_names[*]}" 6 | echo "snapshot_name = $PT_snapshot_name" 7 | echo "snapshot_description = $PT_snapshot_description" 8 | echo "snapshot_memory = $PT_snapshot_memory" 9 | echo "snapshot_quiesce = $PT_snapshot_quiesce" 10 | echo "action = $PT_action" 11 | 12 | extra_args="" 13 | if [[ "${PT_snapshot_memory}" == "false" ]]; then 14 | extra_args="$extra_args --disk-only" 15 | fi 16 | if [[ "${PT_snapshot_quiesce}" == "true" ]]; then 17 | extra_args="$extra_args --quiesce" 18 | fi 19 | 20 | for vm in "${vm_names[@]}"; do 21 | echo "snapshot vm=${vm} action=${PT_action}" 22 | if [[ "${PT_action}" == "create" ]]; then 23 | virsh snapshot-create-as --domain "${vm}" \ 24 | --name "${PT_snapshot_name}" \ 25 | --description "${PT_snapshot_description}" \ 26 | --atomic $extra_args 27 | if [[ $? -ne 0 ]]; then 28 | echo "ERROR: Failed to create snapshot for VM '${vm}'" 29 | exit 1 30 | fi 31 | elif [[ "${PT_action}" == "delete" ]]; then 32 | virsh snapshot-delete --domain "${vm}" \ 33 | --snapshotname "${PT_snapshot_name}" 34 | if [[ $? -ne 0 ]]; then 35 | echo "ERROR: Failed to delete snapshot for VM '${vm}'" 36 | exit 1 37 | fi 38 | else 39 | echo "ERROR: action='${PT_action}' is not supported. Valid actions are: 'create', 'delete'" 40 | exit 1 41 | fi 42 | done 43 | 44 | echo "Snapshot operation completed successfully." -------------------------------------------------------------------------------- /tasks/update.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Execute OS updates on the target. For RedHat/CentOS this runs `yum update`. For Debian/Ubuntu runs `apt upgrade`. For Windows this runs Windows Update and `choco update`.", 3 | "implementations": [ 4 | { 5 | "name": "update_linux.sh", 6 | "requirements": ["shell"], 7 | "files": [ 8 | "patching/files/bash/os_test.sh", 9 | "patching/files/bash/update_rh.sh", 10 | "patching/files/bash/update_deb.sh", 11 | "patching/files/bash/update_sles.sh" 12 | ] 13 | }, 14 | { 15 | "name": "update_windows.ps1", 16 | "requirements": ["powershell"], 17 | "files": ["patching/files/powershell/TaskUtils.ps1"] 18 | } 19 | ], 20 | "parameters": { 21 | "provider": { 22 | "description": "What update provider to use. For Linux (RHEL, Debian, etc) this parameter is not used. 
For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 23 | "type": "Optional[String]" 24 | }, 25 | "names": { 26 | "description": "Name of the package(s) to update. If nothing is passed then all packages will be updated. Note: this currently only works for Linux, Windows support will be added in the future for both Windows Update and Chocolatey (TODO)", 27 | "type": "Optional[Array[String]]" 28 | }, 29 | "result_file": { 30 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. The data is written to a log file so that you can collect it later by running patching::history. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 31 | "type": "Optional[String[1]]" 32 | }, 33 | "log_file": { 34 | "description": "Log file for OS specific output during the patching process. This file will contain OS specific (RHEL/CentOS = yum history, Debian/Ubuntu = /var/log/apt/history.log, Windows = ??) data that this task used to generate its output. If no script name is passed on Linux hosts a default is used: /var/log/patching.log. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.log", 35 | "type": "Optional[String[1]]" 36 | } 37 | } 38 | } 39 | 40 | -------------------------------------------------------------------------------- /tasks/update_history.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Reads the update history from the JSON 'result_file'.", 3 | "implementations": [ 4 | { 5 | "name": "update_history_linux.sh", 6 | "requirements": ["shell"] 7 | }, 8 | { 9 | "name": "update_history_windows.ps1", 10 | "requirements": ["powershell"] 11 | } 12 | ], 13 | "parameters": { 14 | "result_file": { 15 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. This is data that was written by patching::update. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 16 | "type": "Optional[String[1]]" 17 | } 18 | } 19 | } 20 | 21 | -------------------------------------------------------------------------------- /tasks/update_history_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Reads the update history from the JSON 'result_file'.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "update_history_linux.sh", 7 | "requirements": ["shell"] 8 | } 9 | ], 10 | "parameters": { 11 | "result_file": { 12 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. This is data that was written by patching::update. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. 
If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 13 | "type": "Optional[String[1]]" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /tasks/update_history_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export RESULT_FILE="$PT_result_file" 4 | if [[ -z "$RESULT_FILE" ]]; then 5 | export RESULT_FILE="/var/log/patching.json" 6 | fi 7 | if [[ ! -e "$RESULT_FILE" ]]; then 8 | echo '{"installed": [], "upgraded": []}' 9 | exit 0 10 | fi 11 | 12 | # The result_file is a file where each record is a JSON dictionary. 13 | # Look for the last "{" line and print out everything in the file after that 14 | # to get our previous transaction. 15 | LAST_LOG=$(tac "$RESULT_FILE" | sed '/^{$/q' | tac) 16 | echo "$LAST_LOG" 17 | exit 0 18 | -------------------------------------------------------------------------------- /tasks/update_history_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Reads the update history from the JSON 'result_file'.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "update_history_windows.ps1", 7 | "requirements": ["powershell"] 8 | } 9 | ], 10 | "parameters": { 11 | "result_file": { 12 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. This is data that was written by patching::update. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 13 | "type": "Optional[String[1]]" 14 | } 15 | } 16 | } 17 | 18 | -------------------------------------------------------------------------------- /tasks/update_history_windows.ps1: -------------------------------------------------------------------------------- 1 | [CmdletBinding()] 2 | Param( 3 | # Mandatory is set to false. 
If Set to $True then a dialog box appears to get the missing information 4 | # We will do a variable check later 5 | # https://blogs.technet.microsoft.com/heyscriptingguy/2011/05/22/use-powershell-to-make-mandatory- parameters/ 6 | [Parameter(Mandatory = $False)] 7 | [String]$result_file, 8 | [String]$_installdir 9 | ) 10 | 11 | Set-StrictMode -Version Latest 12 | $ErrorActionPreference = 'Stop' 13 | $ProgressPreference = 'SilentlyContinue' 14 | 15 | if ($result_file -eq '') { 16 | $result_file = 'C:\ProgramData\patching\log\patching.json' 17 | } 18 | 19 | # if the result file does not exist, create it 20 | if (-not (Test-Path $result_file)) { 21 | New-Item -ItemType "file" -Path (Split-Path -Path $result_file) -Name (Split-Path -Path $result_file -Leaf) | Out-Null 22 | } 23 | 24 | $pattern_matches = Select-String -Path $result_file -Pattern "^{$" 25 | if ($pattern_matches) { 26 | # get the LAST matching line number of { , the start of a JSON document 27 | $last_line = $pattern_matches[-1].LineNumber 28 | 29 | # find the total number of lines in the file 30 | $measure = Get-Content -Path $result_file | Measure-Object 31 | # don't use .Lines, it doesn't account of empty lines at end of file 32 | $num_lines = $measure.Count 33 | 34 | # compute how many lines we need to read off the tail of the file 35 | # based on total lines - last line were found + 1 (includes the last_line match) 36 | $num_tail_lines = 1 + $num_lines - $last_line 37 | 38 | # read the last N lines from the file (starting at our match) 39 | $data = Get-Content -Path $result_file -Tail $num_tail_lines; 40 | 41 | Write-Output $data 42 | } 43 | -------------------------------------------------------------------------------- /tasks/update_linux.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Execute OS updates on the target. For RedHat/CentOS this runs `yum update`. For Debian/Ubuntu runs `apt upgrade`. For SLES this runs `zypper up`. For Windows this runs Windows Update and `choco update`.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "update_linux.sh", 7 | "requirements": ["shell"], 8 | "files": [ 9 | "patching/files/bash/os_test.sh", 10 | "patching/files/bash/update_rh.sh", 11 | "patching/files/bash/update_deb.sh", 12 | "patching/files/bash/update_sles.sh" 13 | ] 14 | } 15 | ], 16 | "parameters": { 17 | "provider": { 18 | "description": "What update provider to use. For Linux (RHEL, Debian, SUSE, etc.) this parameter is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 19 | "type": "Optional[String[1]]" 20 | }, 21 | "names": { 22 | "description": "Name of the package(s) to update. If nothing is passed then all packages will be updated. Note: this currently only works for Linux, Windows support will be added in the future for both Windows Update and Chocolatey (TODO)", 23 | "type": "Optional[Array[String]]" 24 | }, 25 | "result_file": { 26 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. The data is written to a log file so that you can collect it later by running patching::history. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. 
If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 27 | "type": "Optional[String[1]]" 28 | }, 29 | "log_file": { 30 | "description": "Log file for OS specific output during the patching process. This file will contain OS specific (RHEL/CentOS = yum history, Debian/Ubuntu = /var/log/apt/history.log, SLES = /var/log/zypp/history, Windows = ??) data that this task used to generate its output. If no script name is passed on Linux hosts a default is used: /var/log/patching.log. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.log", 31 | "type": "Optional[String[1]]" 32 | } 33 | } 34 | } 35 | 36 | -------------------------------------------------------------------------------- /tasks/update_linux.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PACKAGES="$PT_names" 4 | export RESULT_FILE="$PT_result_file" 5 | export LOG_FILE="$PT_log_file" 6 | if [[ -z "$LOG_FILE" ]]; then 7 | export LOG_FILE="/var/log/patching.log" 8 | fi 9 | if [[ -z "$RESULT_FILE" ]]; then 10 | export RESULT_FILE="/var/log/patching.json" 11 | fi 12 | if [[ ! -e "$LOG_FILE" ]]; then 13 | touch "$LOG_FILE" 14 | fi 15 | if [[ ! -e "$RESULT_FILE" ]]; then 16 | touch "$RESULT_FILE" 17 | fi 18 | 19 | # Run our OS tests, export OS_RELEASE 20 | source "${PT__installdir}/patching/files/bash/os_test.sh" 21 | 22 | # default 23 | STATUS=0 24 | 25 | case $OS_RELEASE in 26 | ################################################################################ 27 | RHEL | CENTOS | FEDORA | ROCKY | OL | ALMALINUX) 28 | # RedHat variant 29 | source "${PT__installdir}/patching/files/bash/update_rh.sh" 30 | STATUS=$? 31 | ;; 32 | ################################################################################ 33 | DEBIAN | UBUNTU) 34 | # Debian variant 35 | source "${PT__installdir}/patching/files/bash/update_deb.sh" 36 | STATUS=$? 37 | ;; 38 | ################################################################################ 39 | SLES) 40 | # SUSE variant 41 | source "${PT__installdir}/patching/files/bash/update_sles.sh" 42 | STATUS=$? 43 | ;; 44 | ################################################################################ 45 | *) 46 | echo "Unknown Operating System: ${OS_RELEASE}" 47 | STATUS=2 48 | ;; 49 | esac 50 | 51 | exit $STATUS 52 | -------------------------------------------------------------------------------- /tasks/update_windows.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Execute OS updates on the target. For RedHat/CentOS this runs `yum update`. For Debian/Ubuntu runs `apt upgrade`. For Windows this runs Windows Update and `choco update`.", 3 | "private": true, 4 | "implementations": [ 5 | { 6 | "name": "update_windows.ps1", 7 | "requirements": ["powershell"], 8 | "files": ["patching/files/powershell/TaskUtils.ps1"] 9 | } 10 | ], 11 | "parameters": { 12 | "provider": { 13 | "description": "What update provider to use. For Linux (RHEL, Debian, etc) this parameter is not used. For Windows the available values are: 'windows', 'chocolatey', 'all' (both 'windows' and 'chocolatey'). The default value for Windows is 'all'. If 'all' is passed and Chocolatey isn't installed then Chocolatey will simply be skipped. 
If 'chocolatey' is passed and Chocolatey isn't installed, then this will error.", 14 | "type": "Optional[String[1]]" 15 | }, 16 | "names": { 17 | "description": "Name of the package(s) to update. If nothing is passed then all packages will be updated. Note: this currently only works for Linux, Windows support will be added in the future for both Windows Update and Chocolatey (TODO)", 18 | "type": "Optional[Array[String]]" 19 | }, 20 | "result_file": { 21 | "description": "Log file for patching results. This file will contain the JSON output that is returned from these tasks. The data is written to a log file so that you can collect it later by running patching::history. If no script name is passed on Linux hosts a default is used: /var/log/patching.json. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.json", 22 | "type": "Optional[String[1]]" 23 | }, 24 | "log_file": { 25 | "description": "Log file for OS specific output during the patching process. This file will contain OS specific (RHEL/CentOS = yum history, Debian/Ubuntu = /var/log/apt/history.log, Windows = ??) data that this task used to generate its output. If no script name is passed on Linux hosts a default is used: /var/log/patching.log. If no script name is passed on Windows hosts a default is used: C:/ProgramData/patching/log/patching.log", 26 | "type": "Optional[String[1]]" 27 | } 28 | } 29 | } 30 | 31 | --------------------------------------------------------------------------------
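Taken together, the update and update_history tasks above form the module's apply-then-verify loop: patching::update appends a JSON record to result_file, and patching::update_history reads the most recent record back. A minimal Bolt CLI sketch; the 'app_servers' group and package names are placeholders:

    # Apply all available OS updates; results are appended to result_file
    # (default /var/log/patching.json on Linux, C:/ProgramData/patching/log/patching.json on Windows)
    bolt task run patching::update --targets app_servers

    # Update only specific packages (Linux only, per the 'names' parameter description)
    bolt task run patching::update --targets app_servers names='["openssl","kernel"]'

    # Read back the most recent result recorded by patching::update
    bolt task run patching::update_history --targets app_servers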
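The snapshot_kvm task runs virsh against the hypervisor rather than against the guests, so the --targets value is the KVM/Libvirt host. A sketch of wrapping a patch run with snapshots; the hypervisor and domain names are placeholders:

    # Create snapshots before patching the guests
    bolt task run patching::snapshot_kvm --targets kvm01.example.com \
      vm_names='["web01","web02"]' snapshot_name='pre-patch' action='create'

    # ... patch and verify the guests, e.g. with patching::update ...

    # Remove the snapshots once the patched guests look healthy
    bolt task run patching::snapshot_kvm --targets kvm01.example.com \
      vm_names='["web01","web02"]' snapshot_name='pre-patch' action='delete'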
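Finally, pre_post_update_linux.sh does nothing more than execute a site-provided hook (if it exists and is executable) and propagate its exit code, so failing a host out of a patch run is just a matter of exiting non-zero. One possible shape for such a hook at the default Linux path /opt/patching/bin/pre_update.sh; the service name is an assumption for illustration only:

    #!/bin/bash
    # Illustrative pre-update hook: stop the application before patches are applied.
    # A non-zero exit here is passed straight through by pre_post_update_linux.sh
    # and causes the patching::pre_update task to fail for this host.
    if ! systemctl stop myapp.service; then
        echo "failed to stop myapp.service cleanly" >&2
        exit 1
    fi
    exit 0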