├── .gitattributes ├── .github └── workflows │ └── main.yml ├── .gitignore ├── .markdownlint.json ├── .pre-commit-config.yaml ├── .prettierignore ├── .vscode └── settings.json ├── Jenkinsfile ├── LICENSE ├── Makefile ├── PSScriptAnalyzerSettings.psd1 ├── README.md ├── build.ps1 ├── build.sh ├── docs ├── AWS.MD ├── AZURE.MD ├── GCP.MD └── instructions.md ├── environment ├── readme.md ├── template-aws.json ├── template-azure.json ├── template-gcp.json ├── write-environmentFile.ps1 └── write-environmentFile.sh ├── hcl2 ├── Amazon Linux2 │ ├── apache-cassandra │ │ ├── amazon-ebs.cassandra.pkr.hcl │ │ ├── build.pkr.hcl │ │ ├── install-cassandra.sh │ │ ├── values.auto.pkrvars.hcl │ │ └── variables.pkr.hcl │ └── dse-cassandra │ │ ├── amazon-ebs.cassandra-dse.pkr.hcl │ │ ├── build.pkr.hcl │ │ ├── values.auto.pkrvars.hcl │ │ └── variables.pkr.hcl ├── ubuntu │ ├── amazon-ebs.base1604.pkr.hcl │ ├── build.pkr.hcl │ ├── values.auto.pkrvars.hcl │ └── variables.pkr.hcl ├── ubuntu2210 │ ├── amazon-ebs.base2210.pkr.hcl │ ├── build.pkr.hcl │ ├── values.auto.pkrvars.hcl │ └── variables.pkr.hcl └── windows │ ├── amazon-ebs.Windows2019.pkr.hcl │ ├── bootstrap_win.txt │ ├── build.Windows2019.pkr.hcl │ ├── variables.pkr.hcl │ └── win.auto.pkrvars.hcl ├── iam_assume_role.ps1 ├── iam_assume_role.sh ├── packfiles ├── Amazon Linux2 │ ├── amazon-ebs.cassandra-dse.json │ └── amazon-ebs.cassandra.json ├── CentOS │ ├── amazon-ebs.centos.json │ └── azure-arm.centos.json ├── Debian │ ├── azure-arm.debian.json │ └── googlecompute.debian.json ├── Redhat │ ├── amazon-ebs.application-load-balancer-proxy.json │ ├── amazon-ebs.base.json │ ├── amazon-ebs.confluent-broker.json │ ├── amazon-ebs.confluent-connect.json │ ├── amazon-ebs.confluent-control-center.json │ ├── amazon-ebs.confluent-schema.json │ ├── amazon-ebs.confluent-zookeeper.json │ ├── amazon-ebs.jenkins-master.json │ ├── amazon-ebs.jmeter.json │ └── azure-arm.rhel.json ├── Ubuntu │ ├── amazon-ebs.base1604.json │ ├── amazon-ebs.base1804.json │ 
├── amazon-ebs.cassandra.json │ ├── amazon-ebs.vault.json │ ├── azure-arm.base.json │ ├── azure-arm.server.json │ ├── azure-arm.ubuntu.json │ └── azure-arm.ubuntu_quickstart.json ├── Windows │ ├── README.MD │ ├── amazon-ebs.winserver2012r2.json │ ├── amazon-ebs.winserver2016.json │ ├── amazon-ebs.winserver2019.json │ ├── azure-arm.windows.json │ ├── azure-arm.windows_custom_imagee.json │ ├── azure-arm.windows_quickstart.json │ ├── bootstrap_win.txt │ └── setup_winrm.txt ├── fix.ps1 ├── freebsd │ └── azure-arm.freebsd.json ├── null │ └── null.bubble.json └── suse │ └── azure-arm.suse.json ├── provisioners ├── ansible │ ├── playbooks │ │ ├── aws-ssm.yml │ │ ├── cassandra.yml │ │ ├── cloudwatch-albp.yml │ │ ├── cloudwatch-confluent.yml │ │ ├── cloudwatch-metrics.yml │ │ ├── confluent-broker.yml │ │ ├── confluent-connect-distributed.yml │ │ ├── confluent-control-center.yml │ │ ├── confluent-private-ssl.yml │ │ ├── confluent-schema.yml │ │ ├── confluent-zookeeper.yml │ │ ├── confluent.yml │ │ ├── jenkins-master.yml │ │ ├── jmeter.yml │ │ ├── mongodb.yml │ │ ├── ssl_CA.yml │ │ └── zookeeper-node.yml │ └── roles │ │ ├── awslogs-agent │ │ ├── files │ │ │ └── awslog.conf │ │ └── tasks │ │ │ └── main.yml │ │ ├── cloudwatch-logs │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── Debian.yml │ │ │ ├── DebianInstall.yml │ │ │ ├── RedHat.yml │ │ │ ├── conf.yml │ │ │ └── main.yml │ │ ├── templates │ │ │ └── etc │ │ │ │ ├── aws.conf.j2 │ │ │ │ ├── awslogs │ │ │ │ ├── awscli.conf.j2 │ │ │ │ ├── awslogs.conf.j2 │ │ │ │ └── awslogs.logging.conf.j2 │ │ │ │ └── logrotate.d │ │ │ │ ├── awslogs_debian.j2 │ │ │ │ └── awslogs_redhat.j2 │ │ └── vars │ │ │ └── main.yml │ │ ├── cloudwatch-metrics │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── config.json │ │ └── vars │ │ │ └── main.yml │ │ ├── confluent.common │ │ 
└── tasks │ │ │ ├── install.yml │ │ │ └── main.yml │ │ ├── confluent.connect-distributed │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── connect-distributed │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── configure.yml │ │ │ ├── install.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── connect-distributed.properties.j2 │ │ │ ├── connect-distributed_sasl_ssl.properties.j2 │ │ │ ├── connect-distributed_ssl.properties.j2 │ │ │ ├── connect-env.j2 │ │ │ ├── connect-log4j.properties.j2 │ │ │ ├── connect.service.j2 │ │ │ ├── connect_jaas.j2 │ │ │ └── krb5.conf.j2 │ │ ├── confluent.control-center │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── control-center.properties.j2 │ │ │ ├── control-center_sasl_ssl.properties.j2 │ │ │ └── control-center_ssl.properties.j2 │ │ ├── confluent.kafka-broker │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── kafka-server-start │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── configure.yml │ │ │ ├── install.yml │ │ │ └── main.yml │ │ └── templates │ │ │ ├── kafka-env.j2 │ │ │ ├── kafka-log4j.properties.j2 │ │ │ ├── kafka.service.j2 │ │ │ ├── kafka_server_jaas.j2 │ │ │ ├── krb5.conf.j2 │ │ │ ├── server.properties.j2 │ │ │ ├── server_sasl_ssl.properties.j2 │ │ │ └── server_ssl.properties.j2 │ │ ├── confluent.schema-registry │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── krb5.conf.j2 │ │ │ ├── schema-registry.properties.j2 │ │ │ ├── schema-registry_sasl_ssl.properties.j2 │ │ │ ├── schema-registry_ssl.properties.j2 │ │ │ └── schema_jaas.conf.j2 │ │ ├── confluent.ssl_CA │ │ ├── meta │ │ │ └── main.yml │ │ ├── scripts │ │ │ └── certs-create.sh │ │ └── tasks │ │ │ └── main.yml │ │ ├── confluent.zookeeper │ │ 
├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── zookeeper-server-start │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── krb5.conf.j2 │ │ │ ├── zookeeper-log4j.properties.j2 │ │ │ ├── zookeeper.env.j2 │ │ │ ├── zookeeper.properties.j2 │ │ │ ├── zookeeper.service.j2 │ │ │ ├── zookeeper_jaas.conf.j2 │ │ │ └── zookeeper_sasl.properties.j2 │ │ ├── debugging_tools │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── main.yml │ │ │ └── network.yml │ │ ├── devoinc.openjdk │ │ ├── LICENSE │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ ├── .galaxy_install_info │ │ │ └── main.yml │ │ ├── molecule │ │ │ └── default │ │ │ │ ├── Dockerfile.j2 │ │ │ │ ├── INSTALL.rst │ │ │ │ ├── molecule.yml │ │ │ │ ├── playbook.yml │ │ │ │ └── tests │ │ │ │ └── test_default.py │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── etc │ │ │ │ └── ansible │ │ │ │ └── facts.d │ │ │ │ └── java.fact.j2 │ │ └── vars │ │ │ ├── RedHat-java-11.yml │ │ │ └── RedHat-java-8.yml │ │ ├── dhoeric.aws-ssm │ │ ├── .travis.yml │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── policy.xml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ ├── .galaxy_install_info │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── register.yml │ │ ├── tests │ │ │ ├── inventory │ │ │ └── test.yml │ │ └── vars │ │ │ └── main.yml │ │ ├── jmeter │ │ └── tasks │ │ │ └── main.yml │ │ ├── openjdk │ │ └── tasks │ │ │ └── main.yml │ │ ├── oracle_sdk │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── install.yml │ │ │ └── main.yml │ │ ├── ssmagent │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── policy.xml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── tests │ │ │ ├── inventory │ │ │ └── test.yml │ │ └── vars │ │ │ └── main.yml │ │ ├── stunnel │ │ ├── defaults │ │ │ └── main.yaml │ │ ├── files 
│ │ │ └── stunnel.service │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ │ └── sudoers │ │ ├── README.md │ │ ├── defaults │ │ └── main.yml │ │ ├── filter_plugins │ │ └── to_list.py │ │ ├── meta │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ ├── sudoer_spec.j2 │ │ ├── sudoers_nospec.j2 │ │ └── sudoers_plus_spec.j2 │ │ └── test │ │ ├── ansible-setup.sh │ │ └── integration │ │ └── default │ │ ├── default.yml │ │ └── serverspec │ │ ├── default_spec.rb │ │ └── spec_helper.rb └── scripts │ └── linux │ ├── install-cassandra-dse.sh │ ├── install-cassandra.sh │ ├── install_awslogs_agent.sh │ ├── open_confluent_firewall.sh │ ├── open_nginx_firewall.sh │ ├── redhat │ ├── install_ansible.sh │ ├── install_aws_ssm.sh │ ├── install_awscli.sh │ ├── install_docker.sh │ └── install_terraform.sh │ └── ubuntu │ ├── install_ansible.sh │ ├── install_aws_ssm.sh │ ├── install_aws_ssm2210.sh │ ├── install_azcli.sh │ ├── install_cassandra.sh │ ├── install_mastodon2210.sh │ ├── install_powershell.sh │ └── install_terraform.sh └── setup-packer.sh /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bump version 3 | on: 4 | push: 5 | branches: 6 | - master 7 | jobs: 8 | build: 9 | name: versioning 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@master 13 | - name: Bump version and push tag 14 | uses: anothrNick/github-tag-action@master 15 | env: 16 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 17 | DEFAULT_BUMP: patch 18 | WITH_V: "true" 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled files 2 | *.tfstate 3 | *.tfstate.backup 4 | 5 | # Module directory 6 | .terraform 7 | .idea 8 | *.iml 9 | .build-harness 10 | build-harness 11 | *.ini 12 | 13 | *~HEAD 14 | *backup 15 | # Cache 
objects 16 | packer_cache/ 17 | 18 | # For built boxes 19 | *.box 20 | personal* 21 | aws-230-300-2* 22 | *.orig 23 | Personal* 24 | -------------------------------------------------------------------------------- /.markdownlint.json: -------------------------------------------------------------------------------- 1 | { 2 | "MD013": false, 3 | "MD033": { 4 | "allowed_elements": ["br", "img"] 5 | }, 6 | "MD041": false 7 | } 8 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yamllint disable rule:line-length 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.3.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-yaml 10 | - id: check-added-large-files 11 | - id: detect-aws-credentials 12 | - id: detect-private-key 13 | - repo: https://github.com/Lucas-C/pre-commit-hooks 14 | rev: v1.3.1 15 | hooks: 16 | - id: forbid-tabs 17 | exclude_types: [python, javascript, dtd, markdown, makefile, xml] 18 | exclude: binary|\.bin$ 19 | - repo: https://github.com/jameswoolfenden/pre-commit.git 20 | rev: v0.1.50 21 | hooks: 22 | - id: terraform-fmt 23 | - repo: https://github.com/detailyang/pre-commit-shell 24 | rev: 1.0.5 25 | hooks: 26 | - id: shell-lint 27 | - repo: https://github.com/igorshubovych/markdownlint-cli 28 | rev: v0.32.2 29 | hooks: 30 | - id: markdownlint 31 | - repo: https://github.com/prettier/prettier 32 | rev: 1.19.1 33 | hooks: 34 | - id: prettier 35 | exclude_types: [markdown] 36 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | README.md 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "cSpell.words": [ 3 | "API's", 4 | "Cloudposse", 5 | "Debian", 6 | "Mgmt", 7 | "Playbook", 8 | "REPLACEME", 9 | "RHEL", 10 | "admin", 11 | "albp", 12 | "ansible", 13 | "artifacts", 14 | "awscli", 15 | "convertfrom", 16 | "deprovision", 17 | "diskio", 18 | "enablement", 19 | "gcloud", 20 | "googleapi", 21 | "googlecompute", 22 | "googleplus", 23 | "hostnames", 24 | "iowait", 25 | "jameswoolfenden", 26 | "jmeter", 27 | "killall", 28 | "nginx", 29 | "openjdk", 30 | "packfile", 31 | "packfiles", 32 | "powershelll", 33 | "provisioners", 34 | "reddit", 35 | "redhat", 36 | "runas", 37 | "setup", 38 | "spec", 39 | "specs", 40 | "stunnel", 41 | "sudoer", 42 | "sudoers", 43 | "syslog", 44 | "totalcpu", 45 | "urite", 46 | "versioning", 47 | "waagent", 48 | "xenial" 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | node { 2 | properties([disableConcurrentBuilds(), [$class: 'GithubProjectProperty', displayName: 'Build', projectUrlStr: 'https://github.com/jameswoolfenden/packer-by-example']]) 3 | 4 | parameters { 5 | string(name: 'AWS_REGION', defaultValue: 'eu-west-1', description: 'Specify the AWS Region') 6 | string(name: 'AWS_ROLE', description: 'Specify the AWS ROLE') 7 | string(name: 'AWS_ACCOUNT', description: 'Specify the AWS ACCOUNT') 8 | string(name: 'ENVIRONMENT', description: 'Specify the AWS ACCOUNT Environment to create the AMI from') 9 | string(name: 'PACKFILE', description: 'Specify the json packer file to build') 10 | } 11 | 12 | wrap([$class: 'AnsiColorBuildWrapper', colorMapName: 'xterm']) { 13 | stage('Clear workspace') { 14 | step([$class: 'WsCleanup']) 15 | } 16 | 17 | try { 18 | sh("eval \$(aws ecr get-login --region eu-west-1 --no-include-email | sed 's|https://||')") 19 | stage('Pull') { 20 | checkout scm 21 | } 22 | 
docker.image('jenkins').inside('-v /var/lib/jenkins/.ssh:/var/lib/jenkins/.ssh -v /etc/passwd:/etc/passwd') { 23 | stage('Build AMI') { 24 | sh '''cat <build.ps1 -packfile .\packfiles\redhat\base.json -environment .\environmment\jameswoolfenden-sandbox.json 10 | .NOTES 11 | Author: James Woolfenden 12 | Date: January 10, 2019 13 | #> 14 | 15 | param( 16 | [Parameter(Mandatory=$true)] 17 | [string]$packfile, 18 | [Parameter(Mandatory=$true)] 19 | [string]$environment) 20 | 21 | function Invoke-Packer 22 | { 23 | <# 24 | .Description 25 | Get-Function displays the name and syntax of all functions in the session. 26 | 27 | .Example 28 | Invoke-Packer -packfile .\packfiles\redhat\base.json -environment .\environmment\jameswoolfenden-sandbox.json 29 | #> 30 | 31 | param( 32 | [Parameter(Mandatory=$true)] 33 | [string]$packfile, 34 | [Parameter(Mandatory=$true)] 35 | [string]$environment) 36 | 37 | Write-Output "Checking $environment Found: $(test-path $environment)" 38 | Write-Output "Checking $packfile Found: $(test-path $packfile)" 39 | 40 | packer validate -var-file="$environment" $packfile 41 | packer build -var-file="$environment" $packfile 42 | } 43 | 44 | Invoke-Packer -packfile $packfile -environment $environment 45 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Wraps up Packer with support for environment files and Packer Debug flags 4 | # 5 | # 6 | # To use: 7 | # 8 | # $ build.sh -p .\packfiles\linux\.base.json -e .\environment\myaccount.json -d false 9 | # 10 | set -e 11 | 12 | usage(){ 13 | echo "Usage: $0 -p packfile -e environment -d false" 14 | exit 1 15 | } 16 | 17 | if [ "$#" -lt 4 ]; then 18 | usage; 19 | fi 20 | 21 | while getopts p:e:d: option 22 | do 23 | case "${option}" 24 | in 25 | p) packfile=${OPTARG};; 26 | e) environment=${OPTARG};; 27 | d) debug=${6:-false};; 28 | -h|--help) usage 29 | 
exit ;; 30 | --) shift 31 | break ;; 32 | *) usage ;; 33 | esac 34 | done 35 | 36 | # example 37 | # .\build.sh -environment ./environment/sandbox.json -packfile ./packfiles/consul-vault 38 | echo "packfile: $packfile" 39 | echo "environment: $environment" 40 | 41 | packer validate -var-file="${environment}" "${packfile}" 42 | if [ "$debug" = "true" ]; then 43 | packer build -debug -on-error=ask -var-file="${environment}" "${packfile}" 44 | else 45 | packer build -var-file="${environment}" "${packfile}" 46 | fi 47 | -------------------------------------------------------------------------------- /docs/AWS.MD: -------------------------------------------------------------------------------- 1 | # AWS AMI 2 | 3 | These examples target the customisation of existing AWS AMI's, usually from AWS or from trusted sources such as Canonicals AWS account. You can find the details of an AMI from the command line. 4 | 5 | ## AWS Set-up 6 | 7 | You will need to set-up and configured your AWS with the AWS **configure** command: 8 | 9 | ```cli 10 | $aws configure 11 | AWS Access Key ID [********************]: 12 | AWS Secret Access Key [*******************]: 13 | Default region name [eu-west-1]: 14 | Default output format [json]: 15 | ``` 16 | 17 | Packer respects the AWS Authentication chain, so it will check your default profile and if not set it look for environment variables. 
18 | These are the environment variables it looks for: 19 | 20 | ```cli 21 | AWS_ACCESS_KEY_ID 22 | AWS_SECRET_ACCESS_KEY 23 | AWS_SESSION_TOKEN 24 | ``` 25 | 26 | A simple test of your auth set-up is to see what S3 buckets you can list: 27 | 28 | ```cli 29 | aws s3 ls 30 | ``` 31 | 32 | With the authentication confirmed it's time to run Packer. I have chosen to use a wrapper script and you can call it from anywhere: 33 | 34 | ```bash 35 | ./build.sh -p ./packfiles/RedHat/amazon-ebs.base.json -e personal.json 36 | ``` 37 | 38 | Additionally in the root you will find the script **iam_assume_role**, which allows you to assume an AWS role to build your AMI's. 39 | -------------------------------------------------------------------------------- /docs/AZURE.MD: -------------------------------------------------------------------------------- 1 | # Azure Images 2 | 3 | ## Azure Set-up 4 | 5 | (Some of the packerfiles are modified examples from the Packer Source code repository). 6 | 7 | ## Pre-requisites 8 | 9 | Packer instructions here 10 | Needs an Azure resource group and storage container to store the completed VHDs in. If you have multiple AZ subscriptions you also need to set the current Sub. Using the Az cli: 11 | 12 | ```cli 13 | az account set --subscription xxxxxxxxxx-xxxx-xxxx-xxxxxxxxx 14 | ``` 15 | 16 | ```cli 17 | $LOCATION="uksouth" 18 | $GROUPNAME="packer" 19 | 20 | az group create --name $GROUPNAME --location $LOCATION 21 | az storage account create --name packfiles --resource-group $GROUPNAME --location $LOCATION --sku Standard_LRS --kind Storage 22 | ``` 23 | 24 | Auth comes from client_id and secret passed in as env variables, you will need to create a user to get the Auth. 
25 | 26 | ```cli 27 | az ad sp create-for-rbac -n "Packer" --role contributor \ 28 | --scopes /subscriptions/{SubID} 29 | ``` 30 | 31 | Then add these to your shell: 32 | 33 | ```bash 34 | export ARM_CLIENT_SECRET="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 35 | export ARM_CLIENT_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 36 | export ARM_SUBSCRIPTION_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 37 | export ARM_TENANT_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 38 | ``` 39 | 40 | or 41 | 42 | ```powershell 43 | $env:ARM_CLIENT_SECRET="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 44 | $env:ARM_CLIENT_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 45 | $env:ARM_SUBSCRIPTION_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 46 | $env:ARM_TENANT_ID="xxxxxx-xxxx-xxxx-xxxx-xxxxxxx" 47 | ``` 48 | 49 | The CentOS example additionally uses an ssh password and this needs to be preset. 50 | 51 | ```bash 52 | export ARM_SSH_PASS="cent0$73" 53 | ``` 54 | 55 | ```powershell 56 | $env:ARM_SSH_PASS="cent0$73" 57 | ``` 58 | 59 | You can run 60 | 61 | ```bash 62 | ./build.sh -p ./packfiles/freebsd/azure-arm.freebsd.json -e ./environment/template-azure.json 63 | ``` 64 | 65 | or 66 | 67 | ```powershell 68 | .\build.ps1 -packfile .\packfiles\freebsd\azure-arm.freebsd.json -environment .\environment\template-azure.json 69 | ``` 70 | 71 | ## Where's my image 72 | 73 | 74 | -------------------------------------------------------------------------------- /docs/instructions.md: -------------------------------------------------------------------------------- 1 | This Repository contains a number of examples for using Packer, with different OS and Cloud Platforms. 2 | 3 | Instructions for each Cloud provider are here: 4 | 5 | - [AWS](docs/AWS.MD) 6 | - [GCP](docs/GCP.MD) 7 | - [AZURE](docs/AZURE.MD) 8 | 9 | There are several different OS examples, Windows and Linux, and different versions of each. 10 | The "packfiles" have examples of using basic features of scripts or Ansible to configure your images, as well as versioning the AMI's. 
11 | -------------------------------------------------------------------------------- /environment/readme.md: -------------------------------------------------------------------------------- 1 | # Environment Folder 2 | 3 | The property files here are to populate the packer variables for environment. 4 | The file template.json is an example file. 5 | 6 | ## To populate the environment files 7 | 8 | ## For AWS 9 | 10 | `aws ec2 describe-vpcs` 11 | 12 | `aws ec2 describe-subnets` 13 | 14 | `aws sts get-caller-identity --output text --query 'Account'` 15 | 16 | How do i find the AMI's on which to base my work: 17 | () 18 | 19 | In Powershell 20 | 21 | ```powershell 22 | aws ec2 describe-images --filter Name="name",Values="CentOS7*"|convertfrom-json 23 | aws ec2 describe-images --filter Name="ProductCode",Values="aw0evgkw8e5c1q413zgy5pjce"|convertfrom-json 24 | ``` 25 | 26 | In Bash 27 | 28 | ```Bash 29 | aws ec2 describe-images \ 30 | --owners 679593333241 \ 31 | --filters \ 32 | Name=name,Values='CentOS Linux 7 x86_64 HVM EBS\*' \ 33 | Name=architecture,Values=x86_64 \ 34 | Name=root-device-type,Values=ebs \ 35 | --query 'sort_by(Images, &Name)[-1].ImageId' \ 36 | --output text 37 | ``` 38 | 39 | ## Automated Method 40 | 41 | To run to make environment file. 
42 | 43 | In Powershell 44 | 45 | ```Powershell 46 | .\write-environmentFile.ps1 -BaseName template 47 | ``` 48 | 49 | In Bash 50 | 51 | ```Bash 52 | .\write-environmentFile.sh template 53 | ``` 54 | -------------------------------------------------------------------------------- /environment/template-aws.json: -------------------------------------------------------------------------------- 1 | { 2 | "instance_type": "t2.micro", 3 | "vpc_id": "", 4 | "subnet_id": "", 5 | "ami_users": "", 6 | "aws_region": "" 7 | } 8 | -------------------------------------------------------------------------------- /environment/template-azure.json: -------------------------------------------------------------------------------- 1 | { 2 | "location": "uksouth", 3 | "vm_size": "Standard_A2", 4 | "resourcegroup": "packer", 5 | "storageaccount": "packerfiles" 6 | } 7 | -------------------------------------------------------------------------------- /environment/template-gcp.json: -------------------------------------------------------------------------------- 1 | { 2 | "project_id": "examplea" 3 | } 4 | -------------------------------------------------------------------------------- /environment/write-environmentFile.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | Creates an environment file for use with Packer packfiles 4 | .DESCRIPTION 5 | The Packer files in the packfiles folder expect certain variables to come from and environment file. 6 | This script is designed to create these for you. 7 | .PARAMETER Path 8 | The path to the . 
9 | .EXAMPLE 10 | C:\PS>get-environment.ps1 11 | This script runs without prompt or parameters 12 | .NOTES 13 | Author: James Woolfenden 14 | Date: January 10, 2019 15 | #> 16 | 17 | 18 | Param( 19 | [string]$basetemplate = "template-aws", 20 | [ValidateSet('AWS','Azure','GCP')] 21 | [string]$provider="AWS") 22 | 23 | function New-AWSEnvironment { 24 | param([string]$basetemplate = "template-aws") 25 | 26 | #locally defined and used 27 | [string]$accountid|Out-Null 28 | [string]$templatename|Out-Null 29 | [string]$vpc|Out-Null 30 | [string]$subnet|Out-Null 31 | 32 | $vpc=aws ec2 describe-vpcs --output text --query 'Vpcs[0].{VpcId:VpcId}' 33 | $subnet=aws ec2 describe-subnets --output text --query 'Subnets[0].{SubnetId:SubnetId}' 34 | $accountid=aws sts get-caller-identity --output text --query 'Account' 35 | 36 | Write-Output "$(get-date) - Talking to AWS account $accountid" 37 | $templatename=aws iam list-account-aliases --output text --query 'AccountAliases' 38 | 39 | Write-Output "$(get-date) - imported template .\$basetemplate.json" 40 | $envtemplate=get-content .\$basetemplate.json|convertfrom-json 41 | $envtemplate.ami_users=$accountid 42 | $envtemplate.vpc_id=$vpc 43 | $envtemplate.subnet_id=$subnet 44 | $envtemplate.aws_region=aws configure get region 45 | $envtemplate|ConvertTo-Json| Set-Content -Path ".\$templatename.json" 46 | 47 | Write-Output "$(get-date) - written .\$templatename.json" 48 | } 49 | 50 | function New-GCPEnvironment {} 51 | function New-AzureEnvironment {} 52 | 53 | switch ($provider) { 54 | AWS { 55 | New-AWSEnvironment -basetemplate $basetemplate 56 | } 57 | GCP { 58 | New-GCPEnvironment 59 | } 60 | Azure { 61 | New-AzureEnvironment 62 | } 63 | Default {} 64 | } 65 | -------------------------------------------------------------------------------- /environment/write-environmentFile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "TODO" 3 | 
-------------------------------------------------------------------------------- /hcl2/Amazon Linux2/apache-cassandra/amazon-ebs.cassandra.pkr.hcl: -------------------------------------------------------------------------------- 1 | source "amazon-ebs" "cassandra" { 2 | ami_description= "amazon cassandra AMI" 3 | ami_name = "cassandra-BASE-v${var.BUILD_NUMBER}-{{timestamp}}-AMI" 4 | ami_users = var.ami_users 5 | ami_virtualization_type = "hvm" 6 | associate_public_ip_address= var.associate_public_ip_address 7 | instance_type = var.instance_type 8 | region = var.region 9 | run_tags { 10 | Name= "amazon-cassandra-packer" 11 | Application= "cassandra" 12 | } 13 | spot_price = "auto" 14 | ssh_username= "ec2-user" 15 | ssh_interface= var.ssh_interface 16 | 17 | subnet_id=var.subnet_id 18 | 19 | source_ami_filter { 20 | filters { 21 | virtualization-type= "hvm" 22 | name= "amzn2-ami-hvm-*-x86_64-ebs" 23 | root-device-type= "ebs" 24 | } 25 | most_recent= true 26 | owners= ["amazon"] 27 | } 28 | 29 | temporary_key_pair_name= "amazon-packer-{{timestamp}}" 30 | 31 | vpc_id=var.vpc_id 32 | 33 | tags { 34 | OS_Version = "Amazon 2 linux" 35 | Version = var.BUILD_NUMBER 36 | Application = "Cassandra Image" 37 | Runner = "EC2" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /hcl2/Amazon Linux2/apache-cassandra/build.pkr.hcl: -------------------------------------------------------------------------------- 1 | build { 2 | sources=[ 3 | "source.amazon-ebs.cassandra" 4 | ] 5 | 6 | provisioner "shell" { 7 | scripts=[ 8 | "{{ template_dir }}install-cassandra.sh" 9 | ] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /hcl2/Amazon Linux2/apache-cassandra/install-cassandra.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo yum install java-1.8.0-openjdk -y 3 | 4 | cat < 2 | # Set administrator password 3 | net user Administrator 
SuperS3cr3t!!!! 4 | wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE 5 | 6 | # First, make sure WinRM can't be connected to 7 | netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block 8 | 9 | # Delete any existing WinRM listeners 10 | winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null 11 | winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null 12 | 13 | # Disable group policies which block basic authentication and unencrypted login 14 | 15 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowBasic -Value 1 16 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowUnencryptedTraffic -Value 1 17 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowBasic -Value 1 18 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowUnencryptedTraffic -Value 1 19 | 20 | 21 | # Create a new WinRM listener and configure 22 | winrm create winrm/config/listener?Address=*+Transport=HTTP 23 | winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}' 24 | winrm set winrm/config '@{MaxTimeoutms="7200000"}' 25 | winrm set winrm/config/service '@{AllowUnencrypted="true"}' 26 | winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}' 27 | winrm set winrm/config/service/auth '@{Basic="true"}' 28 | winrm set winrm/config/client/auth '@{Basic="true"}' 29 | 30 | # Configure UAC to allow privilege elevation in remote shells 31 | $Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System' 32 | $Setting = 'LocalAccountTokenFilterPolicy' 33 | Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force 34 | 35 | # Configure and restart the WinRM Service; Enable the required firewall exception 36 | Stop-Service -Name WinRM 37 | Set-Service -Name WinRM -StartupType Automatic 38 | netsh advfirewall firewall set rule 
name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any 39 | Start-Service -Name WinRM 40 | 41 | -------------------------------------------------------------------------------- /hcl2/windows/build.Windows2019.pkr.hcl: -------------------------------------------------------------------------------- 1 | build { 2 | sources = [ 3 | "source.amazon-ebs.Windows2019" 4 | ] 5 | provisioner "powershell" { 6 | inline = ["iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))|out-null" 7 | , "choco install javaruntime -y -force"] 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /hcl2/windows/variables.pkr.hcl: -------------------------------------------------------------------------------- 1 | variable "vpc_id" { 2 | description = "The VPC you're building AMI's in" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "The AWS region you're using" 8 | type = string 9 | } 10 | 11 | variable "subnet_id" { 12 | description = "The Subnet to build the AMI inm that's ssh'able" 13 | type = string 14 | } 15 | -------------------------------------------------------------------------------- /hcl2/windows/win.auto.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | vpc_id = "vpc-0e2e925de622375b5" 2 | region = "eu-west-2" 3 | subnet_id = "subnet-05f8f3c120238ca8d" 4 | -------------------------------------------------------------------------------- /iam_assume_role.ps1: -------------------------------------------------------------------------------- 1 | <# 2 | .SYNOPSIS 3 | A wrapper script for Assuming different AWS roles, useful for roles and specifically cross account roles. 4 | .DESCRIPTION 5 | Wraps up support for AWS role support 6 | .PARAMETER Path 7 | The path to the . 
8 | .EXAMPLE 9 | C:\PS>.\code\bumsonseats\iam_assume_role.ps1 -AccountNo 12121212121 -Role MarcusDogsbody 10 | .NOTES 11 | Author: James Woolfenden 12 | Date: January 10, 2019 13 | #> 14 | 15 | Param( 16 | [Parameter(Mandatory=$true)] 17 | [string]$AccountNo, 18 | [Parameter(Mandatory=$true)] 19 | [string]$Role, 20 | [string]$SESSION_NAME = "PACKER" 21 | ) 22 | 23 | 24 | function iam_assume_role 25 | { 26 | <# 27 | .Description 28 | iam_assume_role allows you to run as a different role in a different account 29 | 30 | .Example 31 | iam_assume_role -AccountNo $AccountNo -Role SuperAdmin 32 | #> 33 | Param( 34 | [Parameter(Mandatory=$true)] 35 | [string]$AccountNo, 36 | [Parameter(Mandatory=$true)] 37 | [string]$Role 38 | ) 39 | 40 | Write-Output "AccountNo: $AccountNo" 41 | Write-Output "Role : $Role" 42 | 43 | $ARN="arn:aws:iam::$($AccountNo):role/$Role" 44 | Write-Output "ARN : $ARN" 45 | Write-Output "aws sts assume-role --role-arn $ARN --role-session-name $SESSION_NAME --duration-seconds 3600" 46 | $Creds=aws sts assume-role --role-arn $ARN --role-session-name $SESSION_NAME --duration-seconds 3600 |convertfrom-json 47 | 48 | [Environment]::SetEnvironmentVariable("AWS_DEFAULT_REGION","eu-west-2") 49 | [Environment]::SetEnvironmentVariable("AWS_ACCESS_KEY_ID",$creds.Credentials.AccessKeyId) 50 | [Environment]::SetEnvironmentVariable("AWS_ACCESS_KEY",$creds.Credentials.AccessKeyId) 51 | [Environment]::SetEnvironmentVariable("AWS_SECRET_ACCESS_KEY",$creds.Credentials.SecretAccessKey) 52 | [Environment]::SetEnvironmentVariable("AWS_SECRET_KEY", $creds.Credentials.SecretAccessKey) 53 | [Environment]::SetEnvironmentVariable("AWS_SESSION_TOKEN",$creds.Credentials.SessionToken) 54 | } 55 | 56 | iam_assume_role -AccountNo $AccountNo -Role $Role 57 | -------------------------------------------------------------------------------- /iam_assume_role.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Assume the 
given role, and print out a set of environment variables 4 | # for use with aws cli. 5 | # 6 | # To use: 7 | # 8 | # $ eval $(./iam-assume-role.sh) 9 | # 10 | 11 | set -e 12 | 13 | # Clear out existing AWS session environment, or the awscli call will fail 14 | unset AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY AWS_SESSION_TOKEN AWS_SECURITY_TOKEN 15 | 16 | # Old ec2 tools use other env vars 17 | unset AWS_ACCESS_KEY AWS_SECRET_KEY AWS_DELEGATION_TOKEN 18 | 19 | ROLE="${1:-SecurityMonkey}" 20 | ACCOUNT="${2:-123456789}" 21 | DURATION="${3:-900}" 22 | NAME="${4:-$LOGNAME@$(hostname -s)}" 23 | ARN="arn:aws:iam::${ACCOUNT}:role/$ROLE" 24 | echo "ARN: $ARN" >&2 25 | 26 | KST=($(aws sts assume-role --role-arn "$ARN" \ 27 | --role-session-name "$NAME" \ 28 | --duration-seconds "$DURATION" \ 29 | --query "[Credentials.AccessKeyId,Credentials.SecretAccessKey,Credentials.SessionToken]" \ 30 | --output text)) 31 | 32 | echo "export AWS_DEFAULT_REGION=\"eu-west-2\"" 33 | echo "export AWS_ACCESS_KEY_ID=\"${KST[0]}\"" 34 | echo "export AWS_ACCESS_KEY=\"${KST[0]}\"" 35 | echo "export AWS_SECRET_ACCESS_KEY=\"${KST[1]}\"" 36 | echo "export AWS_SECRET_KEY=\"${KST[1]}\"" 37 | echo "export AWS_SESSION_TOKEN='${KST[2]}'" 38 | -------------------------------------------------------------------------------- /packfiles/Amazon Linux2/amazon-ebs.cassandra-dse.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "aws_session_token": "{{env `AWS_SESSION_TOKEN`}}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{ user `aws_access_key` }}", 14 | "secret_key": "{{ user `aws_secret_key` }}", 15 | "token": "{{ user `aws_session_token` }}", 16 | "region": "{{ user `aws_region` }}",
17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "amzn2-ami-hvm-*-x86_64-ebs", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["amazon"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "amazon cassandra DSE AMI", 27 | "ami_name": "cassandra-DSE-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ec2-user", 33 | "ssh_interface": "public_ip", 34 | "subnet_id": "{{user `subnet_id`}}", 35 | "temporary_key_pair_name": "amazon-packer-{{timestamp}}", 36 | "vpc_id": "{{user `vpc_id`}}", 37 | "run_tags": { 38 | "Name": "amazon-cassandra-dse-packer", 39 | "Application": "cassandra" 40 | }, 41 | "tags": { 42 | "OS_Version": "Amazon 2 linux", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Cassandra DSE Image", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/install-cassandra-dse.sh" 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /packfiles/Amazon Linux2/amazon-ebs.cassandra.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "aws_session_token": "{{env `AWS_SESSION_TOKEN`}}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{ user `aws_access_key` }}", 14 | "secret_key": "{{ user `aws_secret_key` }}", 15 | "token": "{{ user `aws_session_token` }}", 16 | "region": "{{ user `aws_region` }}", 17 | "source_ami_filter": { 18 | 
"filters": { 19 | "virtualization-type": "hvm", 20 | "name": "amzn2-ami-hvm-*-x86_64-ebs", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["amazon"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "amazon cassandra AMI", 27 | "ami_name": "cassandra-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ec2-user", 33 | "ssh_interface": "public_ip", 34 | "subnet_id": "{{user `subnet_id`}}", 35 | "temporary_key_pair_name": "amazon-packer-{{timestamp}}", 36 | "vpc_id": "{{user `vpc_id`}}", 37 | "run_tags": { 38 | "Name": "amazon-cassandra-packer", 39 | "Application": "cassandra" 40 | }, 41 | "tags": { 42 | "OS_Version": "Amazon 2 linux", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Cassandra Image", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/install-cassandra.sh" 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /packfiles/CentOS/amazon-ebs.centos.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "aws_session_token": "{{env `AWS_SESSION_TOKEN`}}", 6 | "aws_region": "{{env `AWS_REGION`}}", 7 | "build_number": "{{env `BUILD_NUMBER`}}", 8 | "instance_type": "t3.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{ user `aws_access_key` }}", 14 | "secret_key": "{{ user `aws_secret_key` }}", 15 | "token": "{{ user `aws_session_token` }}", 16 | "region": "{{ user `aws_region` }}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "CentOS Linux 
7 x86_64 HVM EBS*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["679593333241"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "CentOS base AMI", 27 | "ami_name": "CentOS-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "centos", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "centos-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "centos-base-packer", 38 | "Application": "base" 39 | }, 40 | "tags": { 41 | "OS_Version": "CentOS", 42 | "Version": "{{user `build_number`}}", 43 | "Application": "Base Image", 44 | "Runner": "EC2" 45 | } 46 | } 47 | ], 48 | "provisioners": [ 49 | { 50 | "type": "shell", 51 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_ansible.sh" 52 | }, 53 | { 54 | "type": "shell", 55 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_aws_ssm.sh" 56 | }, 57 | { 58 | "type": "ansible-local", 59 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/cloudwatch-metrics.yml", 60 | "role_paths": [ 61 | "{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-metrics" 62 | ] 63 | } 64 | ] 65 | } 66 | -------------------------------------------------------------------------------- /packfiles/CentOS/azure-arm.centos.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "tenant_id": "{{env `ARM_TENANT_ID`}}", 7 | "ssh_user": "centos", 8 | "ssh_pass": "{{env `ARM_SSH_PASS`}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "azure-arm", 13 | "client_id": "{{ user `client_id` }}", 14 | "client_secret": "{{ 
user `client_secret` }}", 15 | "subscription_id": "{{ user `subscription_id` }}", 16 | "tenant_id": "{{ user `tenant_id` }}", 17 | "resource_group_name": "{{ user `resourcegroup` }}", 18 | "storage_account": "{{ user `storageaccount` }}", 19 | "capture_container_name": "images", 20 | "capture_name_prefix": "packer", 21 | "ssh_username": "{{user `ssh_user`}}", 22 | "ssh_password": "{{user `ssh_pass`}}", 23 | "os_type": "Linux", 24 | "image_publisher": "OpenLogic", 25 | "image_offer": "CentOS", 26 | "image_sku": "7.3", 27 | "azure_tags": { 28 | "dept": "engineering" 29 | }, 30 | "image_version": "latest", 31 | "ssh_pty": "true", 32 | "location": "{{ user `location` }}", 33 | "vm_size": "{{ user `vm_size` }}" 34 | } 35 | ], 36 | "provisioners": [ 37 | { 38 | "execute_command": "echo '{{user `ssh_pass`}}' | {{ .Vars }} sudo -S -E sh '{{ .Path }}'", 39 | "inline": [ 40 | "yum update -y", 41 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 42 | ], 43 | "inline_shebang": "/bin/sh -x", 44 | "type": "shell", 45 | "skip_clean": true 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /packfiles/Debian/azure-arm.debian.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "tenant_id": "{{env `ARM_TENANT_ID`}}", 7 | "ssh_user": "packer", 8 | "ssh_pass": "{{env `ARM_SSH_PASS`}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "azure-arm", 13 | "client_id": "{{user `client_id`}}", 14 | "client_secret": "{{user `client_secret`}}", 15 | "resource_group_name": "{{user `resource_group`}}", 16 | "storage_account": "{{user `storage_account`}}", 17 | "subscription_id": "{{user `subscription_id`}}", 18 | "tenant_id": "{{ user `tenant_id` }}", 19 | "ssh_username": "{{user `ssh_user`}}", 20 | 
"ssh_password": "{{user `ssh_pass`}}", 21 | "os_type": "Linux", 22 | "image_publisher": "credativ", 23 | "image_offer": "Debian", 24 | "image_sku": "9", 25 | "azure_tags": { 26 | "dept": "engineering" 27 | }, 28 | "ssh_pty": "true", 29 | "location": "{{ user `location` }}", 30 | "vm_size": "{{ user `vm_size` }}" 31 | } 32 | ], 33 | "provisioners": [ 34 | { 35 | "execute_command": "echo '{{user `ssh_pass`}}' | {{ .Vars }} sudo -S -E sh '{{ .Path }}'", 36 | "inline": [ 37 | "apt-get update", 38 | "apt-get upgrade -y", 39 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 40 | ], 41 | "inline_shebang": "/bin/sh -x", 42 | "skip_clean": true, 43 | "type": "shell" 44 | } 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /packfiles/Debian/googlecompute.debian.json: -------------------------------------------------------------------------------- 1 | { 2 | "builders": [ 3 | { 4 | "type": "googlecompute", 5 | "project_id": "{{user `project_id`}}", 6 | "source_image_family": "debian-9", 7 | "ssh_username": "packer", 8 | "zone": "us-central1-a" 9 | } 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /packfiles/Redhat/amazon-ebs.base.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{ env `AWS_ACCESS_KEY_ID` }}", 4 | "aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | 
"name": "RHEL-7.6_HVM_GA-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["309956199498"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "RHEL base AMI", 27 | "ami_name": "RHEL-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ec2-user", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "rhel-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "rhel-base", 38 | "Application": "base", 39 | "OS_Version": "RedHat7.6" 40 | }, 41 | "tags": { 42 | "OS_Version": "RedHat7.6", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Base", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_ansible.sh" 53 | }, 54 | { 55 | "type": "shell", 56 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_aws_ssm.sh" 57 | }, 58 | { 59 | "type": "ansible-local", 60 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/cloudwatch-metrics.yml", 61 | "role_paths": [ 62 | "{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-metrics" 63 | ] 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /packfiles/Redhat/amazon-ebs.jenkins-master.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{ env `AWS_ACCESS_KEY_ID` }}", 4 | "aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 
13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "RHEL-BASE-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["self"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "Jenkins master base AMI", 27 | "ami_name": "jenkins-master-v{{user `build_number`}}-{{timestamp}}", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ec2-user", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "jenkins-master-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "jenkins-master-packer-image", 38 | "Application": "Jenkins", 39 | "OS_Version": "RedHat7.6" 40 | }, 41 | "tags": { 42 | "OS_Version": "RedHat7", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Jenkins Master", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_ansible.sh" 53 | }, 54 | { 55 | "type": "shell", 56 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_awscli.sh" 57 | }, 58 | { 59 | "type": "ansible-local", 60 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/jenkins-master.yml", 61 | "role_paths": [ 62 | "{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-logs" 63 | ] 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /packfiles/Redhat/amazon-ebs.jmeter.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{ env `AWS_ACCESS_KEY_ID` }}", 4 | 
"aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "RHEL-BASE-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["self"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "jmeter AMI", 27 | "ami_name": "jmeter-v{{user `build_number`}}-{{timestamp}}", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ec2-user", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "jmeter-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "jmeter-packer-image", 38 | "Application": "Jmeter", 39 | "OS_Version": "RedHat7.6" 40 | }, 41 | "tags": { 42 | "OS_Version": "RedHat7", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Jmeter", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/redhat/install_ansible.sh" 53 | }, 54 | { 55 | "type": "ansible-local", 56 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/jmeter.yml", 57 | "role_paths": [ 58 | "{{template_dir}}/../../provisioners/ansible/roles/openjdk", 59 | "{{template_dir}}/../../provisioners/ansible/roles/jmeter" 60 | ] 61 | } 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /packfiles/Redhat/azure-arm.rhel.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "tenant_id": "{{env `ARM_TENANT_ID`}}", 7 | "ssh_user": "centos", 8 | "ssh_pass": "{{env `ARM_SSH_PASS`}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "azure-arm", 13 | "client_id": "{{user `client_id`}}", 14 | "client_secret": "{{user `client_secret`}}", 15 | "image_offer": "RHEL", 16 | "image_publisher": "RedHat", 17 | "image_sku": "7.3", 18 | "image_version": "latest", 19 | "location": "South Central US", 20 | "managed_image_name": "MyRedHatOSImage", 21 | "managed_image_resource_group_name": "packertest", 22 | "os_type": "Linux", 23 | "ssh_password": "{{user `ssh_pass`}}", 24 | "ssh_pty": "true", 25 | "ssh_username": "{{user `ssh_user`}}", 26 | "subscription_id": "{{user `subscription_id`}}", 27 | "tenant_id": "{{user `tenant_id`}}", 28 | "vm_size": "Standard_DS2_v2" 29 | } 30 | ], 31 | "provisioners": [ 32 | { 33 | "execute_command": "echo '{{user `ssh_pass`}}' | {{ .Vars }} sudo -S -E sh '{{ .Path }}'", 34 | "inline": [ 35 | "yum update -y", 36 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 37 | ], 38 | "inline_shebang": "/bin/sh -x", 39 | "type": "shell", 40 | "skip_clean": true 41 | } 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/amazon-ebs.base1604.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{ env `AWS_ACCESS_KEY_ID` }}", 4 | "aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": 
"amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["099720109477"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "ubuntu base 16.04", 27 | "ami_name": "ubuntu-16.04-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ubuntu", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "ubuntu-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "ubuntu-base-packer", 38 | "Application": "base", 39 | "OS": "Ubuntu 16.04" 40 | }, 41 | "tags": { 42 | "OS_Version": "ubuntu 16.04", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Base Image", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_ansible.sh" 53 | }, 54 | { 55 | "type": "shell", 56 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_aws_ssm.sh" 57 | }, 58 | { 59 | "type": "ansible-local", 60 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/cloudwatch-metrics.yml", 61 | "role_paths": [ 62 | "{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-metrics" 63 | ] 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/amazon-ebs.base1804.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": 
"{{ env `AWS_ACCESS_KEY_ID` }}", 4 | "aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["099720109477"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "ubuntu base AMI", 27 | "ami_name": "ubuntu-18.04-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ubuntu", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "ubuntu-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "ubuntu-base-packer", 38 | "Application": "base", 39 | "OS": "Ubuntu 18.04" 40 | }, 41 | "tags": { 42 | "OS_Version": "ubuntu 18.04", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Base Image", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_ansible.sh" 53 | }, 54 | { 55 | "type": "shell", 56 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_aws_ssm.sh" 57 | }, 58 | { 59 | "type": "ansible-local", 60 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/cloudwatch-metrics.yml", 61 | "role_paths": [ 62 | 
"{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-metrics" 63 | ] 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/amazon-ebs.cassandra.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "aws_session_token": "{{env `AWS_SESSION_TOKEN`}}", 6 | "build_number": "{{env `BUILD_NUMBER`}}", 7 | "instance_type": "t3.micro" 8 | }, 9 | "builders": [ 10 | { 11 | "type": "amazon-ebs", 12 | "access_key": "{{user `aws_access_key`}}", 13 | "secret_key": "{{user `aws_secret_key`}}", 14 | "token": "{{user `aws_session_token`}}", 15 | "region": "{{user `aws_region`}}", 16 | "source_ami_filter": { 17 | "filters": { 18 | "virtualization-type": "hvm", 19 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 20 | "root-device-type": "ebs" 21 | }, 22 | "owners": ["099720109477"], 23 | "most_recent": true 24 | }, 25 | "instance_type": "{{ user `instance_type` }}", 26 | "ssh_username": "ubuntu", 27 | "ami_name": "cassandra-BASE-v{{user `build_number`}}-{{timestamp}}-AMI", 28 | "ami_description": "ubuntu cassandra AMI", 29 | "ami_virtualization_type": "hvm", 30 | "ami_users": "{{ user `ami_users` }}", 31 | "temporary_key_pair_name": "ubuntu-packer-{{timestamp}}", 32 | "vpc_id": "{{user `vpc_id`}}", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "spot_price": "auto", 35 | "associate_public_ip_address": true, 36 | "run_tags": { 37 | "Name": "ubuntu-cassandra-packer", 38 | "Application": "cassandra" 39 | }, 40 | "tags": { 41 | "OS_Version": "ubuntu", 42 | "Version": "{{user `build_number`}}", 43 | "Application": "Cassandra Image", 44 | "Runner": "EC2" 45 | } 46 | } 47 | ], 48 | "provisioners": [ 49 | { 50 | "type": "shell", 51 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_ansible.sh" 52 | }, 53 
| { 54 | "type": "ansible-local", 55 | "playbook_file": "{{template_dir}}/../../provisioners/ansible/playbooks/cassandra.yml", 56 | "role_paths": [ 57 | "{{template_dir}}/../../provisioners/ansible/roles/cloudwatch-metrics", 58 | "{{template_dir}}/../../provisioners/ansible/roles/devoinc.openjdk" 59 | ] 60 | }, 61 | { 62 | "type": "shell", 63 | "script": "{{template_dir}}/../../provisioners/scripts/linux/ubuntu/install_cassandra.sh" 64 | } 65 | ] 66 | } 67 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/amazon-ebs.vault.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{ env `AWS_ACCESS_KEY_ID` }}", 4 | "aws_secret_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}", 5 | "aws_session_token": "{{ env `AWS_SESSION_TOKEN` }}", 6 | "aws_region": "{{ env `AWS_REGION` }}", 7 | "build_number": "{{ env `BUILD_NUMBER` }}", 8 | "instance_type": "t2.micro" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "token": "{{user `aws_session_token`}}", 16 | "region": "{{user `aws_region`}}", 17 | "source_ami_filter": { 18 | "filters": { 19 | "virtualization-type": "hvm", 20 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", 21 | "root-device-type": "ebs" 22 | }, 23 | "owners": ["099720109477"], 24 | "most_recent": true 25 | }, 26 | "ami_description": "ubuntu Vault AMI", 27 | "ami_name": "hashicorp-vault-v{{user `build_number`}}-{{timestamp}}", 28 | "ami_virtualization_type": "hvm", 29 | "associate_public_ip_address": true, 30 | "instance_type": "{{ user `instance_type` }}", 31 | "spot_price": "auto", 32 | "ssh_username": "ubuntu", 33 | "subnet_id": "{{user `subnet_id`}}", 34 | "temporary_key_pair_name": "ubuntu-packer-{{timestamp}}", 35 | "vpc_id": "{{user `vpc_id`}}", 36 | "run_tags": { 37 | "Name": "ubuntu-base-packer", 38 | 
"Application": "base", 39 | "OS": "Ubuntu 16.04" 40 | }, 41 | "tags": { 42 | "OS_Version": "ubuntu", 43 | "Version": "{{user `build_number`}}", 44 | "Application": "Base Image", 45 | "Runner": "EC2" 46 | } 47 | } 48 | ], 49 | "provisioners": [ 50 | { 51 | "type": "shell", 52 | "inline": [ 53 | "git clone --branch master https://github.com/hashicorp/terraform-aws-vault.git /tmp/terraform-aws-vault", 54 | "/tmp/terraform-aws-vault/modules/install-vault/install-vault --version 0.10.4" 55 | ], 56 | "pause_before": "30s" 57 | } 58 | ] 59 | } 60 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/azure-arm.base.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "tenant_id": "{{env `ARM_TENANT_ID`}}" 7 | }, 8 | "builders": [ 9 | { 10 | "type": "azure-arm", 11 | "client_id": "{{ user `client_id` }}", 12 | "client_secret": "{{ user `client_secret` }}", 13 | "subscription_id": "{{ user `subscription_id` }}", 14 | "tenant_id": "{{ user `tenant_id` }}", 15 | "resource_group_name": "{{ user `resourcegroup` }}", 16 | "storage_account": "{{ user `storageaccount` }}", 17 | "capture_container_name": "images", 18 | "capture_name_prefix": "packer", 19 | "os_type": "Linux", 20 | "image_publisher": "Canonical", 21 | "image_offer": "UbuntuServer", 22 | "image_sku": "14.04.4-LTS", 23 | "azure_tags": { 24 | "dept": "engineering" 25 | }, 26 | "location": "{{ user `location` }}", 27 | "vm_size": "{{ user `vm_size` }}" 28 | } 29 | ], 30 | "provisioners": [ 31 | { 32 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 33 | "inline": [ 34 | "apt-get update", 35 | "apt-get upgrade -y", 36 | "apt-get -y install nginx", 37 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 38 | ], 
39 | "inline_shebang": "/bin/sh -x", 40 | "type": "shell" 41 | } 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/azure-arm.server.json: -------------------------------------------------------------------------------- 1 | { 2 | "builders": [ 3 | { 4 | "type": "azure-arm", 5 | "client_id": "f5b6a5cf-fbdf-4a9f-b3b8-3c2cd00225a4", 6 | "client_secret": "0e760437-bf34-4aad-9f8d-870be799c55d", 7 | "tenant_id": "72f988bf-86f1-41af-91ab-2d7cd011db47", 8 | "subscription_id": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxx", 9 | "managed_image_resource_group_name": "myResourceGroup", 10 | "managed_image_name": "myPackerImage", 11 | "os_type": "Linux", 12 | "image_publisher": "Canonical", 13 | "image_offer": "UbuntuServer", 14 | "image_sku": "16.04-LTS", 15 | "azure_tags": { 16 | "dept": "Engineering", 17 | "task": "Image deployment" 18 | }, 19 | "location": "{{ user `location` }}", 20 | "vm_size": "{{ user `vm_size` }}" 21 | } 22 | ], 23 | "provisioners": [ 24 | { 25 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 26 | "inline": [ 27 | "apt-get update", 28 | "apt-get upgrade -y", 29 | "apt-get -y install nginx", 30 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 31 | ], 32 | "inline_shebang": "/bin/sh -x", 33 | "type": "shell" 34 | } 35 | ] 36 | } 37 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/azure-arm.ubuntu.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" 6 | }, 7 | "builders": [ 8 | { 9 | "type": "azure-arm", 10 | 11 | "client_id": "{{user `client_id`}}", 12 | "client_secret": "{{user `client_secret`}}", 13 | "subscription_id": "{{user `subscription_id`}}", 14 | 15 | "os_type": "Linux", 
16 | "image_publisher": "Canonical", 17 | "image_offer": "UbuntuServer", 18 | "image_sku": "16.04-LTS", 19 | 20 | "managed_image_resource_group_name": "packertest", 21 | "managed_image_name": "MyUbuntuImage", 22 | 23 | "azure_tags": { 24 | "dept": "engineering", 25 | "task": "image deployment" 26 | }, 27 | 28 | "location": "South Central US", 29 | "vm_size": "Standard_DS2_v2" 30 | } 31 | ], 32 | "provisioners": [ 33 | { 34 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 35 | "inline": [ 36 | "apt-get update", 37 | "apt-get upgrade -y", 38 | 39 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 40 | ], 41 | "inline_shebang": "/bin/sh -x", 42 | "type": "shell" 43 | } 44 | ] 45 | } 46 | -------------------------------------------------------------------------------- /packfiles/Ubuntu/azure-arm.ubuntu_quickstart.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "resource_group": "{{env `ARM_RESOURCE_GROUP`}}", 4 | "storage_account": "{{env `ARM_STORAGE_ACCOUNT`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" 6 | }, 7 | "builders": [ 8 | { 9 | "type": "azure-arm", 10 | 11 | "resource_group_name": "{{user `resource_group`}}", 12 | "storage_account": "{{user `storage_account`}}", 13 | "subscription_id": "{{user `subscription_id`}}", 14 | 15 | "capture_container_name": "images", 16 | "capture_name_prefix": "packer", 17 | 18 | "os_type": "Linux", 19 | "image_publisher": "Canonical", 20 | "image_offer": "UbuntuServer", 21 | "image_sku": "16.04-LTS", 22 | 23 | "location": "West US", 24 | "vm_size": "Standard_DS2_v2" 25 | } 26 | ], 27 | "provisioners": [ 28 | { 29 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 30 | "inline": [ 31 | "apt-get update", 32 | "apt-get upgrade -y", 33 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 34 | ], 35 | "inline_shebang": "/bin/sh -x", 36 | 
"type": "shell" 37 | } 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /packfiles/Windows/README.MD: -------------------------------------------------------------------------------- 1 | # Windows AMIS 2 | 3 | You can find the latest windows ami using (assuming you're after 2012 r2): 4 | 5 | ```AWS 6 | $Windows=(aws ec2 describe-images --owners self amazon --filters --filters "Name=root-device-type,Values=ebs" "Name=architecture,Values=x86_64" "Name=platform,Values=windows" "Name=name,Values='Windows_Server-2012-R2_RTM-English-64Bit-Base*'|convertfrom-json).Images|sort Name| Select-Object -Last 1 7 | ``` 8 | 9 | But it's easier to use a source AMI filter: 10 | 11 | ``` packer 12 | "source_ami_filter": 13 | { 14 | "filters": { 15 | "virtualization-type": "hvm", 16 | "name": "Windows_Server-2012-R2_RTM-English-64Bit-Base*", 17 | "root-device-type": "ebs" 18 | }, 19 | "owners": [ 20 | "self", 21 | "amazon" 22 | ], 23 | "most_recent": true 24 | } 25 | ``` 26 | -------------------------------------------------------------------------------- /packfiles/Windows/amazon-ebs.winserver2012r2.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "build_number": "{{env `BUILD_NUMBER`}}", 6 | "winrm_password": "MyF@v0uriteG@me5", 7 | "region": "", 8 | "instance_type": "t3.large", 9 | "subnet_id": "", 10 | "vpc_id": "" 11 | }, 12 | "builders": [ 13 | { 14 | "type": "amazon-ebs", 15 | "access_key": "{{ user `aws_access_key` }}", 16 | "secret_key": "{{ user `aws_secret_key` }}", 17 | "region": "{{user `region`}}", 18 | "source_ami_filter": { 19 | "filters": { 20 | "virtualization-type": "hvm", 21 | "name": "Windows_Server-2012-R2_RTM-English-64Bit-Base*", 22 | "root-device-type": "ebs" 23 | }, 24 | "owners": ["self", "amazon"], 25 | "most_recent": true 26 | }, 27 
| "associate_public_ip_address": "true", 28 | "instance_type": "{{ user `instance_type` }}", 29 | "user_data_file": "{{template_dir}}/setup_winrm.txt", 30 | "communicator": "winrm", 31 | "spot_price": "auto", 32 | "winrm_username": "Administrator", 33 | "winrm_timeout": "10m", 34 | "winrm_password": "{{ user `winrm_password` }}", 35 | "ami_name": "Web Server v{{user `version`}} Windows2012R2 {{timestamp}}", 36 | "ami_description": "Windows 2012 R2 base AMI", 37 | "ami_virtualization_type": "hvm", 38 | "vpc_id": "{{ user `vpc_id` }}", 39 | "subnet_id": "{{ user `subnet_id` }}" 40 | } 41 | ], 42 | "provisioners": [ 43 | { 44 | "type": "powershell", 45 | "inline": [ 46 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12", 47 | "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))|out-null", 48 | "choco install DotNet4.6.1 -y -force" 49 | ] 50 | }, 51 | { 52 | "type": "powershell", 53 | "inline": [ 54 | "Import-Module -Name ServerManager", 55 | "Install-WindowsFeature Web-Server", 56 | "Install-WindowsFeature AS-Net-Framework", 57 | "Install-WindowsFeature Web-Asp-Net45", 58 | "Install-WindowsFeature Web-Mgmt-Console", 59 | "Install-WindowsFeature Web-Http-Tracing" 60 | ] 61 | } 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /packfiles/Windows/amazon-ebs.winserver2016.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "build_number": "{{env `BUILD_NUMBER`}}", 6 | "winrm_password": "MyF@v0uriteG@me5", 7 | "region": "", 8 | "instance_type": "t3.large", 9 | "subnet_id": "", 10 | "vpc_id": "" 11 | }, 12 | "builders": [ 13 | { 14 | "type": "amazon-ebs", 15 | "access_key": "{{ user `aws_access_key` }}", 16 | "secret_key": "{{ user `aws_secret_key` }}", 17 | "region": "{{user `region`}}", 
18 | "source_ami_filter": { 19 | "filters": { 20 | "virtualization-type": "hvm", 21 | "name": "Windows_Server-2016-English-Core-Base*", 22 | "root-device-type": "ebs" 23 | }, 24 | "owners": ["self", "amazon"], 25 | "most_recent": true 26 | }, 27 | "associate_public_ip_address": "true", 28 | "instance_type": "{{ user `instance_type` }}", 29 | "user_data_file": "{{template_dir}}/setup_winrm.txt", 30 | "communicator": "winrm", 31 | "spot_price": "auto", 32 | "winrm_username": "Administrator", 33 | "winrm_timeout": "10m", 34 | "winrm_password": "{{ user `winrm_password` }}", 35 | "ami_name": "Web Server v{{user `version`}} Windows2016 {{timestamp}}", 36 | "ami_description": "Windows 2016 base AMI", 37 | "ami_virtualization_type": "hvm", 38 | "vpc_id": "{{ user `vpc_id` }}", 39 | "subnet_id": "{{ user `subnet_id` }}" 40 | } 41 | ], 42 | "provisioners": [ 43 | { 44 | "type": "powershell", 45 | "inline": [ 46 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12", 47 | "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))|out-null", 48 | "choco install DotNet4.6.1 -y -force" 49 | ] 50 | }, 51 | { 52 | "type": "powershell", 53 | "inline": [ 54 | "Import-Module -Name ServerManager", 55 | "Install-WindowsFeature Web-Server", 56 | "Install-WindowsFeature AS-Net-Framework", 57 | "Install-WindowsFeature Web-Asp-Net45", 58 | "Install-WindowsFeature Web-Mgmt-Console", 59 | "Install-WindowsFeature Web-Http-Tracing" 60 | ] 61 | } 62 | ] 63 | } 64 | -------------------------------------------------------------------------------- /packfiles/Windows/amazon-ebs.winserver2019.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 4 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 5 | "aws_session_token": "{{env `AWS_SESSION_TOKEN`}}", 6 | "build_number": "{{env `BUILD_NUMBER`}}", 7 | "winrm_password": 
"MyF@v0uriteG@me5", 8 | "aws_region": "{{env `AWS_REGION`}}", 9 | "instance_type": "t3.large", 10 | "subnet_id": "", 11 | "vpc_id": "" 12 | }, 13 | "builders": [ 14 | { 15 | "type": "amazon-ebs", 16 | "access_key": "{{user `aws_access_key`}}", 17 | "secret_key": "{{user `aws_secret_key`}}", 18 | "token": "{{user `aws_session_token`}}", 19 | "region": "{{user `aws_region`}}", 20 | "source_ami_filter": { 21 | "filters": { 22 | "virtualization-type": "hvm", 23 | "name": "Windows_Server-2019-English-Full-Base*", 24 | "root-device-type": "ebs" 25 | }, 26 | "owners": ["self", "amazon"], 27 | "most_recent": true 28 | }, 29 | "ami_name": "Base v{{user `build_number`}} Windows2019 {{timestamp}}", 30 | "ami_description": "Windows 2019 Base", 31 | "ami_virtualization_type": "hvm", 32 | "associate_public_ip_address": true, 33 | "instance_type": "{{ user `instance_type` }}", 34 | "user_data_file": "{{template_dir}}/bootstrap_win.txt", 35 | "communicator": "winrm", 36 | "spot_price": "auto", 37 | "winrm_username": "Administrator", 38 | "winrm_timeout": "10m", 39 | "winrm_password": "{{ user `winrm_password` }}", 40 | 41 | "vpc_id": "{{ user `vpc_id` }}", 42 | "subnet_id": "{{ user `subnet_id` }}", 43 | "run_tags": { 44 | "Name": "amazon-windows-java", 45 | "Application": "base" 46 | }, 47 | "tags": { 48 | "OS_Version": "Windows Server 2019", 49 | "Version": "{{user `build_number`}}", 50 | "Application": "Base Image", 51 | "Runner": "EC2" 52 | } 53 | } 54 | ], 55 | "provisioners": [ 56 | { 57 | "type": "powershell", 58 | "inline": [ 59 | "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12", 60 | "iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1'))|out-null" 61 | ] 62 | }, 63 | { 64 | "type": "powershell", 65 | "inline": ["choco install javaruntime -y -force"] 66 | } 67 | ] 68 | } 69 | -------------------------------------------------------------------------------- /packfiles/Windows/azure-arm.windows.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" 6 | }, 7 | "builders": [ 8 | { 9 | "type": "azure-arm", 10 | 11 | "client_id": "{{user `client_id`}}", 12 | "client_secret": "{{user `client_secret`}}", 13 | "subscription_id": "{{user `subscription_id`}}", 14 | 15 | "managed_image_resource_group_name": "packertest", 16 | "managed_image_name": "MyWindowsOSImage", 17 | 18 | "os_type": "Windows", 19 | "image_publisher": "MicrosoftWindowsServer", 20 | "image_offer": "WindowsServer", 21 | "image_sku": "2012-R2-Datacenter", 22 | 23 | "communicator": "winrm", 24 | "winrm_use_ssl": "true", 25 | "winrm_insecure": "true", 26 | "winrm_timeout": "3m", 27 | "winrm_username": "packer", 28 | 29 | "location": "South Central US", 30 | "vm_size": "Standard_DS2_v2" 31 | } 32 | ], 33 | "provisioners": [ 34 | { 35 | "type": "powershell", 36 | "inline": [ 37 | " # NOTE: the following *3* lines are only needed if the you have installed the Guest Agent.", 38 | " while ((Get-Service RdAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 39 | " while ((Get-Service WindowsAzureTelemetryService).Status -ne 'Running') { Start-Sleep -s 5 }", 40 | " while ((Get-Service WindowsAzureGuestAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 41 | 42 | "if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}", 43 | "& $env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /quiet /quit", 44 | "while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" 45 | ] 46 | } 47 | ] 48 | } 49 | 
-------------------------------------------------------------------------------- /packfiles/Windows/azure-arm.windows_custom_imagee.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "resource_group": "{{env `ARM_RESOURCE_GROUP`}}", 6 | "storage_account": "{{env `ARM_STORAGE_ACCOUNT`}}", 7 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 8 | "object_id": "{{env `ARM_OJBECT_ID`}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "azure-arm", 13 | 14 | "client_id": "{{user `client_id`}}", 15 | "client_secret": "{{user `client_secret`}}", 16 | "resource_group_name": "{{user `resource_group`}}", 17 | "storage_account": "{{user `storage_account`}}", 18 | "subscription_id": "{{user `subscription_id`}}", 19 | "object_id": "{{user `object_id`}}", 20 | 21 | "capture_container_name": "images", 22 | "capture_name_prefix": "packer", 23 | 24 | "os_type": "Windows", 25 | "image_url": "https://my-storage-account.blob.core.windows.net/path/to/your/custom/image.vhd", 26 | 27 | "azure_tags": { 28 | "dept": "engineering", 29 | "task": "image deployment" 30 | }, 31 | 32 | "location": "West US", 33 | "vm_size": "Standard_DS2_v2" 34 | } 35 | ], 36 | "provisioners": [ 37 | { 38 | "type": "powershell", 39 | "inline": [ 40 | " # NOTE: the following *3* lines are only needed if the you have installed the Guest Agent.", 41 | " while ((Get-Service RdAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 42 | " while ((Get-Service WindowsAzureTelemetryService).Status -ne 'Running') { Start-Sleep -s 5 }", 43 | " while ((Get-Service WindowsAzureGuestAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 44 | 45 | "if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}", 46 | "& $env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /quiet 
/quit", 47 | "while($true) { $imageState = Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" 48 | ] 49 | } 50 | ] 51 | } 52 | -------------------------------------------------------------------------------- /packfiles/Windows/azure-arm.windows_quickstart.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}" 4 | }, 5 | "builders": [ 6 | { 7 | "type": "azure-arm", 8 | 9 | "subscription_id": "{{user `subscription_id`}}", 10 | 11 | "managed_image_resource_group_name": "packertest", 12 | "managed_image_name": "MyWindowsOSImage", 13 | 14 | "os_type": "Windows", 15 | "image_publisher": "MicrosoftWindowsServer", 16 | "image_offer": "WindowsServer", 17 | "image_sku": "2012-R2-Datacenter", 18 | 19 | "communicator": "winrm", 20 | "winrm_use_ssl": "true", 21 | "winrm_insecure": "true", 22 | "winrm_timeout": "3m", 23 | "winrm_username": "packer", 24 | 25 | "location": "South Central US", 26 | "vm_size": "Standard_DS2_v2" 27 | } 28 | ], 29 | "provisioners": [ 30 | { 31 | "type": "powershell", 32 | "inline": [ 33 | " # NOTE: the following *3* lines are only needed if the you have installed the Guest Agent.", 34 | " while ((Get-Service RdAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 35 | " while ((Get-Service WindowsAzureTelemetryService).Status -ne 'Running') { Start-Sleep -s 5 }", 36 | " while ((Get-Service WindowsAzureGuestAgent).Status -ne 'Running') { Start-Sleep -s 5 }", 37 | 38 | "if( Test-Path $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml ){ rm $Env:SystemRoot\\windows\\system32\\Sysprep\\unattend.xml -Force}", 39 | "& $env:SystemRoot\\System32\\Sysprep\\Sysprep.exe /oobe /generalize /quiet /quit", 40 | "while($true) { $imageState = 
Get-ItemProperty HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Setup\\State | Select ImageState; if($imageState.ImageState -ne 'IMAGE_STATE_GENERALIZE_RESEAL_TO_OOBE') { Write-Output $imageState.ImageState; Start-Sleep -s 10 } else { break } }" 41 | ] 42 | } 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /packfiles/Windows/bootstrap_win.txt: -------------------------------------------------------------------------------- 1 | 2 | # Set administrator password 3 | net user Administrator MyF@v0uriteG@me5 4 | wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE 5 | 6 | # First, make sure WinRM can't be connected to 7 | netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block 8 | 9 | # Delete any existing WinRM listeners 10 | winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null 11 | winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null 12 | 13 | # Disable group policies which block basic authentication and unencrypted login 14 | 15 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowBasic -Value 1 16 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowUnencryptedTraffic -Value 1 17 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowBasic -Value 1 18 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowUnencryptedTraffic -Value 1 19 | 20 | 21 | # Create a new WinRM listener and configure 22 | winrm create winrm/config/listener?Address=*+Transport=HTTP 23 | winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}' 24 | winrm set winrm/config '@{MaxTimeoutms="7200000"}' 25 | winrm set winrm/config/service '@{AllowUnencrypted="true"}' 26 | winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}' 27 | winrm set winrm/config/service/auth 
'@{Basic="true"}' 28 | winrm set winrm/config/client/auth '@{Basic="true"}' 29 | 30 | # Configure UAC to allow privilege elevation in remote shells 31 | $Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System' 32 | $Setting = 'LocalAccountTokenFilterPolicy' 33 | Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force 34 | 35 | # Configure and restart the WinRM Service; Enable the required firewall exception 36 | Stop-Service -Name WinRM 37 | Set-Service -Name WinRM -StartupType Automatic 38 | netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any 39 | Start-Service -Name WinRM 40 | 41 | -------------------------------------------------------------------------------- /packfiles/Windows/setup_winrm.txt: -------------------------------------------------------------------------------- 1 | 2 | # Set administrator password 3 | net user Administrator MyF@v0uriteG@me5 4 | wmic useraccount where "name='Administrator'" set PasswordExpires=FALSE 5 | 6 | # First, make sure WinRM can't be connected to 7 | netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new enable=yes action=block 8 | 9 | # Delete any existing WinRM listeners 10 | winrm delete winrm/config/listener?Address=*+Transport=HTTP 2>$Null 11 | winrm delete winrm/config/listener?Address=*+Transport=HTTPS 2>$Null 12 | 13 | # Disable group policies which block basic authentication and unencrypted login 14 | 15 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowBasic -Value 1 16 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Client -Name AllowUnencryptedTraffic -Value 1 17 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowBasic -Value 1 18 | Set-ItemProperty -Path HKLM:\Software\Policies\Microsoft\Windows\WinRM\Service -Name AllowUnencryptedTraffic -Value 1 19 | 20 | 21 | # Create a new WinRM listener 
and configure 22 | winrm create winrm/config/listener?Address=*+Transport=HTTP 23 | winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="0"}' 24 | winrm set winrm/config '@{MaxTimeoutms="7200000"}' 25 | winrm set winrm/config/service '@{AllowUnencrypted="true"}' 26 | winrm set winrm/config/service '@{MaxConcurrentOperationsPerUser="12000"}' 27 | winrm set winrm/config/service/auth '@{Basic="true"}' 28 | winrm set winrm/config/client/auth '@{Basic="true"}' 29 | 30 | # Configure UAC to allow privilege elevation in remote shells 31 | $Key = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System' 32 | $Setting = 'LocalAccountTokenFilterPolicy' 33 | Set-ItemProperty -Path $Key -Name $Setting -Value 1 -Force 34 | 35 | # Configure and restart the WinRM Service; Enable the required firewall exception 36 | Stop-Service -Name WinRM 37 | Set-Service -Name WinRM -StartupType Automatic 38 | netsh advfirewall firewall set rule name="Windows Remote Management (HTTP-In)" new action=allow localip=any remoteip=any 39 | Start-Service -Name WinRM 40 | 41 | -------------------------------------------------------------------------------- /packfiles/fix.ps1: -------------------------------------------------------------------------------- 1 | $packs=gci -filter *.json -recurse 2 | 3 | foreach($pack in $packs) 4 | { 5 | $json=packer fix -validate=true $($pack.FullName) 6 | $json|set-content $($pack.FullName) 7 | } 8 | -------------------------------------------------------------------------------- /packfiles/freebsd/azure-arm.freebsd.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "tenant_id": "{{env `ARM_TENANT_ID`}}", 7 | "ssh_user": "packer", 8 | "ssh_pass": "{{env `ARM_SSH_PASS`}}" 9 | }, 10 | "builders": [ 11 | { 12 | "type": "azure-arm", 13 | "client_id": 
"{{user `client_id`}}", 14 | "client_secret": "{{user `client_secret`}}", 15 | "subscription_id": "{{user `subscription_id`}}", 16 | "tenant_id": "{{ user `tenant_id` }}", 17 | "resource_group_name": "{{ user `resourcegroup` }}", 18 | "storage_account": "{{ user `storageaccount` }}", 19 | "capture_container_name": "images", 20 | "capture_name_prefix": "packer", 21 | "ssh_username": "{{user `ssh_user`}}", 22 | "ssh_password": "{{user `ssh_pass`}}", 23 | "os_type": "Linux", 24 | "image_publisher": "MicrosoftOSTC", 25 | "image_offer": "FreeBSD", 26 | "image_sku": "11.1", 27 | "azure_tags": { 28 | "dept": "engineering" 29 | }, 30 | "image_version": "latest", 31 | "location": "{{ user `location` }}", 32 | "vm_size": "{{ user `vm_size` }}" 33 | } 34 | ], 35 | "provisioners": [ 36 | { 37 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 38 | "inline": [ 39 | "env ASSUME_ALWAYS_YES=YES pkg bootstrap", 40 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 41 | ], 42 | "inline_shebang": "/bin/sh -x", 43 | "type": "shell", 44 | "skip_clean": "true", 45 | "expect_disconnect": "true" 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /packfiles/null/null.bubble.json: -------------------------------------------------------------------------------- 1 | { 2 | "builders": [ 3 | { 4 | "type": "null", 5 | "communicator": "none" 6 | } 7 | ], 8 | "provisioners": [ 9 | { 10 | "type": "comment", 11 | "comment": "Begin", 12 | "ui": true, 13 | "bubble_text": true 14 | }, 15 | { 16 | "type": "shell-local", 17 | "inline": ["echo \"This is a shell script\""] 18 | }, 19 | { 20 | "type": "comment", 21 | "comment": "In the middle of Provisioning run", 22 | "ui": true 23 | }, 24 | { 25 | "type": "shell-local", 26 | "inline": ["echo \"This is another shell script\""] 27 | }, 28 | { 29 | "type": "comment", 30 | "comment": "this comment is invisible and won't go to the UI" 31 | }, 32 | { 
33 | "type": "comment", 34 | "comment": "End", 35 | "ui": true, 36 | "bubble_text": true 37 | } 38 | ] 39 | } 40 | -------------------------------------------------------------------------------- /packfiles/suse/azure-arm.suse.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "client_id": "{{env `ARM_CLIENT_ID`}}", 4 | "client_secret": "{{env `ARM_CLIENT_SECRET`}}", 5 | "subscription_id": "{{env `ARM_SUBSCRIPTION_ID`}}", 6 | "ssh_user": "packer", 7 | "ssh_pass": "{{env `ARM_SSH_PASS`}}" 8 | }, 9 | "builders": [ 10 | { 11 | "type": "azure-arm", 12 | 13 | "client_id": "{{user `client_id`}}", 14 | "client_secret": "{{user `client_secret`}}", 15 | "subscription_id": "{{user `subscription_id`}}", 16 | 17 | "managed_image_resource_group_name": "packertest", 18 | "managed_image_name": "MySuseOSImage", 19 | 20 | "ssh_username": "{{user `ssh_user`}}", 21 | "ssh_password": "{{user `ssh_pass`}}", 22 | 23 | "os_type": "Linux", 24 | "image_publisher": "SUSE", 25 | "image_offer": "SLES", 26 | "image_sku": "12-SP3", 27 | "ssh_pty": "true", 28 | 29 | "location": "South Central US", 30 | "vm_size": "Standard_DS2_v2" 31 | } 32 | ], 33 | "provisioners": [ 34 | { 35 | "execute_command": "echo '{{user `ssh_pass`}}' | {{ .Vars }} sudo -S -E sh '{{ .Path }}'", 36 | "inline": [ 37 | "zypper update -y", 38 | 39 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 40 | ], 41 | "inline_shebang": "/bin/sh -x", 42 | "skip_clean": true, 43 | "type": "shell" 44 | } 45 | ] 46 | } 47 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/aws-ssm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - { role: dhoeric.aws-ssm } 5 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/cassandra.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - devoinc.openjdk 7 | - cloudwatch-metrics 8 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/cloudwatch-albp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | vars: 6 | logs: 7 | - file: /var/log/messages 8 | format: "%b %d %H:%M:%S" 9 | group_name: /var/log/messages 10 | - file: /var/log/nginx/access.log 11 | group_name: /var/log/nginx/access.log 12 | - file: /var/log/nginx/error.log 13 | group_name: /var/log/nginx/error.log 14 | - file: /var/log/nginx/nginx-access.log 15 | group_name: /var/log/nginx/nginx-access.log 16 | awslogs_loglevel: info 17 | roles: 18 | - cloudwatch-logs 19 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/cloudwatch-confluent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | vars: 6 | logs: 7 | - file: /var/log/messages 8 | format: "%b %d %H:%M:%S" 9 | group_name: /var/log/messages 10 | - file: /var/log/kafka/kafka-authorizer.log 11 | group_name: /var/log/kafka/kafka-authorizer.log 12 | - file: /var/log/kafka/kafka-request.log 13 | group_name: /var/log/kafka/kafka-request.log 14 | - file: /var/log/kafka/log-cleaner.log 15 | group_name: /var/log/kafka/log-cleaner.log 16 | - file: /var/log/kafka/state-change.log 17 | group_name: /var/log/kafka/state-change.log 18 | - file: /var/log/kafka/controller.log 19 | group_name: /var/log/kafka/controller.log 20 | - file: /var/log/amazon/ssm/errors.log 21 | group_name: /var/log/amazon/ssm/errors.log 22 | - file: /var/log/amazon/ssm/amazon-ssm-agent.log 23 | group_name: 
/var/log/amazon/ssm/amazon-ssm-agent.log 24 | - file: /var/log/amazon/ssm/hibernate.log 25 | group_name: /var/log/amazon/ssm/hibernate.log 26 | awslogs_loglevel: info 27 | roles: 28 | - cloudwatch-logs 29 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/cloudwatch-metrics.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - cloudwatch-metrics 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent-broker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.kafka-broker 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent-connect-distributed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.connect-distributed 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent-control-center.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.control-center 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent-private-ssl.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.ssl_CA 7 | -------------------------------------------------------------------------------- 
/provisioners/ansible/playbooks/confluent-schema.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.schema-registry 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent-zookeeper.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - confluent.zookeeper 7 | - stunnel 8 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/confluent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - openjdk 7 | - confluent.common 8 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/jenkins-master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | tasks: 6 | - name: install base softwares 7 | yum: state=present name={{ item }} 8 | with_items: 9 | - java 10 | - wget 11 | - vim 12 | - tcpdump 13 | - strace 14 | - git 15 | register: task_result 16 | until: task_result is success 17 | retries: 10 18 | delay: 2 19 | - name: Install Jenkins repo key 20 | rpm_key: state=present key=https://pkg.jenkins.io/redhat-stable/jenkins.io.key 21 | - name: Install Jenkins repo 22 | get_url: url=https://pkg.jenkins.io/redhat-stable/jenkins.repo dest=/etc/yum.repos.d/jenkins.repo owner=root group=root mode=0644 23 | - name: Install packages 24 | yum: name=jenkins state=installed 25 | register: task_result 26 | until: task_result is success 27 | retries: 10 28 | delay: 2 29 | - name: Set logs to auto start 30 | shell: chkconfig awslogs on 31 
| vars: 32 | logs: 33 | - file: /var/log/messages 34 | format: "%b %d %H:%M:%S" 35 | group_name: /var/log/messages 36 | - file: /var/log/jenkins/jenkins.log 37 | group_name: /var/log/jenkins/jenkins.log 38 | - file: /var/log/amazon/ssm/errors.log 39 | group_name: /var/log/amazon/ssm/errors.log 40 | - file: /var/log/amazon/ssm/amazon-ssm-agent.log 41 | group_name: /var/log/amazon/ssm/amazon-ssm-agent.log 42 | - file: /var/log/amazon/ssm/hibernate.log 43 | group_name: /var/log/amazon/ssm/hibernate.log 44 | awslogs_loglevel: info 45 | roles: 46 | - cloudwatch-logs 47 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/jmeter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - openjdk 7 | - jmeter 8 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/mongodb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: simple-playbook.yml 3 | 4 | - hosts: localhost 5 | 6 | roles: 7 | - mongodb 8 | 9 | vars: 10 | mongodb_version: 3.6.0 11 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/ssl_CA.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - ssl_CA 7 | -------------------------------------------------------------------------------- /provisioners/ansible/playbooks/zookeeper-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | become: yes 4 | become_user: root 5 | roles: 6 | - openjdk 7 | - zookeeper 8 | - stunnel 9 | -------------------------------------------------------------------------------- 
/provisioners/ansible/roles/awslogs-agent/files/awslog.conf: -------------------------------------------------------------------------------- 1 | [general] 2 | state_file = /var/awslogs/state/agent-state 3 | 4 | [/var/log/syslog] 5 | file = /var/log/syslog 6 | log_group_name = /var/log/syslog 7 | log_stream_name = {instance_id} 8 | datetime_format = %b %d %H:%M:%S 9 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/awslogs-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #install awslog agent on RHEL 3 | - name: Download the awslogs-agent-setup.py script 4 | get_url: 5 | dest: /tmp/awslogs-agent-setup.py 6 | group: root 7 | owner: root 8 | mode: 0600 9 | url: https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py 10 | 11 | - name: create awslog config file 12 | copy: 13 | src: awslog.conf 14 | dest: /tmp/awslog.conf 15 | mode: u=rw,g=r,o= 16 | owner: root 17 | group: root 18 | 19 | - name: Install the AWS CloudWatch Logs daemon 20 | shell: python /tmp/awslogs-agent-setup.py -n -r eu-west-2 -c /tmp/awslog.conf 21 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/README.md: -------------------------------------------------------------------------------- 1 | # Role Name 2 | 3 | Installs AWS CloudWatch Log Agent 4 | 5 | ## Requirements 6 | 7 | Requires ec2_metadata_facts. 
8 | 9 | ## Role Variables 10 | 11 | `logs`, `extra_logs`: list of logs with the following keys: 12 | 13 | | Name | Description | Required | Default | 14 | | ----------- | -------------------------- | -------- | --------------- | 15 | | file | Full path to log file | Yes | 16 | | format | Datetime format | No | None | 17 | | group_name | CloudWatch Log Group | Yes | 18 | | stream_name | CloudWatch Log Stream Name | No | The instance id | 19 | 20 | `awslogs_loglevel`: maximal log level for the Log Agent's logs itself 21 | ("debug", "info", "warning", "error" or "critical"). If this parameter is 22 | not specified, no specific logging configuration will take place and the 23 | default level (info) will be used. This parameter is very basic and does not 24 | allow flexible logging configuration, its only goal is to change the amount 25 | of logs going into the log agent's own logfile. 26 | 27 | ## Dependencies 28 | 29 | This role has no dependencies. 30 | 31 | ## Example Playbook 32 | 33 | - hosts: servers 34 | vars: 35 | logs: 36 | - file: /var/log/auth.log 37 | format: "%b %d %H:%M:%S" 38 | group_name: "auth" 39 | stream_name: "auth-stream" 40 | - file: /home/ubuntu/.bash_history 41 | group_name: "bash_history" 42 | awslogs_loglevel: info 43 | roles: 44 | - { role: dharrisio.aws-cloudwatch-logs } 45 | 46 | ## License 47 | 48 | GPLv3 49 | 50 | ## Author Information 51 | 52 | Created by David Harris 53 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ansible-aws-cloudwatch-logs-agent 3 | extra_logs: {} 4 | stream_name: "{instance_id}" 5 | aws_region: eu-west-2 6 | awslogs_loglevel: "" 7 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for ansible-aws-cloudwatch-logs-agent 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: David Harris 4 | description: Install and configure AWS CloudWatch Logs Agent 5 | company: Balihoo 6 | license: license (GPLv3) 7 | min_ansible_version: 1.2 8 | platforms: 9 | - name: EL 10 | versions: 11 | - 6 12 | - name: Amazon 13 | versions: 14 | - all 15 | - name: Ubuntu 16 | versions: 17 | - trusty 18 | 19 | categories: 20 | - cloud 21 | - cloud:ec2 22 | - monitoring 23 | dependencies: [] 24 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Download Install Script (Debian)." 3 | get_url: 4 | url: https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py 5 | dest: /tmp/awslogs-agent-setup.py 6 | mode: 550 7 | 8 | - name: "Create /etc/awslogs (Debian)." 9 | file: 10 | path: /etc/awslogs 11 | state: directory 12 | mode: 755 13 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/tasks/DebianInstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get ec2 facts (Debian)." 3 | action: ec2_facts 4 | 5 | - name: "Update Package Lists (Debian)." 6 | apt: 7 | update_cache: yes 8 | 9 | - name: "Install AWS CloudWatch Logs Agent (Debian)." 
10 | shell: python /tmp/awslogs-agent-setup.py -n -r {{ ansible_ec2_placement_region }} -c /etc/awslogs/awslogs.conf 11 | 12 | - name: "Override /etc/logrotate.d/awslogs" 13 | template: 14 | src: etc/logrotate.d/awslogs_debian.j2 15 | dest: /etc/logrotate.d/awslogs 16 | owner: root 17 | group: root 18 | mode: 0644 19 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install CloudWatch Log Agent (Amazon)" 3 | yum: 4 | name: awslogs 5 | state: present 6 | register: yum_result 7 | ignore_errors: true 8 | 9 | - block: 10 | - name: "Get ec2 facts (RedHat)." 11 | action: ec2_metadata_facts 12 | 13 | - name: "Download Install Script (RedHat)." 14 | get_url: 15 | url: https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py 16 | dest: /tmp/awslogs-agent-setup.py 17 | mode: 550 18 | 19 | - name: "Create /etc/awslogs (RedHat)." 20 | file: 21 | path: /etc/awslogs 22 | state: directory 23 | mode: 755 24 | 25 | - name: "Configure Cloudwatch Log Agent." 26 | include: "conf.yml" 27 | 28 | - name: "Install AWS CloudWatch Logs Agent (RedHat)." 
29 | shell: python /tmp/awslogs-agent-setup.py -n -r {{ ansible_ec2_placement_region }} -c /etc/awslogs/awslogs.conf 30 | args: 31 | creates: /etc/logrotate.d/awslogs 32 | 33 | - name: "Make symlink for /var/awslogs/etc/awslogs.conf" 34 | file: 35 | src: /etc/awslogs/awslogs.conf 36 | dest: /var/awslogs/etc/awslogs.conf 37 | state: link 38 | owner: root 39 | group: root 40 | mode: 0644 41 | force: true 42 | 43 | - name: "Override /etc/logrotate.d/awslogs" 44 | template: 45 | src: etc/logrotate.d/awslogs_redhat.j2 46 | dest: /etc/logrotate.d/awslogs 47 | owner: root 48 | group: root 49 | mode: 0644 50 | when: "yum_result.failed" 51 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/tasks/conf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: gather EC2 facts 3 | ec2_metadata_facts: 4 | 5 | - name: "Make /var/awslogs/state/ directory" 6 | file: 7 | path: /var/awslogs/state/ 8 | state: directory 9 | mode: 755 10 | 11 | - name: "Configure AWS CloudWatch Logs Agent" 12 | template: 13 | src: etc/awslogs/awslogs.conf.j2 14 | dest: /etc/awslogs/awslogs.conf 15 | owner: root 16 | group: root 17 | mode: 0644 18 | 19 | - name: "Configure AWS CloudWatch Log Agent logging" 20 | template: 21 | src: etc/awslogs/awslogs.logging.conf.j2 22 | dest: /etc/awslogs/awslogs.logging.conf 23 | owner: root 24 | group: root 25 | mode: 0644 26 | when: awslogs_loglevel is defined 27 | 28 | - name: "Configure AWS CloudWatch Logs Agent - Region" 29 | template: 30 | src: etc/awslogs/awscli.conf.j2 31 | dest: /etc/awslogs/awscli.conf 32 | owner: root 33 | group: root 34 | mode: 0644 35 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install RedHat/AMZN Linux Cloudwatch Log 
Agent." 3 | include: "RedHat.yml" 4 | when: ansible_os_family == "RedHat" 5 | 6 | - block: 7 | - name: "Download Debian/Ubuntu Cloudwatch Log Agent Install Script." 8 | include: "Debian.yml" 9 | 10 | - name: "Configure Cloudwatch Log Agent." 11 | include: "conf.yml" 12 | 13 | - name: "Install Debian/Ubuntu Cloudwatch Log Agent." 14 | include: "DebianInstall.yml" 15 | when: ansible_os_family == "Debian" 16 | 17 | - name: "Set region for Cloudwatch endpoint" 18 | template: 19 | src: templates/etc/aws.conf.j2 20 | dest: /var/awslogs/etc/aws.conf 21 | owner: root 22 | group: root 23 | mode: 0600 24 | 25 | - name: "Restart awslogs service." 26 | service: 27 | name: awslogs 28 | state: restarted 29 | enabled: yes 30 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/templates/etc/aws.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [plugins] 3 | cwlogs = cwlogs 4 | [default] 5 | region = {{ ansible_ec2_placement_region | default(aws_region) }} 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/templates/etc/awslogs/awscli.conf.j2: -------------------------------------------------------------------------------- 1 | [plugins] 2 | cwlogs = cwlogs 3 | [default] 4 | region = {{ ansible_ec2_placement_region | default(aws_region) }} 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/templates/etc/awslogs/awslogs.logging.conf.j2: -------------------------------------------------------------------------------- 1 | # 2 | # Based on the logging configuration example from AWS documentation 3 | # https://docs.aws.amazon.com/fr_fr/AmazonCloudWatch/latest/logs/AgentReference.html 4 | # 5 | [loggers] 6 | keys=root,cwlogs,reader,publisher,event,batch,stream,watcher 7 | 8 | [handlers] 9 | 
keys=consoleHandler 10 | 11 | [formatters] 12 | keys=simpleFormatter 13 | 14 | [logger_root] 15 | level=INFO 16 | handlers=consoleHandler 17 | 18 | [logger_cwlogs] 19 | level=INFO 20 | handlers=consoleHandler 21 | qualname=cwlogs.push 22 | propagate=0 23 | 24 | [logger_reader] 25 | level={{ awslogs_loglevel | upper }} 26 | handlers=consoleHandler 27 | qualname=cwlogs.push.reader 28 | propagate=0 29 | 30 | [logger_publisher] 31 | level={{ awslogs_loglevel | upper }} 32 | handlers=consoleHandler 33 | qualname=cwlogs.push.publisher 34 | propagate=0 35 | 36 | [logger_event] 37 | level={{ awslogs_loglevel | upper }} 38 | handlers=consoleHandler 39 | qualname=cwlogs.push.event 40 | propagate=0 41 | 42 | [logger_batch] 43 | level={{ awslogs_loglevel | upper }} 44 | handlers=consoleHandler 45 | qualname=cwlogs.push.batch 46 | propagate=0 47 | 48 | [logger_stream] 49 | level={{ awslogs_loglevel | upper }} 50 | handlers=consoleHandler 51 | qualname=cwlogs.push.stream 52 | propagate=0 53 | 54 | [logger_watcher] 55 | level={{ awslogs_loglevel | upper }} 56 | handlers=consoleHandler 57 | qualname=cwlogs.push.watcher 58 | propagate=0 59 | 60 | [handler_consoleHandler] 61 | class=logging.StreamHandler 62 | level={{ awslogs_loglevel | upper }} 63 | formatter=simpleFormatter 64 | args=(sys.stderr,) 65 | 66 | [formatter_simpleFormatter] 67 | format=%(asctime)s - %(name)s - %(levelname)s - %(process)d - %(threadName)s - %(message)s 68 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/templates/etc/logrotate.d/awslogs_debian.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Override of logrotate file https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py 3 | 4 | /var/log/awslogs.log { 5 | su root root 6 | missingok 7 | notifempty 8 | size 100M 9 | create 0600 root root 10 | delaycompress 11 | compress 12 | rotate 4 
13 | postrotate 14 | service awslogs restart > /dev/null 15 | endscript 16 | } 17 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/templates/etc/logrotate.d/awslogs_redhat.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Override of logrotate file https://s3.amazonaws.com/aws-cloudwatch/downloads/latest/awslogs-agent-setup.py 3 | 4 | /var/log/awslogs.log { 5 | su root root 6 | missingok 7 | notifempty 8 | size 100M 9 | create 0600 root root 10 | delaycompress 11 | compress 12 | rotate 4 13 | postrotate 14 | systemctl restart awslogs.service > /dev/null 15 | endscript 16 | } 17 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-logs/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for ansible-aws-cloudwatch-logs-agent 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-metrics/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-metrics/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for ansible-aws-cloudwatch-metrics-agent 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-metrics/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Create /tmp/cloudwatchmetrics" 2 | file: 3 | path: /tmp/cloudwatchmetrics 4 | state: directory 5 | mode: 755 6 | 7 | - name: Install Unzip 8 | yum: 9 | name: "unzip" 10 | state: present 11 | 12 | - 
name: Unarchive a file that needs to be downloaded 13 | unarchive: 14 | src: https://s3.amazonaws.com/amazoncloudwatch-agent/linux/amd64/latest/AmazonCloudWatchAgent.zip 15 | dest: /tmp/cloudwatchmetrics 16 | remote_src: yes 17 | 18 | - name: "Install AmazonCloudWatchAgent" 19 | shell: /tmp/cloudwatchmetrics/install.sh 20 | args: 21 | chdir: /tmp/cloudwatchmetrics/ 22 | 23 | - name: "Cpoy AWS CloudWatch metrics config" 24 | template: 25 | src: config.json 26 | dest: /opt/aws/amazon-cloudwatch-agent/bin/config.json 27 | owner: root 28 | group: root 29 | mode: 0644 30 | 31 | - name: "Install AWS CloudWatch metrics configuration" 32 | shell: /opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c file:/opt/aws/amazon-cloudwatch-agent/bin/config.json -s 33 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-metrics/templates/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics": { 3 | "append_dimensions": { 4 | "AutoScalingGroupName": "${aws:AutoScalingGroupName}", 5 | "ImageId": "${aws:ImageId}", 6 | "InstanceId": "${aws:InstanceId}", 7 | "InstanceType": "${aws:InstanceType}" 8 | }, 9 | "metrics_collected": { 10 | "cpu": { 11 | "measurement": [ 12 | "cpu_usage_idle", 13 | "cpu_usage_iowait", 14 | "cpu_usage_user", 15 | "cpu_usage_system" 16 | ], 17 | "metrics_collection_interval": 60, 18 | "totalcpu": false 19 | }, 20 | "disk": { 21 | "measurement": ["used_percent", "inodes_free"], 22 | "metrics_collection_interval": 60, 23 | "resources": ["*"] 24 | }, 25 | "diskio": { 26 | "measurement": ["io_time"], 27 | "metrics_collection_interval": 60, 28 | "resources": ["*"] 29 | }, 30 | "mem": { 31 | "measurement": ["mem_used_percent"], 32 | "metrics_collection_interval": 60 33 | }, 34 | "swap": { 35 | "measurement": ["swap_used_percent"], 36 | "metrics_collection_interval": 60 37 | } 38 | } 39 | } 40 | } 41 | 
-------------------------------------------------------------------------------- /provisioners/ansible/roles/cloudwatch-metrics/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for ansible-aws-cloudwatch-logs-agent 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.common/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ncat 3 | yum: 4 | name: nmap-ncat 5 | state: present 6 | - name: Install Confluent public key 7 | rpm_key: 8 | state: present 9 | key: https://packages.confluent.io/rpm/4.1/archive.key 10 | - name: Add Confluent repository (dist) 11 | yum_repository: 12 | name: Confluent.dist 13 | description: Confluent repository (dist) 14 | file: confluent.repo 15 | baseurl: https://packages.confluent.io/rpm/4.1/7 16 | gpgkey: https://packages.confluent.io/rpm/4.1/archive.key 17 | gpgcheck: yes 18 | enabled: yes 19 | - name: Add Confluent repository 20 | yum_repository: 21 | name: Confluent 22 | description: Confluent repository 23 | file: confluent.repo 24 | baseurl: https://packages.confluent.io/rpm/4.1 25 | gpgkey: https://packages.confluent.io/rpm/4.1/archive.key 26 | gpgcheck: yes 27 | enabled: yes 28 | - name: Install Confluent Enterprise 29 | yum: 30 | name: confluent-platform-2.11 31 | state: present 32 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/defaults/main.yml: -------------------------------------------------------------------------------- 1 | security_mode: sasl_ssl 2 | kafka: 3 | 
connect: 4 | distributed: 5 | config_file: /etc/kafka/connect-distributed.properties 6 | service_name: connect 7 | user: cp-kafka-connect 8 | group: confluent 9 | systemd_config_file: /etc/systemd/system/connect.service 10 | systemd_env_file: /etc/kafka/connect-env.sh 11 | log4j_config_file: /etc/kafka/connect-log4j.properties 12 | start_class: /bin/connect-distributed 13 | jaas_config_file: /etc/kafka/connect_jaas.conf 14 | config: 15 | consumer.interceptor.classes: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor 16 | producer.interceptor.classes: io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor 17 | config.storage.replication.factor: 3 18 | config.storage.topic: connect-configs 19 | group.id: connect-cluster 20 | internal.key.converter: org.apache.kafka.connect.json.JsonConverter 21 | internal.key.converter.schemas.enable: false 22 | internal.value.converter: org.apache.kafka.connect.json.JsonConverter 23 | internal.value.converter.schemas.enable: false 24 | offset.flush.interval.ms: 10000 25 | offset.storage.replication.factor: 3 26 | offset.storage.topic: connect-offsets 27 | status.storage.replication.factor: 3 28 | status.storage.topic: connect-status 29 | key.converter: io.confluent.connect.avro.AvroConverter 30 | value.converter: io.confluent.connect.avro.AvroConverter 31 | plugin.path: /usr/share/java 32 | systemd: 33 | enabled: yes 34 | state: started 35 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart connect 2 | systemd: 3 | name: "{{ kafka.connect.distributed.service_name }}" 4 | state: restarted 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/meta/main.yml: 
-------------------------------------------------------------------------------- 1 | dependencies: 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Kerberos library dependencies 3 | yum: 4 | name: krb5-workstation 5 | state: present 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | 4 | - include: configure.yml 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect-distributed.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | 3 | {% for key, value in kafka.connect.distributed.config.items() %} 4 | {{key}}={{value}} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect-distributed_ssl.properties.j2: -------------------------------------------------------------------------------- 1 | 2 | #SSL additions 3 | ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 4 | ssl.truststore.password=REPLACEME 5 | ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 6 | ssl.keystore.password=REPLACEME 7 | ssl.key.password=REPLACEME 8 | security.protocol=SSL 9 | producer.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 10 | producer.ssl.truststore.password=REPLACEME 11 | producer.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 12 | producer.ssl.keystore.password=REPLACEME 13 
| producer.ssl.key.password=REPLACEME 14 | producer.security.protocol=SSL 15 | consumer.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 16 | consumer.ssl.truststore.password=REPLACEME 17 | consumer.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 18 | consumer.ssl.keystore.password=REPLACEME 19 | consumer.ssl.key.password=REPLACEME 20 | consumer.security.protocol=SSL 21 | producer.confluent.monitoring.interceptor.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 22 | producer.confluent.monitoring.interceptor.ssl.truststore.password=REPLACEME 23 | producer.confluent.monitoring.interceptor.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 24 | producer.confluent.monitoring.interceptor.ssl.keystore.password=REPLACEME 25 | producer.confluent.monitoring.interceptor.ssl.key.password=REPLACEME 26 | producer.confluent.monitoring.interceptor.security.protocol=SSL 27 | consumer.confluent.monitoring.interceptor.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 28 | consumer.confluent.monitoring.interceptor.ssl.truststore.password=REPLACEME 29 | consumer.confluent.monitoring.interceptor.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 30 | consumer.confluent.monitoring.interceptor.ssl.keystore.password=REPLACEME 31 | consumer.confluent.monitoring.interceptor.ssl.key.password=REPLACEME 32 | consumer.confluent.monitoring.interceptor.security.protocol=SSL 33 | 34 | {% for key, value in kafka.connect.distributed.config.items() %} 35 | {{key}}={{value}} 36 | {% endfor %} 37 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect-env.j2: -------------------------------------------------------------------------------- 1 | export KAFKA_HEAP_OPTS="-Xms6g -Xmx6g -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M 
-XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80" 2 | export KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/connect_jaas.conf -Dsun.security.krb5.debug=false" 3 | export LOG_DIR=/var/log/connect 4 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect-log4j.properties.j2: -------------------------------------------------------------------------------- 1 | # Licensed to the Apache Software Foundation (ASF) under one or more 2 | # contributor license agreements. See the NOTICE file distributed with 3 | # this work for additional information regarding copyright ownership. 4 | # The ASF licenses this file to You under the Apache License, Version 2.0 5 | # (the "License"); you may not use this file except in compliance with 6 | # the License. You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | log4j.rootLogger=INFO, stdout, file 17 | 18 | log4j.appender.stdout=org.apache.log4j.ConsoleAppender 19 | log4j.appender.stdout.layout=org.apache.log4j.PatternLayout 20 | log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n 21 | 22 | log4j.logger.org.apache.zookeeper=ERROR 23 | log4j.logger.org.I0Itec.zkclient=ERROR 24 | log4j.logger.org.reflections=ERROR 25 | 26 | log4j.appender.file=org.apache.log4j.RollingFileAppender 27 | log4j.appender.file.maxBackupIndex=10 28 | log4j.appender.file.maxFileSize=100MB 29 | log4j.appender.file.File=${kafka.logs.dir}/connect.log 30 | log4j.appender.file.layout=org.apache.log4j.PatternLayout 31 | log4j.appender.file.layout.ConversionPattern=[%d] %p %m (%c)%n 32 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Connect distributed server 3 | Documentation=http://confluent.io 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=simple 9 | User=cp-kafka-connect 10 | Group=confluent 11 | ExecStart=/bin/connect-distributed /etc/kafka/connect-distributed.properties 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/connect_jaas.j2: -------------------------------------------------------------------------------- 1 | KafkaClient { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/etc/security/keytabs/connect.service.keytab" 5 | storeKey=true 6 | useTicketCache=false 7 | serviceName="kafka" 8 | principal="connect/REPLACEMEWITHSED@domain.name"; 9 | }; 10 | 11 | Client { 12 | com.sun.security.auth.module.Krb5LoginModule required 13 | 
useKeyTab=true 14 | keyTab="/etc/security/keytabs/connect.service.keytab" 15 | storeKey=true 16 | useTicketCache=false 17 | serviceName="kafka" 18 | principal="connect/REPLACEMEWITHSED@domain.name"; 19 | }; 20 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.connect-distributed/templates/krb5.conf.j2: -------------------------------------------------------------------------------- 1 | # Configuration snippets may be placed in this directory as well 2 | includedir /etc/krb5.conf.d/ 3 | 4 | [libdefaults] 5 | default_realm = domain.name 6 | clockskew = 300 7 | ticket_lifetime = 1d 8 | forwardable = true 9 | proxiable = true 10 | dns_lookup_realm = true 11 | dns_lookup_kdc = false 12 | 13 | [realms] 14 | domain.name = { 15 | kdc = 10.142.68.19:88 16 | kpasswd_server = 10.142.68.19:464 17 | } 18 | 19 | [domain_realm] 20 | domain.name.com = domain.name 21 | .domain.name.com = domain.name 22 | domain.name = domain.name 23 | .domain.name = domain.name 24 | 25 | [appdefaults] 26 | pam = { 27 | ticket_lifetime = 1d 28 | renew_lifetime = 1d 29 | forwardable = true 30 | proxiable = false 31 | retain_after_close = false 32 | minimum_uid = 0 33 | debug = false 34 | } 35 | 36 | [logging] 37 | default = FILE:/var/log/krb5libs.log 38 | kdc = FILE:/var/log/kdc.log 39 | admin_server = FILE:/var/log/kadmind.log 40 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/defaults/main.yml: -------------------------------------------------------------------------------- 1 | security_mode: ssl 2 | confluent: 3 | control: 4 | center: 5 | user: cp-control-center 6 | group: confluent 7 | config_file: /etc/confluent-control-center/control-center-production.properties 8 | service_name: confluent-control-center 9 | config: 10 | confluent.controlcenter.rest.port: 8443 11 | confluent.controlcenter.internal.topics.partitions: 4 12 | 
confluent.controlcenter.internal.topics.replication: 3 13 | confluent.controlcenter.command.topic.replication: 3 14 | confluent.monitoring.interceptor.topic.partitions: 4 15 | confluent.monitoring.interceptor.topic.replication: 3 16 | confluent.metrics.topic.partitions: 4 17 | confluent.metrics.topic.replication: 3 18 | confluent.controlcenter.streams.num.stream.threads: 8 19 | confluent.controlcenter.data.dir: /var/lib/confluent/control-center 20 | systemd: 21 | enabled: yes 22 | state: started 23 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart control center 2 | systemd: 3 | name: "{{ confluent.control.center.service_name }}" 4 | state: restarted 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: control center create group 2 | group: 3 | name: "{{ confluent.control.center.group }}" 4 | - name: control center create user 5 | user: 6 | name: "{{ confluent.control.center.user }}" 7 | comment: "Control Center User" 8 | system: yes 9 | group: "{{ confluent.control.center.group }}" 10 | - name: control center create config 11 | template: 12 | src: control-center.properties.j2 13 | dest: "{{ confluent.control.center.config_file }}" 14 | mode: 0640 15 | owner: "{{ confluent.control.center.user }}" 16 | group: "{{ confluent.control.center.group }}" 17 | when: security_mode == "plaintext" 18 | notify: 19 | - restart control 
center 20 | - name: control center create config 21 | template: 22 | src: control-center_ssl.properties.j2 23 | dest: "{{ confluent.control.center.config_file }}" 24 | mode: 0640 25 | owner: "{{ confluent.control.center.user }}" 26 | group: "{{ confluent.control.center.group }}" 27 | when: security_mode == "ssl" 28 | notify: 29 | - restart control center 30 | - name: control center create config 31 | template: 32 | src: control-center_sasl_ssl.properties.j2 33 | dest: "{{ confluent.control.center.config_file }}" 34 | mode: 0640 35 | owner: "{{ confluent.control.center.user }}" 36 | group: "{{ confluent.control.center.group }}" 37 | when: security_mode == "sasl_ssl" 38 | notify: 39 | - restart control center 40 | - name: control center configure service 41 | systemd: 42 | name: "{{ confluent.control.center.service_name }}" 43 | enabled: "{{ confluent.control.center.systemd.enabled }}" 44 | state: "{{ confluent.control.center.systemd.state }}" 45 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/templates/control-center.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | 3 | {% for key, value in confluent.control.center.config.items() %} 4 | {{key}}={{value}} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/templates/control-center_sasl_ssl.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | bootstrap.servers={% for host in groups['broker'] %}{% if loop.index > 1%},{% endif %}{{ host }}:{{broker.config.port}}{% endfor %} 3 | 4 | confluent.controlcenter.streams.security.protocol=SASL_SSL 5 | 6 | #SSL additions 7 | ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 8 | ssl.truststore.password=REPLACEME 9 | 
ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 10 | ssl.keystore.password=REPLACEME 11 | ssl.key.password=REPLACEME 12 | confluent.controlcenter.streams.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 13 | confluent.controlcenter.streams.ssl.truststore.password=REPLACEME 14 | confluent.controlcenter.streams.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 15 | confluent.controlcenter.streams.ssl.keystore.password=REPLACEME 16 | confluent.controlcenter.streams.ssl.key.password=REPLACEME 17 | confluent.controlcenter.rest.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 18 | confluent.controlcenter.rest.ssl.truststore.password=REPLACEME 19 | confluent.controlcenter.rest.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 20 | confluent.controlcenter.rest.ssl.keystore.password=REPLACEME 21 | confluent.controlcenter.rest.ssl.key.password=REPLACEME 22 | 23 | 24 | #SASL additions 25 | confluent.controlcenter.streams.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="client" password="client-secret"; 26 | confluent.controlcenter.streams.security.protocol=SASL_SSL 27 | confluent.controlcenter.streams.sasl.mechanism=GSSAPI 28 | confluent.controlcenter.rest.security.protocol=SASL_SSL 29 | confluent.controlcenter.rest.sasl.mechanism=GSSAPI 30 | sasl.kerberos.service.name=kafka 31 | security.protocol=SASL_SSL 32 | sasl.mechanism=GSSAPI 33 | ssl.client.auth=true 34 | 35 | 36 | {% for key, value in confluent.control.center.config.items() %} 37 | {{key}}={{value}} 38 | {% endfor %} 39 | 40 | 41 | confluent.controlcenter.rest.listeners=https://ukdc1-oc-had403.domain.namepp.local:8443 42 | 43 | 44 | confluent.controlcenter.log.dir=/var/log/control 45 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.control-center/templates/control-center_ssl.properties.j2: 
-------------------------------------------------------------------------------- 1 | 2 | #SSL additions 3 | ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 4 | ssl.truststore.password=REPLACEME 5 | ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 6 | ssl.keystore.password=REPLACEME 7 | ssl.key.password=REPLACEME 8 | confluent.controlcenter.streams.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 9 | confluent.controlcenter.streams.ssl.truststore.password=REPLACEME 10 | confluent.controlcenter.streams.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 11 | confluent.controlcenter.streams.ssl.keystore.password=REPLACEME 12 | confluent.controlcenter.streams.ssl.key.password=REPLACEME 13 | confluent.controlcenter.rest.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 14 | confluent.controlcenter.rest.ssl.truststore.password=REPLACEME 15 | confluent.controlcenter.rest.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 16 | confluent.controlcenter.rest.ssl.keystore.password=REPLACEME 17 | confluent.controlcenter.rest.ssl.key.password=REPLACEME 18 | 19 | {% for key, value in confluent.control.center.config.items() %} 20 | {{key}}={{value}} 21 | {% endfor %} 22 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | security_mode: sasl_ssl 2 | kafka: 3 | broker: 4 | user: cp-kafka 5 | group: confluent 6 | config_file: /etc/kafka/server.properties 7 | jaas_config_file: /etc/kafka/kafka_server_jaas.conf 8 | systemd_env_file: /etc/kafka/kafka-env.sh 9 | systemd_config_file: /etc/systemd/system/kafka.service 10 | start_class: /bin/kafka-server-start 11 | log4j_config_file: /etc/kafka/log4j.properties 12 | service_name: kafka 13 | datadir: 14 | - /var/lib/kafka/data 15 | systemd: 16 | enabled: yes 
17 | state: started 18 | config: 19 | group.initial.rebalance.delay.ms: 0 20 | log.retention.check.interval.ms: 300000 21 | log.retention.hours: 168 22 | log.segment.bytes: 1073741824 23 | num.io.threads: 16 24 | num.network.threads: 8 25 | num.partitions: 1 26 | num.recovery.threads.per.data.dir: 2 27 | offsets.topic.replication.factor: 3 28 | socket.receive.buffer.bytes: 102400 29 | socket.request.max.bytes: 104857600 30 | socket.send.buffer.bytes: 102400 31 | transaction.state.log.min.isr: 2 32 | transaction.state.log.replication.factor: 3 33 | zookeeper.connection.timeout.ms: 6000 34 | min.insync.replicas: 2 35 | default.replication.factor: 3 36 | auto.create.topics.enable: "false" 37 | delete.topic.enable: "true" 38 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/files/kafka-server-start: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | if [ $# -lt 1 ]; 18 | then 19 | echo "USAGE: $0 [-daemon] server.properties [--override property=value]*" 20 | exit 1 21 | fi 22 | base_dir=$(dirname "$0") 23 | 24 | export KAFKACFGDIR=/etc/kafka 25 | 26 | if [ -f "${KAFKACFGDIR}/kafka-env.sh" ]; then 27 | . "${KAFKACFGDIR}/kafka-env.sh" 28 | fi 29 | 30 | if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then 31 | LOG4J_CONFIG_NORMAL_INSTALL="/etc/kafka/log4j.properties" 32 | LOG4J_CONFIG_ZIP_INSTALL="$base_dir/../etc/kafka/log4j.properties" 33 | if [ -e "$LOG4J_CONFIG_NORMAL_INSTALL" ]; then # Normal install layout 34 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_CONFIG_NORMAL_INSTALL}" 35 | elif [ -e "${LOG4J_CONFIG_ZIP_INSTALL}" ]; then # Simple zip file layout 36 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_CONFIG_ZIP_INSTALL}" 37 | else # Fallback to normal default 38 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" 39 | fi 40 | fi 41 | export KAFKA_LOG4J_OPTS 42 | 43 | if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then 44 | export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G" 45 | fi 46 | 47 | EXTRA_ARGS=${EXTRA_ARGS-'-name kafkaServer -loggc'} 48 | 49 | COMMAND=$1 50 | case $COMMAND in 51 | -daemon) 52 | EXTRA_ARGS="-daemon "$EXTRA_ARGS 53 | shift 54 | ;; 55 | *) 56 | ;; 57 | esac 58 | 59 | exec "$base_dir/kafka-run-class" "$EXTRA_ARGS" io.confluent.support.metrics.SupportedKafka "$@" 60 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart kafka 2 | systemd: 3 | name: "{{ kafka.broker.service_name }}" 4 | state: restarted 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | 
-------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Kerberos library dependencies 3 | yum: 4 | name: krb5-workstation 5 | state: present 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | 4 | - include: configure.yml 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/kafka-env.j2: -------------------------------------------------------------------------------- 1 | export KAFKA_HEAP_OPTS="-Xms6g -Xmx6g -XX:MetaspaceSize=96m -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:G1HeapRegionSize=16M -XX:MinMetaspaceFreeRatio=50 -XX:MaxMetaspaceFreeRatio=80" 2 | export KAFKA_OPTS="-Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf -Dsun.security.krb5.debug=false" 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/kafka.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apache Kafka server (broker) 3 | Documentation=http://kafka.apache.org/documentation.html 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=simple 9 | User=cp-kafka 10 | Group=confluent 11 | ExecStart=/bin/kafka-server-start /etc/kafka/server.properties 12 | ExecStop=/bin/kafka-server-stop 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- 
/provisioners/ansible/roles/confluent.kafka-broker/templates/kafka_server_jaas.j2: -------------------------------------------------------------------------------- 1 | KafkaServer { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/etc/security/keytabs/kafka.service.keytab" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="kafka/REPLACEMEWITHSED@domain.name"; 8 | }; 9 | 10 | Client { 11 | com.sun.security.auth.module.Krb5LoginModule required 12 | useKeyTab=true 13 | keyTab="/etc/security/keytabs/kafka.service.keytab" 14 | storeKey=true 15 | useTicketCache=false 16 | principal="kafka/REPLACEMEWITHSED@domain.name"; 17 | }; 18 | 19 | KafkaClient { 20 | com.sun.security.auth.module.Krb5LoginModule required 21 | useKeyTab=true 22 | keyTab="/etc/security/keytabs/kafka.service.keytab" 23 | storeKey=true 24 | useTicketCache=false 25 | principal="kafka/REPLACEMEWITHSED@domain.name"; 26 | }; 27 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/krb5.conf.j2: -------------------------------------------------------------------------------- 1 | # Configuration snippets may be placed in this directory as well 2 | includedir /etc/krb5.conf.d/ 3 | 4 | [libdefaults] 5 | default_realm = domain.name 6 | clockskew = 300 7 | ticket_lifetime = 1d 8 | forwardable = true 9 | proxiable = true 10 | dns_lookup_realm = true 11 | dns_lookup_kdc = false 12 | 13 | [realms] 14 | domain.name = { 15 | kdc = 10.142.68.19:88 16 | kpasswd_server = 10.142.68.19:464 17 | } 18 | 19 | [domain_realm] 20 | domain.name.com = domain.name 21 | .domain.name.com = domain.name 22 | domain.name = domain.name 23 | .domain.name = domain.name 24 | 25 | [appdefaults] 26 | pam = { 27 | ticket_lifetime = 1d 28 | renew_lifetime = 1d 29 | forwardable = true 30 | proxiable = false 31 | retain_after_close = false 32 | minimum_uid = 0 33 | debug = false 34 | } 35 | 36 | [logging] 37 | 
default = FILE:/var/log/krb5libs.log 38 | kdc = FILE:/var/log/kdc.log 39 | admin_server = FILE:/var/log/kadmind.log 40 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/server.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | log.dir={% for logdir in kafka.broker.datadir %}{% if loop.index > 1%},{% endif %}{{ logdir }}{% endfor %} 3 | 4 | {% for key, value in kafka.broker.config.items() %} 5 | {{key}}={{value}} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/server_sasl_ssl.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | log.dir={% for logdir in kafka.broker.datadir %}{% if loop.index > 1%},{% endif %}{{ logdir }}{% endfor %} 3 | 4 | listeners=REPLICATOR://REPLACEMEWITHSED:9093,SASL_SSL://REPLACEMEWITHSED:6668 5 | listener.security.protocol.map=REPLICATOR:SSL,SASL_SSL:SASL_SSL 6 | 7 | confluent.support.metrics.enable=false 8 | ssl.truststore.location=/var/ssl/private/kafka.server.truststore.jks 9 | ssl.truststore.password=REPLACEME 10 | ssl.keystore.location=/var/ssl/private/kafka.server.keystore.jks 11 | ssl.keystore.password=REPLACEME 12 | ssl.key.password=REPLACEME 13 | security.inter.broker.protocol=SASL_SSL 14 | 15 | # List of enabled mechanisms, can be more than one 16 | sasl.enabled.mechanisms=GSSAPI 17 | 18 | # Specify one of the SASL mechanisms 19 | sasl.mechanism.inter.broker.protocol=GSSAPI 20 | 21 | sasl.kerberos.service.name=kafka 22 | zookeeper.set.acl=true 23 | 24 | #authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer 25 | super.users=User:kafka 26 | sasl.kerberos.principal.to.local.rules=RULE:[1:@/bin/sh](.*@domain.name)s/@.*//,DEFAULT 27 | 28 | ssl.enabled.protocols=TLSv1.2
29 | 30 | 31 | {% for key, value in kafka.broker.config.items() %} 32 | {{key}}={{value}} 33 | {% endfor %} 34 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.kafka-broker/templates/server_ssl.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | log.dir={% for logdir in kafka.broker.datadir %}{% if loop.index > 1%},{% endif %}{{ logdir }}{% endfor %} 3 | 4 | # SSL Additions 5 | listeners=SSL://REPLACEMEWITHSED:9093 6 | security.inter.broker.protocol=SSL 7 | ssl.truststore.location=/var/ssl/private/kafka.server.truststore.jks 8 | ssl.truststore.password=REPLACEME 9 | ssl.keystore.location=/var/ssl/private/kafka.server.keystore.jks 10 | ssl.keystore.password=REPLACEME 11 | ssl.key.password=REPLACEME 12 | confluent.metrics.reporter.security.protocol=SSL 13 | confluent.metrics.reporter.ssl.truststore.location=/var/ssl/private/kafka.server.truststore.jks 14 | confluent.metrics.reporter.ssl.truststore.password=REPLACEME 15 | confluent.metrics.reporter.ssl.keystore.location=/var/ssl/private/kafka.server.keystore.jks 16 | confluent.metrics.reporter.ssl.keystore.password=REPLACEME 17 | confluent.metrics.reporter.ssl.key.password=REPLACEME 18 | 19 | {% for key, value in kafka.broker.config.items() %} 20 | {{key}}={{value}} 21 | {% endfor %} 22 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/defaults/main.yml: -------------------------------------------------------------------------------- 1 | security_mode: ssl 2 | schema: 3 | registry: 4 | user: cp-schema-registry 5 | group: confluent 6 | kerberos_kdc_url: 7 | config_file: /etc/schema-registry/schema-registry.properties 8 | jaas_config_file: /etc/kafka/schema_jaas.conf 9 | service_name: confluent-schema-registry 10 | config: 11 | kafkastore.topic: _schemas 12 | debug: false 13 | environment: 
14 | SCHEMA_REGISTRY_HEAP_OPTS: "-Xmx1000M" 15 | SCHEMA_REGISTRY_OPTS: "-Djava.security.auth.login.config=/etc/kafka/schema_jaas.conf -Djavax.security.auth.useSubjectCredsOnly=false" 16 | systemd: 17 | enabled: yes 18 | state: started 19 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart schema-registry 2 | systemd: 3 | name: "{{ schema.registry.service_name }}" 4 | state: restarted 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Kerberos library dependencies 3 | yum: 4 | name: krb5-workstation 5 | state: present 6 | 7 | - name: schema-registry create group 8 | group: 9 | name: "{{ schema.registry.group }}" 10 | 11 | - name: schema-registry create user 12 | user: 13 | name: "{{ schema.registry.user }}" 14 | comment: "Schema Registry User" 15 | system: yes 16 | group: "{{ schema.registry.group }}" 17 | 18 | #- name: update the krb5.conf config file 19 | # template: 20 | # src: krb5.conf.j2 21 | # dest: /etc/krb5.conf 22 | # owner: root 23 | # group: root 24 | # mode: 0600 25 | 26 | - name: schema-registry create config (plaintext) 27 | template: 28 | src: schema-registry.properties.j2 29 | dest: "{{ schema.registry.config_file }}" 30 | mode: 0640 31 | owner: "{{ schema.registry.user }}" 32 | group: "{{ schema.registry.group }}" 33 | when: security_mode == "plaintext" 34 | notify: 35 | - restart 
schema-registry 36 | 37 | - name: schema-registry create config (ssl) 38 | template: 39 | src: schema-registry_ssl.properties.j2 40 | dest: "{{ schema.registry.config_file }}" 41 | mode: 0640 42 | owner: "{{ schema.registry.user }}" 43 | group: "{{ schema.registry.group }}" 44 | when: security_mode == "ssl" 45 | notify: 46 | - restart schema-registry 47 | 48 | - name: schema-registry create config (sasl_ssl) 49 | template: 50 | src: schema-registry_sasl_ssl.properties.j2 51 | dest: "{{ schema.registry.config_file }}" 52 | mode: 0640 53 | owner: "{{ schema.registry.user }}" 54 | group: "{{ schema.registry.group }}" 55 | when: security_mode == "sasl_ssl" 56 | notify: 57 | - restart schema-registry 58 | 59 | - name: schema sasl_ssl JAAS config 60 | template: 61 | src: schema_jaas.conf.j2 62 | dest: "{{ schema.registry.jaas_config_file }}" 63 | mode: 0640 64 | owner: "{{ schema.registry.user }}" 65 | group: "{{ schema.registry.group }}" 66 | when: security_mode == "sasl_ssl" 67 | notify: 68 | - restart schema-registry 69 | 70 | - name: schema-registry configure service 71 | systemd: 72 | name: "{{ schema.registry.service_name }}" 73 | enabled: "{{ schema.registry.systemd.enabled }}" 74 | state: "{{ schema.registry.systemd.state }}" 75 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/templates/krb5.conf.j2: -------------------------------------------------------------------------------- 1 | [libdefaults] 2 | default_realm = domain.name 3 | dns_lookup_realm = true 4 | dns_lookup_kdc = true 5 | ticket_lifetime = 10h 6 | renewal_lifetime = 3d 7 | 8 | forwardable = true 9 | udp_preference_limit = 1000000 10 | default_tkt_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 11 | default_tgs_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 12 | permitted_enctypes = des-cbc-md5 des-cbc-crc des3-cbc-sha1 13 | 14 | [realms] 15 | domain.name = { 16 | kdc = {{ schema.registry.kerberos_kdc_url }}:88
17 | admin_server = {{ schema.registry.kerberos_kdc_url }}:749 18 | default_domain = domain.name 19 | } 20 | 21 | [domain_realm] 22 | .domain.name = domain.name 23 | domain.name = domain.name 24 | 25 | [logging] 26 | kdc = FILE:/var/log/krb5kdc.log 27 | admin_server = FILE:/var/log/kadmin.log 28 | default = FILE:/var/log/krb5lib.log 29 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/templates/schema-registry.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | 3 | listeners=http://0.0.0.0:8081 4 | {% for key, value in schema.registry.config.items() %} 5 | {{key}}={{value}} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/templates/schema-registry_sasl_ssl.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | 3 | listeners=https://0.0.0.0:8082 4 | schema.registry.inter.instance.protocol=https 5 | 6 | #SSL additions 7 | kafkastore.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 8 | kafkastore.ssl.truststore.password=REPLACEME 9 | kafkastore.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 10 | kafkastore.ssl.keystore.password=REPLACEME 11 | kafkastore.ssl.key.password=REPLACEME 12 | 13 | #SASL additions 14 | kafkastore.sasl.mechanism=GSSAPI 15 | sasl.kerberos.service.name=kafka 16 | kafkastore.security.protocol=SASL_SSL 17 | security.protocol=SASL_SSL 18 | 19 | {% for key, value in schema.registry.config.items() %} 20 | {{key}}={{value}} 21 | {% endfor %} 22 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/templates/schema-registry_ssl.properties.j2: 
-------------------------------------------------------------------------------- 1 | 2 | #SSL additions 3 | listeners=https://0.0.0.0:8082 4 | schema.registry.inter.instance.protocol=https 5 | 6 | kafkastore.ssl.truststore.location=/var/ssl/private/kafka.client.truststore.jks 7 | kafkastore.ssl.truststore.password=REPLACEME 8 | kafkastore.ssl.keystore.location=/var/ssl/private/kafka.client.keystore.jks 9 | kafkastore.ssl.keystore.password=REPLACEME 10 | kafkastore.ssl.key.password=REPLACEME 11 | kafkastore.security.protocol=SSL 12 | 13 | {% for key, value in schema.registry.config.items() %} 14 | {{key}}={{value}} 15 | {% endfor %} 16 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.schema-registry/templates/schema_jaas.conf.j2: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/etc/security/keytabs/schema.service.keytab" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="schema/REPLACEMEWITHSED@domain.name"; 8 | }; 9 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.ssl_CA/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.ssl_CA/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: create ssl certificate directory 2 | file: 3 | path: /var/ssl/private 4 | state: directory 5 | mode: 0755 6 | - name: pull out cert gen script 7 | synchronize: 8 | src: scripts/ 9 | dest: /tmp/scripts 10 | delegate_to: localhost 11 | - name: Changing perm of certs-create, adding "+x" 12 | file: 13 | dest: /tmp/scripts/certs-create.sh 14 | mode: a+x 15 | register: out 16 | 
- name: create ssl certs 17 | command: /tmp/scripts/certs-create.sh 18 | args: 19 | chdir: /var/ssl/private 20 | run_once: true 21 | register: out 22 | - debug: var=out.stdout_lines 23 | - debug: var=out.stderr_lines 24 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/defaults/main.yml: -------------------------------------------------------------------------------- 1 | security_mode: sasl_ssl 2 | zookeeper: 3 | user: cp-kafka 4 | group: confluent 5 | config_file: /etc/kafka/zookeeper.properties 6 | log4j_config_file: /etc/kafka/zookeeper-log4j.properties 7 | jaas_config_file: /etc/kafka/zookeeper_jaas.conf 8 | systemd_env_file: /etc/kafka/zookeeper-env.sh 9 | systemd_config_file: /etc/systemd/system/zookeeper.service 10 | service_name: zookeeper.service 11 | start_class: /bin/zookeeper-server-start 12 | config: 13 | maxClientCnxns: 0 14 | autopurge.snapRetainCount: 10 15 | autopurge.purgeInterval: 1 16 | dataDir: /var/lib/zookeeper 17 | clientPort: 2181 18 | environment: 19 | KAFKA_HEAP_OPTS: "-Xmx1000M" 20 | LOG_DIR: /var/log/zookeeper 21 | systemd: 22 | enabled: yes 23 | state: started 24 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/files/zookeeper-server-start: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Licensed to the Apache Software Foundation (ASF) under one or more 3 | # contributor license agreements. See the NOTICE file distributed with 4 | # this work for additional information regarding copyright ownership. 5 | # The ASF licenses this file to You under the Apache License, Version 2.0 6 | # (the "License"); you may not use this file except in compliance with 7 | # the License. 
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | 18 | if [ $# -lt 1 ]; 19 | then 20 | echo "USAGE: $0 [-daemon] zookeeper.properties" 21 | exit 1 22 | fi 23 | base_dir=$(dirname "$0") 24 | 25 | export ZOOCFGDIR=/etc/kafka 26 | if [ -f "${ZOOCFGDIR}/zookeeper-env.sh" ]; then 27 | . "${ZOOCFGDIR}/zookeeper-env.sh" 28 | 29 | fi 30 | 31 | if [ "x$KAFKA_LOG4J_OPTS" = "x" ]; then 32 | LOG4J_CONFIG_NORMAL_INSTALL="/etc/kafka/log4j.properties" 33 | LOG4J_CONFIG_ZIP_INSTALL="$base_dir/../etc/kafka/log4j.properties" 34 | if [ -e "$LOG4J_CONFIG_NORMAL_INSTALL" ]; then # Normal install layout 35 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_CONFIG_NORMAL_INSTALL}" 36 | elif [ -e "${LOG4J_CONFIG_ZIP_INSTALL}" ]; then # Simple zip file layout 37 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:${LOG4J_CONFIG_ZIP_INSTALL}" 38 | else # Fallback to normal default 39 | KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:$base_dir/../config/log4j.properties" 40 | fi 41 | fi 42 | export KAFKA_LOG4J_OPTS 43 | 44 | if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then 45 | export KAFKA_HEAP_OPTS="-Xmx512M -Xms512M" 46 | fi 47 | 48 | EXTRA_ARGS=${EXTRA_ARGS-'-name zookeeper -loggc'} 49 | 50 | COMMAND=$1 51 | case $COMMAND in 52 | -daemon) 53 | EXTRA_ARGS="-daemon "$EXTRA_ARGS 54 | shift 55 | ;; 56 | *) 57 | ;; 58 | esac 59 | 60 | exec "$base_dir/kafka-run-class" "$EXTRA_ARGS" org.apache.zookeeper.server.quorum.QuorumPeerMain "$@" 61 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | - name: restart zookeeper 2 | systemd: 3 | name: "{{ zookeeper.service_name }}" 4 | state: restarted 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/krb5.conf.j2: -------------------------------------------------------------------------------- 1 | # Configuration snippets may be placed in this directory as well 2 | includedir /etc/krb5.conf.d/ 3 | 4 | [libdefaults] 5 | default_realm = domain.name 6 | clockskew = 300 7 | ticket_lifetime = 1d 8 | forwardable = true 9 | proxiable = true 10 | dns_lookup_realm = true 11 | dns_lookup_kdc = false 12 | 13 | [realms] 14 | domain.name = { 15 | kdc = 10.142.68.19:88 16 | kpasswd_server = 10.142.68.19:464 17 | } 18 | 19 | [domain_realm] 20 | domain.name.com = domain.name 21 | .domain.name.com = domain.name 22 | domain.name = domain.name 23 | .domain.name = domain.name 24 | 25 | [appdefaults] 26 | pam = { 27 | ticket_lifetime = 1d 28 | renew_lifetime = 1d 29 | forwardable = true 30 | proxiable = false 31 | retain_after_close = false 32 | minimum_uid = 0 33 | debug = false 34 | } 35 | 36 | [logging] 37 | default = FILE:/var/log/krb5libs.log 38 | kdc = FILE:/var/log/kdc.log 39 | admin_server = FILE:/var/log/kadmind.log 40 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/zookeeper.env.j2: -------------------------------------------------------------------------------- 1 | export KAFKA_OPTS="-Djava.security.auth.login.config={{ zookeeper.jaas_config_file }}" 2 | export 
KAFKA_LOG4J_OPTS="-Dlog4j.configuration=file:/etc/kafka/zookeeper-log4j.properties" 3 | export KAFKA_HEAP_OPTS="-Xmx1024m -Xms1024m" 4 | export LOG_DIR=/var/log/zookeeper 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/zookeeper.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 | {% for key, value in zookeeper.config.items() %} 3 | {{key}}={{value}} 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/zookeeper.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Apache Zookeeper server 3 | Documentation=http://zookeeper.apache.org 4 | Requires=network.target remote-fs.target 5 | After=network.target remote-fs.target 6 | 7 | [Service] 8 | Type=simple 9 | User=cp-kafka 10 | Group=confluent 11 | ExecStart=/bin/zookeeper-server-start /etc/kafka/zookeeper.properties 12 | ExecStop=/bin/zookeeper-server-stop 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/zookeeper_jaas.conf.j2: -------------------------------------------------------------------------------- 1 | Server { 2 | com.sun.security.auth.module.Krb5LoginModule required 3 | useKeyTab=true 4 | keyTab="/etc/security/keytabs/zookeeper.service.keytab" 5 | storeKey=true 6 | useTicketCache=false 7 | principal="zookeeper/REPLACEMEWITHSED@domain.name"; 8 | }; 9 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/confluent.zookeeper/templates/zookeeper_sasl.properties.j2: -------------------------------------------------------------------------------- 1 | # Maintained by Ansible 2 
| 3 | #SASL Additions 4 | sasl.kerberos.service.name=zookeeper 5 | zookeeper.set.acl=true 6 | authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider 7 | requireClientAuthScheme=sasl 8 | 9 | jaasLoginRenew=3600000 10 | kerberos.removeHostFromPrincipal=true 11 | kerberos.removeRealmFromPrincipal=true 12 | 13 | 14 | {% for key, value in zookeeper.config.items() %} 15 | {{key}}={{value}} 16 | {% endfor %} 17 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/debugging_tools/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | network_debug_tools: no 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/debugging_tools/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: network.yml 3 | when: network_debug_tools 4 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/debugging_tools/tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install tools needed to debug network issues 3 | yum: state=present name={{ item }} 4 | with_items: 5 | - strace 6 | - make 7 | - gcc 8 | - tcpdump 9 | 10 | - name: Download and extract proxychains source 11 | unarchive: 12 | src: https://github.com/rofl0r/proxychains-ng/releases/download/v4.12/proxychains-ng-4.12.tar.xz 13 | dest: /opt 14 | remote_src: yes 15 | list_files: yes 16 | register: proxychains_src 17 | 18 | - name: Configure proxychains 19 | command: ./configure 20 | args: 21 | chdir: /opt/{{ proxychains_src.files[0].split('/')[0] }} 22 | 23 | - name: Build proxychains 24 | make: 25 | chdir: /opt/{{ proxychains_src.files[0].split('/')[0] }} 26 | 27 | - name: Install proxychains 28 | make: 29 | target: install 30 | chdir: /opt/{{ 
proxychains_src.files[0].split('/')[0] }} 31 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # OpenJDK version 3 | java__version: 8 4 | 5 | # If the APT configuration is not detected, the role will install the default 6 | # Java packages for a given OS release. 7 | java__install: "{{ True 8 | if (ansible_local|d() and ansible_local.apt|d() and 9 | ansible_local.apt.configured|d()) 10 | else False }}" 11 | 12 | # By default the role installs only the Java Runtime Environment (JRE) 13 | # packages. Other Ansible roles can request installation of the compatible Java 14 | # Development Kit (JDK) by enabling this variable. 15 | java__install_jdk: false 16 | 17 | # 18 | # List of default APT packages which should be installed for Java Runtime 19 | # Environment. 20 | java__base_packages: '{{ ([ "openjdk-" + java__version|string + "-jre-headless" 21 | ] if java__install|bool else [ "default-jre-headless" ]) 22 | + [ "ca-certificates-java" ] }}' 23 | 24 | # List of default APT packages which should be installed for Java Development 25 | # Kit. 26 | java__jdk_packages: '{{ ([ "openjdk-" + java__version|string + "-jdk-headless" ] 27 | if java__install|bool 28 | else ([ "default-jdk" ] 29 | if (ansible_distribution_release in 30 | [ "wheezy", "jessie", "precise", "trusty" ]) 31 | else [ "default-jdk-headless" ])) 32 | if java__install_jdk|bool else [] }}' 33 | 34 | # List of packages which should be installed on all hosts in Ansible 35 | # inventory. 36 | java__packages: [] 37 | 38 | # List of packages which should be installed on a group of hosts in Ansible 39 | # inventory. 40 | java__group_packages: [] 41 | 42 | # List of packages which should be installed on specific hosts in Ansible 43 | # inventory. 
44 | java__host_packages: [] 45 | 46 | # List of packages requested by other Ansible roles. 47 | java__dependent_packages: [] 48 | 49 | # Set java alternatives. 50 | java__alternatives: "" 51 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/meta/.galaxy_install_info: -------------------------------------------------------------------------------- 1 | {install_date: 'Tue Nov 26 15:04:20 2019', version: v1.1.1} 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: openjdk 4 | author: "Fran Rodríguez" 5 | description: "Manage Java OpenJRE/OpenJDK environment" 6 | company: "DEVO Inc." 7 | license: "GPL-3.0" 8 | min_ansible_version: "2.0.0" 9 | platforms: 10 | - name: Ubuntu 11 | versions: 12 | - precise 13 | - quantal 14 | - raring 15 | - saucy 16 | - trusty 17 | - name: Debian 18 | versions: 19 | - wheezy 20 | - jessie 21 | galaxy_tags: 22 | - java 23 | - development 24 | - jre 25 | - jdk 26 | - openjdk 27 | - openjre 28 | 29 | dependencies: [] 30 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | RUN if [ $(command -v apt-get) ]; then apt-get update && apt-get install -y python sudo bash ca-certificates && apt-get clean; \ 10 | elif [ $(command -v dnf) ]; then dnf makecache && dnf --assumeyes install python sudo python-devel python2-dnf bash && dnf clean all; \ 11 | elif [ $(command -v yum) ]; then yum makecache fast && 
yum install -y python sudo yum-plugin-ovl bash && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 12 | elif [ $(command -v zypper) ]; then zypper refresh && zypper install -y python sudo bash python-xml && zypper clean -a; \ 13 | elif [ $(command -v apk) ]; then apk update && apk add --no-cache python sudo bash ca-certificates; \ 14 | elif [ $(command -v xbps-install) ]; then xbps-install -Syu && xbps-install -y python sudo bash ca-certificates && xbps-remove -O; fi 15 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/molecule/default/INSTALL.rst: -------------------------------------------------------------------------------- 1 | ******* 2 | Docker driver installation guide 3 | ******* 4 | 5 | Requirements 6 | ============ 7 | 8 | * General molecule dependencies (see https://molecule.readthedocs.io/en/latest/installation.html) 9 | * Docker Engine 10 | * docker-py 11 | * docker 12 | 13 | Install 14 | ======= 15 | 16 | $ sudo pip install docker-py 17 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: 7 | name: yamllint 8 | platforms: 9 | - name: centos-7 10 | image: centos:7 11 | - name: ubuntu-16.04 12 | image: ubuntu:16.04 13 | - name: ubuntu-18.04 14 | image: ubuntu:18.04 15 | provisioner: 16 | name: ansible 17 | lint: 18 | name: ansible-lint 19 | scenario: 20 | name: default 21 | verifier: 22 | name: testinfra 23 | lint: 24 | name: flake8 25 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/molecule/default/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 
3 | hosts: all 4 | become: true 5 | 6 | pre_tasks: 7 | - name: Update apt cache. 8 | apt: 9 | update_cache: true 10 | cache_valid_time: 600 11 | when: ansible_os_family == 'Debian' 12 | changed_when: false 13 | 14 | roles: 15 | - role: ansible-openjdk 16 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/molecule/default/tests/test_default.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import testinfra.utils.ansible_runner 4 | 5 | testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( 6 | os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') 7 | 8 | 9 | def test_hosts_file(host): 10 | f = host.file('/etc/hosts') 11 | 12 | assert f.exists 13 | assert f.user == 'root' 14 | assert f.group == 'root' 15 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include version-specific variables for CentOS/RHEL. 
3 | include_vars: "RedHat-java-{{ java__version }}.yml" 4 | when: ansible_distribution == 'CentOS' or 5 | ansible_distribution == 'Red Hat Enterprise Linux' or 6 | ansible_distribution == 'RedHat' 7 | 8 | - name: Install Java packages 9 | package: 10 | name: "{{ item }}" 11 | state: "present" 12 | with_flattened: 13 | - "{{ java__base_packages }}" 14 | - "{{ java__jdk_packages }}" 15 | - "{{ java__packages }}" 16 | - "{{ java__group_packages }}" 17 | - "{{ java__host_packages }}" 18 | - "{{ java__dependent_packages }}" 19 | 20 | - name: Update Java alternatives 21 | command: "update-java-alternatives -s {{ java__alternatives }}" 22 | when: java__alternatives|d() 23 | 24 | - name: Create environment var file 25 | copy: 26 | dest: "/etc/profile.d/java.sh" 27 | content: | 28 | JAVA_HOME=$(dirname $(dirname $(readlink -f $(which java)))) 29 | export JAVA_HOME 30 | 31 | - name: Make sure that Ansible local facts directory exists 32 | file: 33 | path: "/etc/ansible/facts.d" 34 | state: "directory" 35 | owner: "root" 36 | group: "root" 37 | mode: "0755" 38 | 39 | - name: Save Java local facts 40 | template: 41 | src: "etc/ansible/facts.d/java.fact.j2" 42 | dest: "/etc/ansible/facts.d/java.fact" 43 | owner: "root" 44 | group: "root" 45 | mode: "0755" 46 | register: java__register_facts 47 | 48 | - name: Update Ansible facts if they were modified 49 | action: setup 50 | when: java__register_facts is changed 51 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/templates/etc/ansible/facts.d/java.fact.j2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # {{ ansible_managed }} 4 | 5 | from __future__ import print_function 6 | from json import dumps 7 | from sys import exit 8 | import subprocess 9 | import signal 10 | import os 11 | 12 | output = {"installed": True} 13 | 14 | try: 15 | with open(os.devnull, 'w') as devnull: 16 | 
java_stdout = subprocess.check_output( 17 | ["/usr/bin/java -version 2>&1"], 18 | shell=True) 19 | 20 | except subprocess.CalledProcessError: 21 | pass 22 | 23 | if java_stdout: 24 | for line in java_stdout.split('\n'): 25 | if 'version' in line: 26 | output['version'] = line.split()[2].strip('"').split('_')[0] 27 | 28 | print(dumps(output, sort_keys=True, indent=2)) 29 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/vars/RedHat-java-11.yml: -------------------------------------------------------------------------------- 1 | --- 2 | java__base_packages: 3 | - java-{{ java__version|string }}-openjdk-headless 4 | java__jdk_packages: [] 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/devoinc.openjdk/vars/RedHat-java-8.yml: -------------------------------------------------------------------------------- 1 | --- 2 | java__base_packages: 3 | - java-1.{{ java__version|string }}.0-openjdk-headless 4 | java__jdk_packages: [] 5 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudo: required 3 | 4 | dist: trusty 5 | language: python 6 | python: "2.7" 7 | services: 8 | - docker 9 | 10 | env: 11 | - distro: centos7 12 | init: /usr/lib/systemd/systemd 13 | run_opts: "--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" 14 | - distro: centos6 15 | init: /sbin/init 16 | run_opts: "" 17 | - distro: ubuntu1604 18 | init: /lib/systemd/systemd 19 | run_opts: "--privileged --volume=/sys/fs/cgroup:/sys/fs/cgroup:ro" 20 | - distro: ubuntu1404 21 | init: /sbin/init 22 | run_opts: "" 23 | 24 | before_install: 25 | # Pull container from Docker Hub. 
26 | - "docker pull geerlingguy/docker-${distro}-ansible:latest" 27 | 28 | script: 29 | # Create a random file to store the container ID. 30 | - container_id=$(mktemp) 31 | 32 | # Run container detached, with our role mounted inside. 33 | - 'docker run --detach --volume="${PWD}":/etc/ansible/roles/role_under_test:ro ${run_opts} geerlingguy/docker-${distro}-ansible:latest "${init}" > "${container_id}"' 34 | 35 | # Ansible syntax check. 36 | - 'docker exec --tty "$(cat ${container_id})" env TERM=xterm ansible-playbook /etc/ansible/roles/role_under_test/tests/test.yml --syntax-check' 37 | 38 | # Test role. 39 | - 'docker exec --tty "$(cat ${container_id})" env TERM=xterm ansible-playbook /etc/ansible/roles/role_under_test/tests/test.yml -vvv' 40 | 41 | notifications: 42 | webhooks: https://galaxy.ansible.com/api/v1/notifications/ 43 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/README.md: -------------------------------------------------------------------------------- 1 | dhoeric.aws-ssm 2 | ========= 3 | 4 | [![Build Status](https://travis-ci.org/dhoeric/ansible-aws-ssm.svg?branch=master)](https://travis-ci.org/dhoeric/ansible-aws-ssm) 5 | [![Ansible Role](https://img.shields.io/ansible/role/17714.svg)](https://galaxy.ansible.com/dhoeric/aws-ssm/) 6 | [![Ansible Role](https://img.shields.io/ansible/role/d/17714.svg)](https://galaxy.ansible.com/dhoeric/aws-ssm/) 7 | 8 | Install AWS EC2 Systems Manager (SSM) agent 9 | 10 | 11 | 12 | Requirements 13 | ------------ 14 | 15 | None 16 | 17 | Role Variables 18 | -------------- 19 | 20 | Available variables are listed below, along with default values: 21 | 22 | ```yaml 23 | # The defaults provided by this role are specific to each distribution. 
24 | url: 'amd64' 25 | ``` 26 | 27 | For installation in [Raspbian](https://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-manual-agent-install.html#agent-install-raspbianjessie), please find the activation code and id before using this role 28 | 29 | ```yaml 30 | url: 'arm' 31 | aws_ssm_activation_code: 32 | aws_ssm_activation_id: 33 | aws_ssm_ec2_region: "{{ec2_region}}" 34 | ``` 35 | 36 | Dependencies 37 | ------------ 38 | 39 | None 40 | 41 | Example Playbook 42 | ---------------- 43 | 44 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 45 | 46 | ```yaml 47 | - hosts: servers 48 | roles: 49 | - { role: dhoeric.aws-ssm } 50 | ``` 51 | 52 | License 53 | ------- 54 | 55 | MIT 56 | 57 | Author Information 58 | ------------------ 59 | 60 | 61 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for aws-ssm 3 | url: "amd64" 4 | #aws_ssm_activation_code: 5 | #aws_ssm_activation_id: 6 | #aws_ssm_ec2_region: "{{ec2_region}}" 7 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/files/policy.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart amazon-ssm-agent 3 | service: 4 | name: amazon-ssm-agent 5 | state: restarted 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/meta/.galaxy_install_info: 
-------------------------------------------------------------------------------- 1 | {install_date: 'Tue Nov 26 13:53:22 2019', version: v1.0.1} 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for aws-ssm 3 | - name: Get CPU architecture 4 | command: getconf LONG_BIT 5 | register: cpu_arch 6 | changed_when: False 7 | check_mode: no 8 | 9 | - name: Change URL destination for 32bit arch 10 | set_fact: 11 | url: "386" 12 | when: cpu_arch.stdout == '32' 13 | 14 | - name: Install rpm file for Redhat Family (Amazon Linux, RHEL, and CentOS) 32/64-bit 15 | become: yes 16 | become_user: root 17 | yum: 18 | name: "https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_{{ url }}/amazon-ssm-agent.rpm" 19 | state: present 20 | when: ansible_os_family == 'RedHat' 21 | 22 | - name: Install deb file for Debian family 32/64-bit 23 | become: yes 24 | become_user: root 25 | apt: 26 | deb: "https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/debian_{{ url }}/amazon-ssm-agent.deb" 27 | when: ansible_os_family == 'Debian' 28 | 29 | - include: register.yml 30 | when: aws_ssm_activation_code is defined and aws_ssm_activation_id is defined 31 | 32 | - name: Register to service 33 | become: yes 34 | become_user: root 35 | service: 36 | name: amazon-ssm-agent 37 | enabled: yes 38 | state: started 39 | when: ansible_os_family == 'Debian' 40 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/tasks/register.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if node is registered 3 | stat: 4 | path: /var/lib/amazon/ssm/registration 5 | register: stat_result 6 | 7 | - name: Register managed instance 8 | command: amazon-ssm-agent -register 
-clear -code '{{aws_ssm_activation_code}}' -id '{{aws_ssm_activation_id}}' -region '{{aws_ssm_ec2_region}}' -y 9 | notify: restart amazon-ssm-agent 10 | when: not stat_result.stat.exists 11 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: root 4 | roles: 5 | - role_under_test 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/dhoeric.aws-ssm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for aws-ssm 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/jmeter/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download JMeter installer 3 | get_url: 4 | url: http://mirrors.ukfast.co.uk/sites/ftp.apache.org//jmeter/source/apache-jmeter-5.0_src.tgz 5 | dest: /tmp/apache-jmeter.tgz 6 | mode: 0644 7 | 8 | - name: Extract .tgz file into directory 9 | unarchive: 10 | src: /tmp/apache-jmeter.tgz 11 | dest: / 12 | 13 | - name: Remove Jmeter installer 14 | file: 15 | state: absent 16 | path: /tmp/apache-jmeter.tgz 17 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/openjdk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install OpenJDK 3 | yum: 4 | name: java-1.8.0-openjdk 5 | state: latest 6 | update_cache: yes 7 | tags: [install, yum] 8 | 9 
| - name: Check the Java version 10 | command: java -version 11 | changed_when: false 12 | register: java_version 13 | 14 | - name: Display the Java version. 15 | debug: 16 | var: java_version 17 | verbosity: 2 18 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/oracle_sdk/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | oracle_java_url: "" 3 | oracle_java_rpm: "{{ oracle_java_url.split('/')[8] }}" 4 | oracle_java_checksum: "" 5 | java_home: "/usr/java/latest" 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/oracle_sdk/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download the Oracle Java RPM file 3 | get_url: 4 | url: "{{ oracle_java_url }}" 5 | dest: "/opt/{{ oracle_java_rpm }}" 6 | headers: "Cookie:oraclelicense=accept-securebackup-cookie" 7 | checksum: "sha256:{{ oracle_java_checksum }}" 8 | 9 | - name: Install Oracle Java 10 | yum: 11 | name: "/opt/{{ oracle_java_rpm }}" 12 | state: present 13 | 14 | - name: Put in place JAVA_HOME env variable 15 | lineinfile: 16 | dest: /etc/profile 17 | regexp: "^(export JAVA_HOME=)" 18 | state: present 19 | line: "export JAVA_HOME={{ java_home }}" 20 | 21 | - name: Clean-up Oracle Java RPM file 22 | file: 23 | path: "/opt/{{ oracle_java_rpm }}" 24 | state: absent 25 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/oracle_sdk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determine if Oracle Java is installed 3 | stat: path={{ java_home }}/bin/java 4 | register: java_install_status 5 | 6 | - name: Determine version of Oracle Java installed 7 | shell: "{{ java_home }}/bin/java -version" 8 | register: java_version_output 9 | when: 
java_install_status.stat.exists 10 | 11 | - name: Set installed Oracle Java version fact 12 | set_fact: 13 | installed_oracle_java_version: '{{ java_version_output.stderr | regex_search(".*") }}' 14 | when: java_install_status.stat.exists 15 | 16 | - include: install.yml 17 | when: installed_oracle_java_version is not defined or installed_oracle_java_version != oracle_java_version 18 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/README.md: -------------------------------------------------------------------------------- 1 | # dhoeric.aws-ssm 2 | 3 | [![Build Status](https://travis-ci.org/dhoeric/ansible-aws-ssm.svg?branch=master)](https://travis-ci.org/dhoeric/ansible-aws-ssm) 4 | [![Ansible Role](https://img.shields.io/ansible/role/17714.svg)](https://galaxy.ansible.com/dhoeric/aws-ssm/) 5 | [![Ansible Role](https://img.shields.io/ansible/role/d/17714.svg)](https://galaxy.ansible.com/dhoeric/aws-ssm/) 6 | 7 | Install AWS EC2 Systems Manager (SSM) agent 8 | 9 | 10 | 11 | ## Requirements 12 | 13 | None 14 | 15 | ## Role Variables 16 | 17 | None 18 | 19 | ## Dependencies 20 | 21 | None 22 | 23 | ## Example Playbook 24 | 25 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 26 | 27 | - hosts: servers 28 | roles: 29 | - { role: dhoeric.aws-ssm } 30 | 31 | ## License 32 | 33 | MIT 34 | 35 | ## Author Information 36 | 37 | 38 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for aws-ssm 3 | url: "amd64" 4 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/files/policy.xml: -------------------------------------------------------------------------------- 1 | 2 
| 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for aws-ssm 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for aws-ssm 3 | - name: Get CPU architecture 4 | command: getconf LONG_BIT 5 | register: cpu_arch 6 | changed_when: False 7 | check_mode: no 8 | 9 | - name: Change URL destination for 32bit arch 10 | set_fact: 11 | url: "386" 12 | when: cpu_arch.stdout == '32' 13 | 14 | - name: Install rpm file for Redhat Family (Amazon Linux, RHEL, and CentOS) 32/64-bit 15 | become: yes 16 | become_user: root 17 | yum: 18 | name: "https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_{{ url }}/amazon-ssm-agent.rpm" 19 | state: present 20 | when: ansible_os_family == 'RedHat' 21 | 22 | - name: Install deb file for Debian family 32/64-bit 23 | become: yes 24 | become_user: root 25 | apt: 26 | deb: "https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/debian_{{ url }}/amazon-ssm-agent.deb" 27 | when: ansible_os_family == 'Debian' 28 | 29 | - name: Register to service 30 | become: yes 31 | become_user: root 32 | service: 33 | name: amazon-ssm-agent 34 | enabled: yes 35 | state: started 36 | when: ansible_os_family == 'Debian' 37 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/tests/test.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | remote_user: root 4 | roles: 5 | - role_under_test 6 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/ssmagent/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for aws-ssm 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/stunnel/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | # Location of the Stunnel config file 2 | stunnel_config_file: /etc/stunnel/stunnel.conf 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/stunnel/files/stunnel.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=SSL tunnel for network daemons 3 | After=network.target 4 | After=syslog.target 5 | 6 | [Install] 7 | WantedBy=multi-user.target 8 | Alias=stunnel.target 9 | 10 | [Service] 11 | Type=forking 12 | ExecStart=/usr/bin/stunnel /etc/stunnel/stunnel.conf 13 | ExecStop=/usr/bin/killall -9 stunnel 14 | 15 | # Give up if ping don't get an answer 16 | TimeoutSec=600 17 | 18 | Restart=always 19 | PrivateTmp=false 20 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/stunnel/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for stunnel 3 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/stunnel/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Stunnel 3 | become: yes 4 | become_user: root 5 | yum: 6 | name: stunnel 7 | state: present 8 | 9 | - 
name: Copying Stunnel Service file 10 | copy: 11 | src: "{{ role_path }}/files/stunnel.service" 12 | dest: /lib/systemd/system/stunnel.service 13 | 14 | - name: Start Stunnel at boot time 15 | systemd: 16 | name: stunnel 17 | enabled: yes 18 | masked: no 19 | 20 | - name: create stunnel directory 21 | file: 22 | path: /var/run/stunnel 23 | state: directory 24 | 25 | - name: Stunnel Directory Permissions 26 | file: 27 | path: /var/run/stunnel 28 | state: directory 29 | mode: 0755 30 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/stunnel/vars/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/JamesWoolfenden/packer-by-example/8d42183bd35c515015dac55993821e33f89b8241/provisioners/ansible/roles/stunnel/vars/main.yml -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudoer_aliases: {} 3 | sudoer_specs: [] 4 | sudoer_defaults: 5 | # - requiretty 6 | - "!visiblepw" 7 | - always_set_home 8 | - env_reset 9 | - env_keep: 10 | - COLORS 11 | - DISPLAY 12 | - HOSTNAME 13 | - HISTSIZE 14 | - INPUTRC 15 | - KDEDIR 16 | - LS_COLORS 17 | - MAIL 18 | - PS1 19 | - PS2 20 | - QTDIR 21 | - USERNAME 22 | - LANG 23 | - LC_ADDRESS 24 | - LC_CTYPE 25 | - LC_COLLATE 26 | - LC_IDENTIFICATION 27 | - LC_MEASUREMENT 28 | - LC_MESSAGES 29 | - LC_MONETARY 30 | - LC_NAME 31 | - LC_NUMERIC 32 | - LC_PAPER 33 | - LC_TELEPHONE 34 | - LC_TIME 35 | - LC_ALL 36 | - LANGUAGE 37 | - LINGUAS 38 | - _XKB_CHARSET 39 | - XAUTHORITY 40 | - secure_path: /sbin:/bin:/usr/sbin:/usr/bin 41 | sudoer_separate_specs: True 42 | sudoer_rewrite_sudoers_file: True 43 | sudoer_remove_unauthorized_specs: True 44 | -------------------------------------------------------------------------------- 
/provisioners/ansible/roles/sudoers/filter_plugins/to_list.py: -------------------------------------------------------------------------------- 1 | def to_list(a, *args, **kw): 2 | if (isinstance(a, list)): 3 | return a 4 | else: 5 | return [a] 6 | 7 | class FilterModule(object): 8 | def filters(self): 9 | return { 10 | 'to_list': to_list 11 | } 12 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: 3 | - Tyler Cross 4 | - Andrew J. Huffman 5 | description: Controls the configuration of the sudoers file and /etc/sudoers.d/ files 6 | issue_tracker_url: https://github.com/wtcross/ansible-sudoers/issues 7 | license: MIT 8 | min_ansible_version: 2.0 9 | #github_branch: master 10 | platforms: 11 | - name: EL 12 | versions: 13 | - all 14 | - name: Fedora 15 | versions: 16 | - all 17 | - name: opensuse 18 | versions: 19 | - all 20 | - name: Amazon 21 | versions: 22 | - all 23 | - name: Ubuntu 24 | versions: 25 | - all 26 | - name: SLES 27 | versions: 28 | - all 29 | - name: Debian 30 | versions: 31 | - all 32 | 33 | galaxy_tags: 34 | - sudo 35 | - sudoers 36 | - sudoers.d 37 | - admin 38 | - system 39 | 40 | dependencies: [] 41 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/templates/sudoer_spec.j2: -------------------------------------------------------------------------------- 1 | #{{ ansible_managed }} 2 | 3 | {% if item.comment is defined and item.comment %} 4 | #{{ item.comment }} 5 | {% endif %} 6 | {% if item.defaults is defined and item.defaults %} 7 | Defaults:{{ item.users | to_list | join(',') }} {{ item.defaults | to_list | join(',') }} 8 | {% endif %} 9 | {{ item.users | to_list | join(',') }} {{ item.hosts | to_list | join(',') }}={% if item.operators is defined and item.operators %}({{ 
item.operators | to_list | join(',') }}){% endif %} {% if item.tags is defined and item.tags %}{{ item.tags | to_list | join(':') }}: {% endif %}{{ item.commands | to_list | join(',') }} 10 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/templates/sudoers_nospec.j2: -------------------------------------------------------------------------------- 1 | #{{ ansible_managed }} 2 | 3 | {% for default in sudoer_defaults %} 4 | {% if default is mapping %} 5 | {% for name, values in default.iteritems() %} 6 | {% for items in values | to_list | slice(6) %} 7 | {% if items %} 8 | Defaults {{ name }} {% if not loop.first %}+{% endif %}= "{{ items | to_list | join(' ') }}" 9 | {% endif -%} 10 | {% endfor %} 11 | {% endfor %} 12 | {% elif default|first == ':' %} 13 | Defaults{{ default }} 14 | {% else %} 15 | Defaults {{ default }} 16 | {% endif %} 17 | {% endfor %} 18 | 19 | {% if sudoer_aliases.user is defined and sudoer_aliases.user %} 20 | #User Aliases 21 | {% for alias in sudoer_aliases.user %} 22 | {% if alias.comment is defined and alias.comment %} 23 | #**{{ alias.comment }} 24 | {% endif %} 25 | User_Alias {{ alias.name }} = {{ alias.users | join(',') }} 26 | {% endfor %} 27 | 28 | {% endif %} 29 | {% if sudoer_aliases.runas is defined and sudoer_aliases.runas %} 30 | #Runas Aliases 31 | {% for alias in sudoer_aliases.runas %} 32 | {% if alias.comment is defined and alias.comment %} 33 | #**{{ alias.comment }} 34 | {% endif %} 35 | Runas_Alias {{ alias.name }} = {{ alias.users | join(',') }} 36 | {% endfor %} 37 | 38 | {% endif %} 39 | {% if sudoer_aliases.host is defined and sudoer_aliases.host %} 40 | #Host Aliases 41 | {% for alias in sudoer_aliases.host %} 42 | {% if alias.comment is defined and alias.comment %} 43 | #**{{ alias.comment }} 44 | {% endif %} 45 | Host_Alias {{ alias.name }} = {{ alias.hosts | join(',') }} 46 | {% endfor %} 47 | 48 | {% endif %} 49 | {% if sudoer_aliases.command is 
defined and sudoer_aliases.command %} 50 | #Command Aliases 51 | {% for alias in sudoer_aliases.command %} 52 | {% if alias.comment is defined and alias.comment %} 53 | #**{{ alias.comment }} 54 | {% endif %} 55 | Cmnd_Alias {{ alias.name }} = {{ alias.commands | join(',') }} 56 | {% endfor %} 57 | 58 | {% endif %} 59 | 60 | ## Allow root to run any commands anywhere 61 | root ALL=(ALL) ALL 62 | 63 | # Include all sudoer specifications 64 | #includedir /etc/sudoers.d 65 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/templates/sudoers_plus_spec.j2: -------------------------------------------------------------------------------- 1 | #{{ ansible_managed }} 2 | 3 | {% for default in sudoer_defaults %} 4 | {% if default is mapping %} 5 | {% for name, values in default.items() %}{# .iteritems() is Python 2 only and fails under Python 3 Jinja2; .items() is correct on both #} 6 | {% for items in values | to_list | slice(6) %} 7 | {% if items %} 8 | Defaults {{ name }} {% if not loop.first %}+{% endif %}= "{{ items | to_list | join(' ') }}" 9 | {% endif -%} 10 | {% endfor %} 11 | {% endfor %} 12 | {% elif default|first == ':' %} 13 | Defaults{{ default }} 14 | {% else %} 15 | Defaults {{ default }} 16 | {% endif %} 17 | {% endfor %} 18 | 19 | {% if sudoer_aliases.user is defined and sudoer_aliases.user %} 20 | #User Aliases 21 | {% for alias in sudoer_aliases.user %} 22 | {% if alias.comment is defined %} 23 | #**{{ alias.comment }} 24 | {% endif %} 25 | User_Alias {{ alias.name }} = {{ alias.users | join(',') }} 26 | {% endfor %} 27 | 28 | {% endif %} 29 | {% if sudoer_aliases.runas is defined and sudoer_aliases.runas %} 30 | #Runas Aliases 31 | {% for alias in sudoer_aliases.runas %} 32 | {% if alias.comment is defined %} 33 | #**{{ alias.comment }} 34 | {% endif %} 35 | Runas_Alias {{ alias.name }} = {{ alias.users | join(',') }} 36 | {% endfor %} 37 | 38 | {% endif %} 39 | {% if sudoer_aliases.host is defined and sudoer_aliases.host %} 40 | #Host Aliases 41 | {% for alias in
sudoer_aliases.host %} 42 | {% if alias.comment is defined %} 43 | #**{{ alias.comment }} 44 | {% endif %} 45 | Host_Alias {{ alias.name }} = {{ alias.hosts | join(',') }} 46 | {% endfor %} 47 | 48 | {% endif %} 49 | {% if sudoer_aliases.command is defined and sudoer_aliases.command %} 50 | #Command Aliases 51 | {% for alias in sudoer_aliases.command %} 52 | {% if alias.comment is defined %} 53 | #**{{ alias.comment }} 54 | {% endif %} 55 | Cmnd_Alias {{ alias.name }} = {{ alias.commands | join(',') }} 56 | {% endfor %} 57 | 58 | {% endif %} 59 | 60 | ## Allow root to run any commands anywhere 61 | root ALL=(ALL) ALL 62 | 63 | {% if sudoer_specs %} 64 | #Sudoer specifications 65 | {% for spec in sudoer_specs %} 66 | {% if spec.comment is defined %} 67 | #**{{ spec.comment }} 68 | {% endif %} 69 | {% if spec.defaults is defined and spec.defaults %} 70 | Defaults:{{ spec.users | to_list | join(',') }} {{ spec.defaults | to_list | join(',') }} 71 | {% endif %} 72 | {{ spec.users | to_list | join(',') }} {{ spec.hosts | to_list | join(',') }}={% if spec.operators is defined and spec.operators %}({{ spec.operators | to_list | join(',') }}){% endif %} {% if spec.tags is defined and spec.tags %}{{ spec.tags | to_list | join(':') }}: {% endif %}{{ spec.commands | to_list | join(',') }} 73 | {% endfor %} 74 | {% endif %} 75 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/test/ansible-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | ## This is an example setup script that you would encapsulate the installation 5 | # What version of avm setup to use 6 | echo "Setting up Ansible Version Manager" 7 | AVM_VERSION="v1.0.0-rc.8" 8 | ## Install Ansible 1.9.6 using pip and label it 'v1.9' 9 | export ANSIBLE_VERSIONS_0="1.9.6" 10 | export INSTALL_TYPE_0="pip" 11 | export ANSIBLE_LABEL_0="v1.9" 12 | ## Install Ansible 2.2.1 using pip and 
label it 'v2.2' 13 | export ANSIBLE_VERSIONS_1="2.2.1.0" 14 | export INSTALL_TYPE_1="pip" 15 | export ANSIBLE_LABEL_1="v2.2" 16 | ## Install Ansible 2.3.1 using pip and label it 'v2.3' 17 | export ANSIBLE_VERSIONS_2="2.3.1.0" 18 | export INSTALL_TYPE_2="pip" 19 | export ANSIBLE_LABEL_2="v2.3" 20 | # Whats the default version 21 | ANSIBLE_DEFAULT_VERSION="v2.3" 22 | 23 | ## Create a temp dir to download avm 24 | avm_dir="$(mktemp -d 2> /dev/null || mktemp -d -t 'mytmpdir')" 25 | git clone https://github.com/ahelal/avm.git "${avm_dir}" > /dev/null 2>&1 26 | 27 | ## Run the setup 28 | /bin/sh "${avm_dir}/setup.sh" 29 | 30 | exit 0 31 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/test/integration/default/default.yml: -------------------------------------------------------------------------------- 1 | - hosts: test-kitchen 2 | remote_user: root 3 | 4 | roles: 5 | - role: ansible-sudoers 6 | sudoer_rewrite_sudoers_file: true 7 | sudoer_remove_unauthorized_specs: false 8 | sudoer_separate_specs: true 9 | sudoer_defaults: 10 | - "!visiblepw" 11 | - always_set_home 12 | - env_reset 13 | - secure_path: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin 14 | - env_keep: 15 | - COLORS 16 | - DISPLAY 17 | - HOSTNAME 18 | - HISTSIZE 19 | - KDEDIR 20 | - LS_COLORS 21 | - MAIL 22 | - PS1 23 | - PS2 24 | - QTDIR 25 | - USERNAME 26 | - LANG 27 | - LC_ADDRESS 28 | - LC_CTYPE 29 | - LC_COLLATE 30 | - LC_IDENTIFICATION 31 | - LC_MEASUREMENT 32 | - LC_MESSAGES 33 | - LC_MONETARY 34 | - LC_NAME 35 | - LC_NUMERIC 36 | - LC_PAPER 37 | - LC_TELEPHONE 38 | - LC_TIME 39 | - LC_ALL 40 | - LANGUAGE 41 | - LINGUAS 42 | - _XKB_CHARSET 43 | - XAUTHORITY 44 | sudoer_aliases: 45 | user: 46 | - name: ADMINS 47 | comment: Alias of for groups of admin users 48 | users: 49 | - "%admin" 50 | - "%wheel" 51 | - "%adm" 52 | - "%sudo" 53 | sudoer_specs: 54 | - name: admins 55 | comment: Members of one of the admin 
groups may gain root privileges. 56 | users: ADMINS 57 | hosts: ALL 58 | operators: ALL 59 | commands: ALL 60 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/test/integration/default/serverspec/default_spec.rb: -------------------------------------------------------------------------------- 1 | require_relative './spec_helper' 2 | 3 | describe 'ansible-sudoers::default' do 4 | 5 | describe package('sudo') do 6 | it { should be_installed } 7 | end 8 | 9 | describe file('/etc/sudoers') do 10 | it { should exist } 11 | it { should be_mode 440 } 12 | it { should be_owned_by 'root' } 13 | its(:content) { should match(/#includedir \/etc\/sudoers.d/) } 14 | its(:content) { should match(/User_Alias ADMINS = %admin,%wheel/) } 15 | end 16 | 17 | describe file('/etc/sudoers.d/kitchen') do 18 | it { should exist } 19 | it { should be_mode 440 } 20 | it { should be_owned_by 'root' } 21 | its(:content) { should match(/kitchen ALL=\(ALL\) NOPASSWD: ALL/) } 22 | end 23 | 24 | describe file('/etc/sudoers.d/admins') do 25 | it { should exist } 26 | it { should be_mode 440 } 27 | it { should be_owned_by 'root' } 28 | its(:content) { should match(/ADMINS ALL=\(ALL\) ALL/) } 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /provisioners/ansible/roles/sudoers/test/integration/default/serverspec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | require 'serverspec' 2 | 3 | # :backend can be either :exec or :ssh 4 | # since we are running local we use :exec 5 | set :backend, :exec 6 | 7 | RSpec.configure do |c| 8 | c.before :all do 9 | c.path = '/usr/local/bin:/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/sbin' 10 | end 11 | end 12 | -------------------------------------------------------------------------------- /provisioners/scripts/linux/install-cassandra-dse.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo yum install java-1.8.0-openjdk -y 3 | sudo yum install libaio -y 4 | 5 | cat < ${TOOL}_${VERSION}_${EDITION}_SHA256SUMS 20 | 21 | # Verify the SHASUM matches the binary. 22 | shasum -a 256 -c "${TOOL}_${VERSION}_${EDITION}_SHA256SUMS" 23 | 24 | unzip "${TOOL}_${VERSION}_linux_amd64.zip" 25 | rm "${TOOL}_${VERSION}_linux_amd64.zip" 26 | rm "${TOOL}_${VERSION}_SHA256SUMS" 27 | rm "${TOOL}_${VERSION}_${EDITION}_SHA256SUMS" 28 | rm "${TOOL}_${VERSION}_SHA256SUMS.sig" 29 | 30 | "${TOOL}" --version 31 | --------------------------------------------------------------------------------