├── .bumpversion.cfg ├── .github ├── CONTRIBUTING.md ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── codeql-analysis.yml │ └── release.yml ├── .gitignore ├── .pep8speaks.yml ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── LICENSE ├── MANIFEST.in ├── README.md ├── RELEASE.md ├── build_scripts ├── bin │ └── az ├── build_package.sh ├── freeze_requirements.sh └── run_tests.sh ├── examples ├── aws-kubernetes │ ├── .opsconfig.yaml │ ├── README.md │ ├── clusters │ │ ├── kubeconfigs │ │ │ └── README.md │ │ └── my-kubernetes-cluster.yaml │ ├── compositions │ │ └── generic │ │ │ ├── aws-eks │ │ │ ├── backends.tf.jinja2 │ │ │ ├── common_variables.tf.jinja2 │ │ │ ├── configure-local-kubectl.tf.jinja2 │ │ │ ├── eks-cluster-autoscaler.tf.jinja2 │ │ │ ├── eks-cluster.tf.jinja2 │ │ │ ├── eks-worker-nodes-auth-configmap.tf.jinja2 │ │ │ ├── eks-worker-nodes.tf.jinja2 │ │ │ ├── outputs.tf.jinja2 │ │ │ ├── providers.tf.jinja2 │ │ │ ├── scripts │ │ │ │ └── fileexist.sh │ │ │ ├── variables.tf.jinja2 │ │ │ └── vpc.tf.jinja2 │ │ │ ├── common │ │ │ ├── backends.tf.jinja2 │ │ │ ├── common_variables.tf.jinja2 │ │ │ └── providers.tf.jinja2 │ │ │ ├── helm-init │ │ │ ├── backends.tf.jinja2 │ │ │ └── helm-init.tf.jinja2 │ │ │ └── helm │ │ │ ├── backends.tf.jinja2 │ │ │ ├── cluster-autoscaler.tf │ │ │ ├── common_variables.tf.jinja2 │ │ │ ├── dashboard.tf │ │ │ ├── kube-state-metrics.tf │ │ │ ├── kube2iam.tf │ │ │ ├── provider_helm.tf.jinja2 │ │ │ ├── providers.tf.jinja2 │ │ │ └── variables.tf.jinja2 │ └── update.sh ├── cassandra-stress │ ├── .opsconfig.yaml │ ├── README.md │ ├── ansible.cfg │ ├── ansible │ │ ├── install_stress_tool.yaml │ │ ├── setup.yaml │ │ ├── setup12.yaml │ │ └── templates │ │ │ ├── cassandra-env.sh │ │ │ ├── cassandra_defaults.yaml │ │ │ ├── cassandra_defaults_12.yaml │ │ │ └── stress.yaml │ ├── cluster1.yaml │ ├── cluster2.yaml │ └── terraform │ │ ├── main │ │ ├── main.tf.jinja2 │ │ └── variables.tf.jinja2 │ │ ├── modules │ │ ├── cassandra │ │ │ └── 
main.tf.jinja2 │ │ ├── macros.tf.jinja2 │ │ ├── shared_iam │ │ │ └── default_policy.json │ │ └── vpc │ │ │ └── main.tf.jinja2 │ │ └── user_data └── features │ ├── ansible-vault │ ├── README.md │ ├── cluster │ │ ├── dev │ │ │ └── dev.yaml │ │ └── prod │ │ │ └── prod.yaml │ ├── inventory │ │ └── hosts │ ├── password_dev.txt │ ├── password_prod.txt │ ├── playbook │ │ └── example.yaml │ └── vault │ │ ├── vault_dev.yaml │ │ └── vault_prod.yaml │ ├── inventory │ ├── .opsconfig.yaml │ ├── README.md │ ├── local_inventory │ │ └── hosts │ ├── my-aws-cluster.yaml │ └── my-azure-cluster.yaml │ ├── packer │ ├── README.md │ ├── clusters │ │ └── ubuntu.yaml │ └── packer │ │ └── ubuntu.json │ ├── terraform-and-ansible │ ├── .gitignore │ ├── README.md │ ├── ansible │ │ ├── ansible.cfg │ │ ├── playbooks │ │ │ └── site.yaml │ │ ├── roles │ │ │ ├── common │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ ├── db │ │ │ │ └── tasks │ │ │ │ │ └── main.yaml │ │ │ └── web │ │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ └── tasks │ │ │ └── copy-key.yaml │ ├── clusters │ │ └── example.yaml │ └── terraform │ │ ├── main │ │ ├── main.tf.jinja2 │ │ └── shared_variables.tf │ │ ├── modules │ │ ├── db │ │ │ ├── instance.tf.jinja2 │ │ │ └── main.tf.jinja2 │ │ ├── macros.tf.jinja2 │ │ ├── shared_iam │ │ │ └── default_policy.json │ │ ├── vpc │ │ │ └── main.tf.jinja2 │ │ └── web │ │ │ ├── elb.tf.jinja2 │ │ │ ├── instance.tf.jinja2 │ │ │ └── main.tf.jinja2 │ │ └── user_data │ └── terraform-hierarchical │ ├── .opsconfig.yaml │ ├── README.md │ ├── compositions │ └── terraform │ │ ├── cluster │ │ └── main.tf │ │ └── network │ │ └── main.tf │ ├── config │ └── env=dev │ │ ├── cluster=cluster1 │ │ ├── composition=cluster │ │ │ └── conf.yaml │ │ ├── composition=network │ │ │ └── conf.yaml │ │ └── conf.yaml │ │ ├── cluster=cluster2 │ │ ├── composition=cluster │ │ │ └── conf.yaml │ │ ├── composition=network │ │ │ └── conf.yaml │ │ └── conf.yaml │ │ └── default.yaml │ └── modules │ ├── cluster │ └── main.tf │ └── 
network │ └── main.tf ├── renovate.json ├── requirements.txt ├── setup.cfg ├── setup.py ├── src └── ops │ ├── __init__.py │ ├── ansible │ ├── __init__.py │ ├── callback_plugins │ │ └── __init__.py │ ├── filter_plugins │ │ ├── __init__.py │ │ └── commonfilters.py │ └── vars_plugins │ │ ├── __init__.py │ │ ├── clusterconfig.py │ │ └── opsconfig.py │ ├── cli │ ├── __init__.py │ ├── aws.py │ ├── config.py │ ├── config_generator.py │ ├── helmfile.py │ ├── inventory.py │ ├── packer.py │ ├── parser.py │ ├── playbook.py │ ├── run.py │ ├── ssh.py │ ├── sync.py │ └── terraform.py │ ├── data │ ├── ansible │ │ ├── ansible.cfg │ │ └── tasks │ │ │ ├── deploy_prometheus_alert_rules.yml │ │ │ ├── install_rpm.yml │ │ │ └── remove_prometheus_alert_rules.yml │ ├── ssh │ │ ├── ssh.config │ │ ├── ssh.scb.proxy.config.tpl │ │ └── ssh.tunnel.config │ └── terraform │ │ └── terraformrc │ ├── git_utils.py │ ├── hierarchical │ ├── __init__.py │ └── composition_config_generator.py │ ├── inventory │ ├── SKMS.py │ ├── __init__.py │ ├── azurerm.py │ ├── caching.py │ ├── ec2inventory.py │ ├── generator.py │ ├── plugin │ │ ├── __init__.py │ │ ├── azr.py │ │ ├── cns.py │ │ ├── ec2.py │ │ ├── legacy_pcs.py │ │ └── skms.py │ └── sshconfig.py │ ├── jinja │ └── __init__.py │ ├── main.py │ ├── opsconfig.py │ ├── simpleconsul.py │ ├── simplessm.py │ ├── simplevault.py │ └── terraform │ ├── __init__.py │ └── terraform_cmd_generator.py └── tests ├── e2e ├── common.py ├── fixture │ ├── ansible │ │ ├── .opsconfig.yaml │ │ ├── ansible.cfg │ │ ├── clusters │ │ │ ├── test.yaml │ │ │ └── test_filters.yaml │ │ ├── inventory │ │ │ └── hosts │ │ ├── modules │ │ │ └── my_module.py │ │ ├── playbooks │ │ │ └── play_module.yaml │ │ └── plugins │ │ │ └── filter_plugins │ │ │ └── filters.py │ ├── inventory │ │ ├── .opsconfig.yaml │ │ └── clusters │ │ │ ├── common_plugins.yaml │ │ │ ├── plugin_generator.yaml │ │ │ └── plugin_generator_scb.yaml │ └── terraform │ │ ├── .opsconfig.yaml │ │ ├── clusters │ │ └── prod │ │ │ └── 
test.yaml │ │ └── terraform │ │ ├── main │ │ └── main.tf.jinja2 │ │ └── user_data ├── test_inventory.py ├── test_jinja_filters.py ├── test_playbook.py ├── test_ssh.py └── test_terraform.py └── unit ├── fixture ├── .opsconfig.yaml └── clusters │ ├── dev │ ├── .opsconfig.yaml │ └── us-west-1 │ │ └── test.yaml │ └── prod │ ├── .opsconfig.yaml │ └── us-east-1 │ ├── .opsconfig.yaml │ └── test.yaml ├── test_composition_config_generator.py └── test_opsconfig.py /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 2.3.1 3 | commit = True 4 | tag = True 5 | tag_name = {new_version} 6 | message = [RELEASE] - Release version {new_version} 7 | parse = (?P\d+)\.(?P\d+)\.(?P\d+[a-z]*) 8 | serialize = 9 | {major}.{minor}.{patch} 10 | 11 | [bumpversion:file:setup.py] 12 | 13 | [bumpversion:file:README.md] 14 | -------------------------------------------------------------------------------- /.github/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Thanks for choosing to contribute! 4 | 5 | The following are a set of guidelines to follow when contributing to this project. 6 | 7 | ## Code Of Conduct 8 | 9 | This project adheres to the Adobe [code of conduct](../CODE_OF_CONDUCT.md). By participating, 10 | you are expected to uphold this code. Please report unacceptable behavior to 11 | [Grp-opensourceoffice@adobe.com](mailto:Grp-opensourceoffice@adobe.com). 12 | 13 | ## Have A Question? 14 | 15 | Start by filing an issue. The existing committers on this project work to reach 16 | consensus around project direction and issue solutions within issue threads 17 | (when appropriate). 18 | 19 | ## Contributor License Agreement 20 | 21 | All third-party contributions to this project must be accompanied by a signed contributor 22 | license agreement. 
This gives Adobe permission to redistribute your contributions 23 | as part of the project. [Sign our CLA](http://opensource.adobe.com/cla.html). You 24 | only need to submit an Adobe CLA one time, so if you have submitted one previously, 25 | you are good to go! 26 | 27 | ## Code Reviews 28 | 29 | All submissions should come in the form of pull requests and need to be reviewed 30 | by project committers. Read [GitHub's pull request documentation](https://help.github.com/articles/about-pull-requests/) 31 | for more information on sending pull requests. 32 | 33 | Lastly, please follow the [pull request template](PULL_REQUEST_TEMPLATE.md) when 34 | submitting a pull request! 35 | 36 | ## From Contributor To Committer 37 | 38 | We love contributions from our community! If you'd like to go a step beyond contributor 39 | and become a committer with full write access and a say in the project, you must 40 | be invited to the project. The existing committers employ an internal nomination 41 | process that must reach lazy consensus (silence is approval) before invitations 42 | are issued. If you feel you are qualified and want to get more deeply involved, 43 | feel free to reach out to existing committers to have a conversation about that. 44 | 45 | ## Security Issues 46 | 47 | Security issues shouldn't be reported on this issue tracker. 
Instead, [file an issue to our security experts](https://helpx.adobe.com/security/alertus.html) 48 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ### Expected Behaviour 5 | 6 | ### Actual Behaviour 7 | 8 | ### Reproduce Scenario (including but not limited to) 9 | 10 | #### Steps to Reproduce 11 | 12 | #### Platform and Version 13 | 14 | #### Sample Code that illustrates the problem 15 | 16 | #### Logs taken while reproducing problem 17 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Description 4 | 5 | 6 | 7 | ## Related Issue 8 | 9 | 10 | 11 | 12 | 13 | 14 | ## Motivation and Context 15 | 16 | 17 | 18 | ## How Has This Been Tested? 19 | 20 | 21 | 22 | 23 | 24 | ## Screenshots (if appropriate): 25 | 26 | ## Types of changes 27 | 28 | 29 | 30 | - [ ] Bug fix (non-breaking change which fixes an issue) 31 | - [ ] New feature (non-breaking change which adds functionality) 32 | - [ ] Breaking change (fix or feature that would cause existing functionality to change) 33 | 34 | ## Checklist: 35 | 36 | 37 | 38 | 39 | - [ ] I have signed the [Adobe Open Source CLA](http://opensource.adobe.com/cla.html). 40 | - [ ] My code follows the code style of this project. 41 | - [ ] My change requires a change to the documentation. 42 | - [ ] I have updated the documentation accordingly. 43 | - [ ] I have read the **CONTRIBUTING** document. 44 | - [ ] I have added tests to cover my changes. 45 | - [ ] All new and existing tests passed. 
46 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ master ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ master ] 20 | schedule: 21 | - cron: '36 0 * * 1' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v2 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 
52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v2 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v2 72 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | # This workflow will upload a Python Package using Twine when a release is created 2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries 3 | 4 | # This workflow uses actions that are not certified by GitHub. 5 | # They are provided by a third-party and are governed by 6 | # separate terms of service, privacy policy, and support 7 | # documentation. 
8 | 9 | name: Release packages 10 | 11 | on: 12 | push: 13 | tags: [ '*' ] 14 | 15 | env: 16 | # Use docker.io for Docker Hub if empty 17 | REGISTRY: ghcr.io 18 | # github.repository as / 19 | IMAGE_NAME: adobe/ops-cli 20 | 21 | jobs: 22 | publish-to-pypi: 23 | runs-on: ubuntu-latest 24 | env: 25 | BOTO_CONFIG: /dev/null 26 | steps: 27 | - uses: actions/checkout@v4 28 | - name: Set up Python 29 | uses: actions/setup-python@v5 30 | with: 31 | python-version: '3.x' 32 | - name: Build package 33 | run: | 34 | python -m pip install --upgrade pip 35 | pip install build 36 | - name: Freeze requirements.txt 37 | run: bash build_scripts/freeze_requirements.sh 38 | - name: Build package 39 | run: python -m build 40 | - name: Test package 41 | run: pip install --no-cache-dir dist/ops*.tar.gz && ops --verbose -h 42 | - name: Publish package 43 | uses: pypa/gh-action-pypi-publish@c12cc61414480c03e10ea76e2a0a1a17d6c764e2 44 | with: 45 | user: __token__ 46 | password: ${{ secrets.ADOBE_BOT_PYPI_TOKEN }} 47 | 48 | publish-docker-image-to-ghcr: 49 | runs-on: ubuntu-latest 50 | permissions: 51 | contents: read 52 | packages: write 53 | steps: 54 | - name: Checkout repository 55 | uses: actions/checkout@v4 56 | # Login against a Docker registry except on PR 57 | # https://github.com/docker/login-action 58 | - name: Log into registry ${{ env.REGISTRY }} 59 | if: github.event_name != 'pull_request' 60 | uses: docker/login-action@v3 61 | with: 62 | registry: ${{ env.REGISTRY }} 63 | username: ${{ github.actor }} 64 | password: ${{ secrets.GITHUB_TOKEN }} 65 | 66 | # Extract metadata (tags, labels) for Docker 67 | # https://github.com/docker/metadata-action 68 | - name: Extract Docker metadata 69 | id: meta 70 | uses: docker/metadata-action@v4 71 | with: 72 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 73 | 74 | # Build and push Docker image with Buildx (don't push on PR) 75 | # https://github.com/docker/build-push-action 76 | - name: Build and push Docker image 77 | uses: 
docker/build-push-action@v5 78 | with: 79 | context: . 80 | push: ${{ github.event_name != 'pull_request' }} 81 | tags: ${{ steps.meta.outputs.tags }} 82 | labels: ${{ steps.meta.outputs.labels }} 83 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ops.egg-info/ 2 | *.DS_Store 3 | *.plan 4 | *.tf.json 5 | *.tfvars.json 6 | *.tfstate 7 | .cache/ 8 | *.pyc 9 | .terraform 10 | build/ 11 | dist/ 12 | buildrunner.results 13 | .idea 14 | .env 15 | src/ops_cli.egg-info 16 | -------------------------------------------------------------------------------- /.pep8speaks.yml: -------------------------------------------------------------------------------- 1 | scanner: 2 | linter: flake8 3 | 4 | flake8: 5 | max-line-length: 99 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Adobe Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | nationality, personal appearance, race, religion, or sexual identity and 10 | orientation. 
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. 
Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at Grp-opensourceoffice@adobe.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at [https://contributor-covenant.org/version/1/4][version] 72 | 73 | [homepage]: https://contributor-covenant.org 74 | [version]: https://contributor-covenant.org/version/1/4/ 75 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.11.4-alpine3.18 AS compile-image 2 | ARG TERRAFORM_VERSION="0.12.6" 3 | ARG AZURE_CLI_VERSION="2.0.67" 4 | 5 | ENV BOTO_CONFIG=/dev/null 6 | COPY . 
/sources/ 7 | WORKDIR /sources 8 | 9 | RUN wget -q -O terraform.zip https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \ 10 | && unzip terraform.zip -d /usr/local/bin \ 11 | && rm -rf terraform.zip 12 | RUN apk add --virtual=build bash gcc libffi-dev musl-dev openssl-dev make git 13 | RUN ln -s /usr/local/bin/python /usr/bin/python 14 | RUN pip --no-cache-dir install virtualenv \ 15 | && virtualenv /azure-cli \ 16 | && source /azure-cli/bin/activate \ 17 | && python -m pip install --upgrade pip \ 18 | && env CRYPTOGRAPHY_DONT_BUILD_RUST=1 pip install azure-cli==${AZURE_CLI_VERSION} \ 19 | && deactivate 20 | RUN bash build_scripts/freeze_requirements.sh 21 | RUN bash build_scripts/run_tests.sh 22 | RUN bash build_scripts/build_package.sh 23 | RUN apk del --purge build 24 | 25 | 26 | FROM python:3.11.4-alpine3.18 27 | ARG TERRAFORM_VERSION="0.12.6" 28 | ARG VAULT_VERSION="1.1.3" 29 | ARG KUBECTL_VERSION="v1.17.0" 30 | ARG AWS_IAM_AUTHENTICATOR_VERSION="1.13.7/2019-06-11" 31 | ARG HELM_VERSION="v3.6.3" 32 | ARG HELM_FILE_VERSION="v0.81.3" 33 | ARG HELM_DIFF_VERSION="2.11.0%2B5" 34 | 35 | 36 | COPY --from=compile-image /sources/dist /dist 37 | 38 | RUN adduser ops -Du 2342 -h /home/ops \ 39 | && ln -s /usr/local/bin/python /usr/bin/python \ 40 | && /usr/bin/python -m pip install --upgrade pip \ 41 | && apk add --no-cache bash zsh ca-certificates curl jq openssh-client git \ 42 | && apk add --virtual=build gcc libffi-dev musl-dev openssl-dev make \ 43 | # Install ops python package 44 | && env CRYPTOGRAPHY_DONT_BUILD_RUST=1 pip --no-cache-dir install --upgrade /dist/ops*.tar.gz \ 45 | && rm -rf /dist \ 46 | # Dry-run 47 | && ops --verbose -h \ 48 | && apk del --purge build \ 49 | && wget -q https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl -O /usr/local/bin/kubectl \ 50 | && chmod +x /usr/local/bin/kubectl \ 51 | && wget -q 
https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz -O - | tar -xzO linux-amd64/helm > /usr/local/bin/helm \ 52 | && chmod +x /usr/local/bin/helm \ 53 | && wget -q -O terraform.zip https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip \ 54 | && unzip terraform.zip -d /usr/local/bin \ 55 | && rm -rf terraform.zip \ 56 | && mkdir -p ~/.terraform.d/plugins && wget -q -O ~/.terraform.d/plugins/terraform-provider-vault https://github.com/amuraru/terraform-provider-vault/releases/download/vault-namespaces/terraform-provider-vault \ 57 | && chmod 0755 ~/.terraform.d/plugins/terraform-provider-vault \ 58 | && wget -q -O vault.zip https://releases.hashicorp.com/vault/${VAULT_VERSION}/vault_${VAULT_VERSION}_linux_amd64.zip \ 59 | && unzip vault.zip -d /usr/local/bin \ 60 | && rm -rf vault.zip \ 61 | && wget -q https://amazon-eks.s3-us-west-2.amazonaws.com/${AWS_IAM_AUTHENTICATOR_VERSION}/bin/linux/amd64/aws-iam-authenticator -O /usr/local/bin/aws-iam-authenticator \ 62 | && chmod +x /usr/local/bin/aws-iam-authenticator \ 63 | && wget -q https://github.com/roboll/helmfile/releases/download/${HELM_FILE_VERSION}/helmfile_linux_amd64 -O /usr/local/bin/helmfile \ 64 | && chmod +x /usr/local/bin/helmfile 65 | 66 | # install utils under `ops` user 67 | USER ops 68 | ENV HOME=/home/ops 69 | WORKDIR /home/ops 70 | 71 | USER root 72 | RUN helm plugin install https://github.com/databus23/helm-diff 73 | RUN helm plugin install https://github.com/jkroepke/helm-secrets --version v3.8.2 74 | RUN helm plugin install https://github.com/rimusz/helm-tiller 75 | 76 | 77 | COPY --from=compile-image /azure-cli /home/ops/.local/azure-cli 78 | COPY build_scripts/bin/az /home/ops/bin/ 79 | 80 | RUN touch /home/ops/.zshrc 81 | 82 | USER ops 83 | ENV PATH="/home/ops/bin:${PATH}" 84 | ENV PS1="%d $ " 85 | CMD /bin/zsh 86 | -------------------------------------------------------------------------------- /MANIFEST.in: 
-------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | recursive-include examples * 4 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Release 2 | 3 | 1. Install: `pip install bump2version` 4 | 2. Bump version: `bump2version patch --new-version=2.0.4` 5 | 3. Push the release commit: `git push --follow-tags` 6 | 4. Wait for GH Actions to release packages: https://github.com/adobe/ops-cli/actions/workflows/release.yml/ 7 | * This will publish the pypi package to https://pypi.org/project/ops-cli/ 8 | * Publish a new docker image version to https://github.com/adobe/ops-cli/pkgs/container/ops-cli 9 | 5. Open a new `dev` cycle: e.g `bump2version patch --new-version=2.0.5dev` 10 | -------------------------------------------------------------------------------- /build_scripts/bin/az: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | env PATH=/home/ops/.local/azure-cli/bin:$PATH python -m azure.cli "$@" 4 | -------------------------------------------------------------------------------- /build_scripts/build_package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "Building package" 5 | rm -rf dist/ 6 | export BOTO_CONFIG=/dev/null 7 | export CRYPTOGRAPHY_DONT_BUILD_RUST=1 8 | python setup.py sdist bdist_wheel 9 | ls -l dist/ 10 | -------------------------------------------------------------------------------- /build_scripts/freeze_requirements.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "Freezing requirements.txt" 5 | pip install pipenv 6 | 7 | rm -rf Pipfile* deps 8 | pipenv lock --clear 9 | pipenv requirements 1>deps 10 | grep '==' deps | sed 
"s/;\\sextra.*//" > requirements.txt 11 | rm -rf Pipfile* deps 12 | -------------------------------------------------------------------------------- /build_scripts/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "Running tests" 5 | export BOTO_CONFIG=/dev/null 6 | export CRYPTOGRAPHY_DONT_BUILD_RUST=1 7 | 8 | pip install --no-cache-dir -r requirements.txt 9 | pip install pytest 10 | 11 | pip install -e . 12 | python -m pytest tests 13 | -------------------------------------------------------------------------------- /examples/aws-kubernetes/.opsconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | terraform.remove_local_cache: True 3 | -------------------------------------------------------------------------------- /examples/aws-kubernetes/clusters/kubeconfigs/README.md: -------------------------------------------------------------------------------- 1 | This is where the kube configs are being generated, for your Kubernetes clusters. 
-------------------------------------------------------------------------------- /examples/aws-kubernetes/clusters/my-kubernetes-cluster.yaml: -------------------------------------------------------------------------------- 1 | terraform: 2 | boto_profile: my-aws-profile 3 | paths: 4 | aws-eks: compositions/generic/aws-eks 5 | helm-init: compositions/generic/helm-init 6 | helm: compositions/generic/helm 7 | state: 8 | type: local # store terraform state files on local disk 9 | # type: s3 # store terraform state files on S3 bucket 10 | # kms_arn: 'arn:aws:kms:us-east-1:111111222222:key/ddddddddd-3333-4444-aaaa-bbbbbbbbbbbb' # ARN of KMS key used for S3 encryption 11 | vars: 12 | terraform_var1: '' 13 | kubernetes: 14 | version: 1.11 15 | cluster_id: mykubernetescluster # change the cluster name here 16 | environment: stage 17 | aws: 18 | profile: my-aws-profile # enter the right aws profile 19 | region: us-east-1 20 | account_id: 11111111111 # enter your AWS account id here. This is used for the cluster-autoscaler helm chart. 21 | short_region: va6 22 | availability_zones: '["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"]' 23 | allow_my_ip: "192.168.0.1/32" # enter your public ip (eg. 
by using http://ipv4.icanhazip.com) 24 | tags: 25 | Cluster: mykubernetescluster 26 | worker_nodes: 27 | instance_type: m5.large 28 | ami_id: ami-0b4eb1d8782fc3aea 29 | max_size: 2 30 | min_size: 2 31 | helm: 32 | tiller_version: 2.12.3 33 | -------------------------------------------------------------------------------- /examples/aws-kubernetes/compositions/generic/aws-eks/backends.tf.jinja2: -------------------------------------------------------------------------------- 1 | ../common/backends.tf.jinja2 -------------------------------------------------------------------------------- /examples/aws-kubernetes/compositions/generic/aws-eks/common_variables.tf.jinja2: -------------------------------------------------------------------------------- 1 | ../common/common_variables.tf.jinja2 -------------------------------------------------------------------------------- /examples/aws-kubernetes/compositions/generic/aws-eks/configure-local-kubectl.tf.jinja2: -------------------------------------------------------------------------------- 1 | resource "null_resource" "configure-kubectl-local" { 2 | 3 | provisioner "local-exec" { 4 | command = </dev/null; then 7 | echo "$package is already present. Skipping." 
8 | else 9 | HOMEBREW_NO_AUTO_UPDATE=1 brew install $package 10 | fi 11 | } 12 | 13 | function brew_install_or_upgrade { 14 | package=$1 15 | echo "Installing/upgrading $package via brew" 16 | if brew ls --versions $package >/dev/null; then 17 | HOMEBREW_NO_AUTO_UPDATE=1 brew upgrade $package 18 | else 19 | HOMEBREW_NO_AUTO_UPDATE=1 brew install $package 20 | fi 21 | } 22 | 23 | echo "Installing prerequisites" 24 | 25 | brew update 26 | 27 | brew_install_if_not_present terraform 28 | brew_install_or_upgrade wget 29 | brew_install_or_upgrade aws-iam-authenticator 30 | brew_install_or_upgrade kubernetes-helm 31 | brew_install_or_upgrade kubectl 32 | brew_install_or_upgrade jq 33 | 34 | helm repo update 35 | -------------------------------------------------------------------------------- /examples/cassandra-stress/.opsconfig.yaml: -------------------------------------------------------------------------------- 1 | ssh.user: ec2-user -------------------------------------------------------------------------------- /examples/cassandra-stress/README.md: -------------------------------------------------------------------------------- 1 | # Main configuration files 2 | 3 | See ansible/templates/stress.yaml for the configuration for `cassandra-stress`; the column family, distribution of data 4 | and 5 | 6 | # Start and configure the cluster 7 | 8 | ``` 9 | cp cluster1.yaml mycluster.yaml 10 | 11 | ops mycluster.yaml terraform plan 12 | ops mycluster.yaml terraform apply 13 | ops mycluster.yaml play ansible/setup.yaml 14 | 15 | ``` 16 | 17 | 18 | # Run stress tests 19 | 20 | ``` 21 | # ssh on the bastion where the stress configurations are located 22 | ops mycluster.yaml ssh bastion 23 | 24 | # insert one million rows 25 | cassandra-stress user profile=/etc/cassandra/conf/stress.yaml ops\(insert=1\) n=1000000 -node cassandra-1 26 | 27 | # mixed workload with 9 selects and 1 inserts 28 | cassandra-stress user profile=/etc/cassandra/conf/stress.yaml ops\(insert=1,dgraph=9\) 
n=1000000 -node cassandra-1 29 | 30 | # specifying number of threads 31 | cassandra-stress user profile=/etc/cassandra/conf/stress.yaml ops\(insert=1,dgraph=9\) n=1000000 -rate threads=50 -node cassandra-1 32 | ``` 33 | 34 | # Example results 35 | 36 | ``` 37 | 38 | type, total ops, op/s, pk/s, row/s, mean, med, .95, .99, .999, max, time, stderr, errors, gc: #, max ms, sum ms, sdv ms, mb 39 | total, 1000000, 32277, 32277, 32277, 3.1, 2.0, 8.0, 26.2, 33.9, 61.4, 32.5, 0.03181, 0, 0, 0, 0, 0, 0 40 | 41 | 42 | Results: 43 | op rate : 30750 [dgraph:30750] 44 | partition rate : 30750 [dgraph:30750] 45 | row rate : 30750 [dgraph:30750] 46 | latency mean : 3.2 [dgraph:3.2] 47 | latency median : 2.0 [dgraph:2.0] 48 | latency 95th percentile : 7.3 [dgraph:7.3] 49 | latency 99th percentile : 19.3 [dgraph:19.3] 50 | latency 99.9th percentile : 88.7 [dgraph:88.7] 51 | latency max : 648.3 [dgraph:648.3] 52 | Total partitions : 1000000 [dgraph:1000000] 53 | Total errors : 0 [dgraph:0] 54 | total gc count : 0 55 | total gc mb : 0 56 | total gc time (s) : 0 57 | avg gc time(ms) : NaN 58 | stdev gc time(ms) : 0 59 | ``` 60 | 61 | # Testing cassandra 1.2 62 | 63 | Because it doesn't support cql V2, we have to use the legacy mode for cassandra test 64 | 65 | ``` 66 | cassandra-stress write n=1000000 -col n=fixed\(3\) \ 67 | size=fixed\(34\) -rate threads=100 -log interval=10 \ 68 | -mode thrift -node cassandra-1 69 | ``` 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /examples/cassandra-stress/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | vars_plugins = plugins/vars -------------------------------------------------------------------------------- /examples/cassandra-stress/ansible/install_stress_tool.yaml: -------------------------------------------------------------------------------- 1 | - hosts: bastion 2 | sudo: yes 3 | tasks: 4 | - name: download cassandra 5 | 
get_url: 6 | url: https://rpm.datastax.com/community/noarch/cassandra22-2.2.3-1.noarch.rpm 7 | dest: /tmp/cassandra.rpm 8 | register: status 9 | 10 | - name: install 11 | shell: rpm -iUvh /tmp/cassandra.rpm 12 | when: status.changed 13 | 14 | - template: src=templates/stress.yaml dest=/etc/cassandra/conf/stress.yaml 15 | tags: stress -------------------------------------------------------------------------------- /examples/cassandra-stress/ansible/setup.yaml: -------------------------------------------------------------------------------- 1 | - hosts: cassandra 2 | sudo: yes 3 | strategy: free 4 | tasks: 5 | - name: download cassandra 6 | get_url: 7 | url: https://rpm.datastax.com/community/noarch/cassandra22-2.2.3-1.noarch.rpm 8 | dest: /tmp/cassandra.rpm 9 | register: status 10 | 11 | - name: install 12 | shell: rpm -iUvh /tmp/cassandra.rpm 13 | when: status.changed 14 | 15 | - template: src=templates/cassandra_defaults.yaml dest=/etc/cassandra/conf/cassandra.yaml 16 | - template: src=templates/cassandra-env.sh dest=/etc/cassandra/conf/cassandra-env.sh 17 | 18 | - file: path=/cass state=directory owner=cassandra 19 | 20 | - service: name=cassandra state=restarted 21 | 22 | - template: src=templates/stress.yaml dest=/etc/cassandra/conf/stress.yaml 23 | tags: stress 24 | 25 | 26 | - include: install_stress_tool.yaml -------------------------------------------------------------------------------- /examples/cassandra-stress/ansible/setup12.yaml: -------------------------------------------------------------------------------- 1 | - hosts: cassandra 2 | sudo: yes 3 | tasks: 4 | - hostname: name="{{ ec2_private_dns_name }}" 5 | 6 | - name: download cassandra 7 | get_url: 8 | url: https://rpm.datastax.com/community/noarch/cassandra12-1.2.19-1.noarch.rpm 9 | dest: /tmp/cassandra12.rpm 10 | register: status 11 | 12 | - name: install 13 | shell: rpm -iUvh /tmp/cassandra12.rpm 14 | when: status.changed 15 | 16 | - template: src=templates/cassandra_defaults_12.yaml 
dest=/etc/cassandra/conf/cassandra.yaml 17 | 18 | - file: path=/cass state=directory owner=cassandra 19 | 20 | - service: name=cassandra state=restarted 21 | 22 | - include: install_stress_tool.yaml 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /examples/cassandra-stress/ansible/templates/stress.yaml: -------------------------------------------------------------------------------- 1 | # 2 | # This is an example YAML profile for cassandra-stress 3 | # 4 | # insert data 5 | # cassandra-stress user profile=/home/jake/stress1.yaml ops(insert=1) 6 | # 7 | # read, using query simple1: 8 | # cassandra-stress profile=/home/jake/stress1.yaml ops(simple1=1) 9 | # 10 | # mixed workload (90/10) 11 | # cassandra-stress user profile=/home/jake/stress1.yaml ops(insert=1,simple1=9) 12 | 13 | 14 | # 15 | # Keyspace info 16 | # 17 | keyspace: mytable 18 | 19 | # 20 | # The CQL for creating a keyspace (optional if it already exists) 21 | # 22 | keyspace_definition: | 23 | CREATE KEYSPACE "mytable" WITH replication = { 24 | 'class': 'SimpleStrategy', 25 | 'replication_factor': '1' 26 | }; 27 | 28 | # 29 | # Table info 30 | # 31 | table: device_metadata 32 | 33 | # 34 | # The CQL for creating a table you wish to stress (optional if it already exists) 35 | # 36 | table_definition: | 37 | CREATE TABLE "device_metadata" ( 38 | device_id blob, 39 | data blob, 40 | PRIMARY KEY (device_id) 41 | ) WITH COMPACT STORAGE AND 42 | bloom_filter_fp_chance=0.100000 AND 43 | caching='ROWS_ONLY' AND 44 | comment='' AND 45 | dclocal_read_repair_chance=0.000000 AND 46 | gc_grace_seconds=2592000 AND 47 | read_repair_chance=0.300000 AND 48 | replicate_on_write='true' AND 49 | compression={'chunk_length_kb': '64', 'sstable_compression': 'SnappyCompressor'}; 50 | # CREATE TABLE "clusters" ( 51 | # device_id blob, 52 | # dpid int, 53 | # data blob, 54 | # PRIMARY KEY (device_id, dpid) 55 | # ) WITH COMPACT STORAGE AND 56 | # 
bloom_filter_fp_chance=0.100000 AND 57 | # caching='ROWS_ONLY' AND 58 | # comment='' AND 59 | # dclocal_read_repair_chance=0.000000 AND 60 | # gc_grace_seconds=2592000 AND 61 | # read_repair_chance=0.300000 AND 62 | # replicate_on_write='true' AND 63 | # compression={'chunk_length_kb': '64', 'sstable_compression': 'SnappyCompressor'}; 64 | 65 | 66 | # 67 | # Optional meta information on the generated columns in the above table 68 | # The min and max only apply to text and blob types 69 | # The distribution field represents the total unique population 70 | # distribution of that column across rows. Supported types are 71 | # 72 | # EXP(min..max) An exponential distribution over the range [min..max] 73 | # EXTREME(min..max,shape) An extreme value (Weibull) distribution over the range [min..max] 74 | # GAUSSIAN(min..max,stdvrng) A gaussian/normal distribution, where mean=(min+max)/2, and stdev is (mean-min)/stdvrng 75 | # GAUSSIAN(min..max,mean,stdev) A gaussian/normal distribution, with explicitly defined mean and stdev 76 | # UNIFORM(min..max) A uniform distribution over the range [min, max] 77 | # FIXED(val) A fixed distribution, always returning the same value 78 | # Aliases: extr, gauss, normal, norm, weibull 79 | # 80 | # If preceded by ~, the distribution is inverted 81 | # 82 | # Defaults for all columns are size: uniform(4..8), population: uniform(1..100B), cluster: fixed(1) 83 | # 84 | columnspec: 85 | - name: device_id 86 | size: fixed(36) 87 | population: uniform(1..10M) # the range of unique values to select for the field (default is 100Billion) 88 | - name: data 89 | size: uniform(50..100) 90 | 91 | insert: 92 | partitions: fixed(10) # number of unique partitions to update in a single operation 93 | # if batchcount > 1, multiple batches will be used but all partitions will 94 | # occur in all batches (unless they finish early); only the row counts will vary 95 | batchtype: LOGGED # type of batch to use 96 | select: uniform(1..10)/10 # uniform chance any 
single generated CQL row will be visited in a partition; 97 | # generated for each partition independently, each time we visit it 98 | 99 | # 100 | # A list of queries you wish to run against the schema 101 | # 102 | queries: 103 | dgraph: 104 | cql: SELECT * FROM mytable.device_metadata WHERE device_id = ? 105 | fields: samerow # samerow or multirow (select arguments from the same row, or randomly from all rows in the partition) 106 | -------------------------------------------------------------------------------- /examples/cassandra-stress/cluster1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | inventory: 3 | - plugin: ec2 4 | args: 5 | region: us-east-1 6 | boto_profile: mcis 7 | filters: 8 | "tag:cluster": "{{ cluster }}" 9 | bastion: 10 | "tag:cluster": "{{ cluster }}" 11 | "tag:role": "bastion" 12 | 13 | terraform: 14 | path: terraform/main 15 | boto_profile: mcis 16 | 17 | cassandra_instance_count: 3 18 | cassandra_instance_type: r3.xlarge 19 | 20 | domain: "{{ cluster }}.{{ environment }}.demdex.com" 21 | 22 | vars: 23 | cluster_prefix: '{{cluster}}' 24 | region: us-east-1 25 | 26 | vpc_cidr: "172.16.0.0/19" 27 | dmz_subnets: 28 | 172.16.1.0/25 29 | 172.16.1.128/25 30 | cassandra_subnets: 31 | 172.16.2.0/24 32 | 172.16.3.0/24 33 | 34 | public_key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPoftvk6oT5adTX+3aCgS956Jech/9t/ykXUoV63/tXaDbQ+r3anmXxrwcdQyCkbGPkAg9N02DhrrEK5LUuCyCYm66+/DBH0RshM9ZASb6PSTyawJpSM3PomXyCHV0YqmruvGyTnQVY5C809BEWs6bFZzOdu6PKPPkhdA3NBgu5fzslH1BfgR58M1bSAYaVrGhORsGwQeXkfsv59T8+9P2Jaw90y64svYE/ykW6Foh0L5jklGMUEihZIXUeZrm9sFJ4yOnVTvYT2eDI9FQs1A1+tVpW6HDZ0/2zNWKiKjiRuDI684INsc09aZSrPPWGpA/4XZ5637rfAU55x1S7RlH debugger' 35 | 36 | environment: 'dev' -------------------------------------------------------------------------------- /examples/cassandra-stress/cluster2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | inventory: 3 | - plugin: ec2 4 | 
args: 5 | region: us-east-1 6 | boto_profile: mcis 7 | filters: 8 | "tag:cluster": "{{ cluster }}" 9 | bastion: 10 | "tag:cluster": "{{ cluster }}" 11 | "tag:role": "bastion" 12 | 13 | terraform: 14 | path: terraform/main 15 | boto_profile: mcis 16 | 17 | cassandra_instance_count: 3 18 | cassandra_instance_type: r3.xlarge 19 | 20 | domain: "{{ cluster }}.{{ environment }}.demdex.com" 21 | 22 | vars: 23 | cluster_prefix: '{{cluster}}' 24 | region: us-east-1 25 | 26 | vpc_cidr: "172.16.0.0/19" 27 | dmz_subnets: 28 | 172.16.1.0/25 29 | 172.16.1.128/25 30 | cassandra_subnets: 31 | 172.16.2.0/24 32 | 172.16.3.0/24 33 | 34 | public_key: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPoftvk6oT5adTX+3aCgS956Jech/9t/ykXUoV63/tXaDbQ+r3anmXxrwcdQyCkbGPkAg9N02DhrrEK5LUuCyCYm66+/DBH0RshM9ZASb6PSTyawJpSM3PomXyCHV0YqmruvGyTnQVY5C809BEWs6bFZzOdu6PKPPkhdA3NBgu5fzslH1BfgR58M1bSAYaVrGhORsGwQeXkfsv59T8+9P2Jaw90y64svYE/ykW6Foh0L5jklGMUEihZIXUeZrm9sFJ4yOnVTvYT2eDI9FQs1A1+tVpW6HDZ0/2zNWKiKjiRuDI684INsc09aZSrPPWGpA/4XZ5637rfAU55x1S7RlH debugger' 35 | 36 | environment: 'dev' -------------------------------------------------------------------------------- /examples/cassandra-stress/terraform/main/main.tf.jinja2: -------------------------------------------------------------------------------- 1 | variable "user_data" { 2 | description = "The user-data passed to the instance. Changing this will re-create the instances." 
3 | default = { 4 | "0" = "{{ 'terraform/user_data' | read_file | escape_new_lines }}" 5 | } 6 | } 7 | 8 | provider "aws" { 9 | access_key = "{{ access_key }}" 10 | secret_key = "{{ secret_key }}" 11 | region = "{{ terraform.vars.region }}" 12 | } 13 | 14 | {% import "terraform/main/variables.tf.jinja2" as variables with context %} 15 | {% import "terraform/modules/macros.tf.jinja2" as macros with context %} 16 | 17 | {% include "terraform/modules/vpc/main.tf.jinja2" %} 18 | {% include "terraform/modules/cassandra/main.tf.jinja2" %} 19 | -------------------------------------------------------------------------------- /examples/cassandra-stress/terraform/main/variables.tf.jinja2: -------------------------------------------------------------------------------- 1 | {% set amazon_linux_hvm_ami = { 2 | "ap-northeast-1": "ami-18869819", 3 | "ap-southeast-1": "ami-96bb90c4", 4 | "ap-southeast-2": "ami-d50773ef", 5 | "eu-west-1": "ami-9d23aeea", 6 | "sa-east-1": "ami-af9925b2", 7 | "us-east-1": "ami-146e2a7c", 8 | "us-west-1": "ami-42908907", 9 | "us-west-2": "ami-dfc39aef" 10 | } %} 11 | 12 | {% set amazon_linux_nat_ami = { 13 | "ap-northeast-1": "ami-27d6e626", 14 | "ap-southeast-1": "ami-6aa38238", 15 | "ap-southeast-2": "ami-893f53b3", 16 | "eu-west-1": "ami-14913f63", 17 | "sa-east-1": "ami-8122969c", 18 | "us-east-1": "ami-184dc970", 19 | "us-west-1": "ami-a98396ec", 20 | "us-west-2": "ami-290f4119" 21 | } %} 22 | 23 | {% set azs = { 24 | 'us-east-1': 'us-east-1e' 25 | } %} -------------------------------------------------------------------------------- /examples/cassandra-stress/terraform/modules/cassandra/main.tf.jinja2: -------------------------------------------------------------------------------- 1 | {{ macros.iam_role("cassandra") }} 2 | {{ macros.subnet("cassandra", terraform.vars.cassandra_subnets, public_ip=True) }} 3 | 4 | resource "aws_security_group" "cassandra_sg" { 5 | description = "cassandra sg" 6 | name = "{{ cluster }}-{{ environment }}-cassandra" 
7 | vpc_id = "${aws_vpc.vpc.id}" 8 | tags { 9 | Name = "{{ cluster }}-{{ environment }}-cassandra" 10 | } 11 | 12 | ingress { 13 | cidr_blocks = ["{{ terraform.vars.vpc_cidr }}"] 14 | from_port = 0 15 | protocol = "-1" 16 | to_port = 0 17 | } 18 | 19 | egress { 20 | // Allow all outbound traffic 21 | // TODO: convert this to an aws_security_group_rule when possible to do so 22 | from_port = 0 23 | to_port = 0 24 | protocol = "-1" 25 | cidr_blocks = ["0.0.0.0/0"] 26 | } 27 | } 28 | 29 | resource "aws_instance" "cassandra" { 30 | count = "{{ terraform.cassandra_instance_count }}" 31 | ami = "{{ variables.amazon_linux_hvm_ami[terraform.vars.region] }}" 32 | iam_instance_profile = "${aws_iam_instance_profile.profile_cassandra.name}" 33 | instance_type = "{{ terraform.cassandra_instance_type }}" 34 | key_name = "${aws_key_pair.ssh_key.key_name}" 35 | security_groups = ["${aws_security_group.cassandra_sg.id}"] 36 | subnet_id = "${element(aws_subnet.subnet_cassandra.*.id, count.index)}" 37 | user_data = "${lookup(var.user_data, 0)}" 38 | root_block_device { 39 | volume_size = 50 40 | } 41 | ephemeral_block_device { 42 | device_name = "/dev/xvdb" 43 | virtual_name = "ephemeral0" 44 | } 45 | tags { 46 | Name = "cassandra-${count.index+1}" 47 | hostname = "cassandra-${count.index+1}.{{ terraform.domain }}" 48 | role = "cassandra" 49 | group = "TechOps" 50 | cluster = "{{ cluster }}" 51 | environment = "{{ environment }}" 52 | } 53 | } 54 | 55 | {% for i in range(0, terraform.cassandra_instance_count) %} 56 | resource "aws_route53_record" "cassandra{{ i }}" { 57 | zone_id = "${aws_route53_zone.vpc_private_zone.zone_id}" 58 | name = "cassandra-{{ i + 1 }}.{{ terraform.domain }}" 59 | type = "A" 60 | ttl = "60" 61 | records = ["${aws_instance.cassandra.{{ i }}.private_ip}"] 62 | } 63 | {% endfor %} -------------------------------------------------------------------------------- /examples/cassandra-stress/terraform/modules/macros.tf.jinja2: 
-------------------------------------------------------------------------------- 1 | {% macro subnet(name, cidrs, public_ip=False) %} 2 | 3 | resource "aws_subnet" "subnet_{{ name }}" { 4 | availability_zone = "${element(split(" ", "{{ variables.azs[terraform.vars.region] }}"), count.index)}" 5 | cidr_block = "${element(split(" ", "{{ cidrs }}"), count.index)}" 6 | count = 2 7 | map_public_ip_on_launch = {{ "true" if public_ip else "false" }} 8 | vpc_id = "${aws_vpc.vpc.id}" 9 | tags { 10 | Name = "{{ cluster }}-{{ environment }}-{{ name }}${count.index+1}" 11 | } 12 | } 13 | 14 | resource "aws_route_table_association" "subnet_route_table_association_{{ name }}" { 15 | count = 2 // this must be the same number as count in subnet creation 16 | route_table_id = "${aws_route_table.route_table_public.id}" 17 | subnet_id = "${element(aws_subnet.subnet_{{ name }}.*.id, count.index)}" 18 | } 19 | {% endmacro %} 20 | 21 | {% macro iam_role(role_name) %} 22 | 23 | // Roles 24 | resource "aws_iam_role" "role_{{ role_name }}" { 25 | name = "{{ cluster }}-role-{{ role_name }}" 26 | assume_role_policy = < /etc/sudoers.d/ec2-user-tty -------------------------------------------------------------------------------- /examples/features/ansible-vault/README.md: -------------------------------------------------------------------------------- 1 | ## Minimal example to cover usage of vault with ansible playbooks 2 | 3 | ### Editing vault items 4 | 5 | Edit / create your vault files using the `ansible-vault` utility 6 | 7 | ``` 8 | $> ansible-vault edit vault/vault_dev.yaml --vault-password-file password_dev.txt 9 | ``` 10 | 11 | Both `vault/vault_dev.yaml` and `vault/vault_prod.yaml` have a variable called `vault_variable` 12 | 13 | ``` 14 | $> ansible-vault view vault/vault_dev.yaml --vault-password-file password_dev.txt 15 | 16 | vault_variable: "A dev value" 17 | 18 | $> ansible-vault view vault/vault_prod.yaml --vault-password-file password_prod.txt 19 | 20 | vault_variable: "A prod 
value" 21 | ``` 22 | 23 | 24 | ## Running with ops 25 | 26 | In the example playbook provided, we include the vault file appropriate for the given cluster environment 27 | 28 | ``` 29 | $> ops cluster/dev/dev.yaml play playbook/example.yaml -- --vault-password-file password_dev.txt 30 | 31 | PLAY *************************************************************************** 32 | 33 | TASK [setup] ******************************************************************* 34 | ok: [localhost] 35 | 36 | TASK [debug] ******************************************************************* 37 | ok: [localhost] => { 38 | "env": "dev" 39 | } 40 | 41 | TASK [include_vars] ************************************************************ 42 | ok: [localhost] 43 | 44 | TASK [debug] ******************************************************************* 45 | ok: [localhost] => { 46 | "vault_variable": "A dev value" 47 | } 48 | 49 | 50 | ``` -------------------------------------------------------------------------------- /examples/features/ansible-vault/cluster/dev/dev.yaml: -------------------------------------------------------------------------------- 1 | inventory: 2 | - directory: inventory 3 | 4 | env: dev -------------------------------------------------------------------------------- /examples/features/ansible-vault/cluster/prod/prod.yaml: -------------------------------------------------------------------------------- 1 | inventory: 2 | - directory: inventory 3 | 4 | 5 | env: prod -------------------------------------------------------------------------------- /examples/features/ansible-vault/inventory/hosts: -------------------------------------------------------------------------------- 1 | web1 ansible_connection=local 2 | web2 ansible_connection=local 3 | 4 | [web] 5 | web1 6 | 7 | -------------------------------------------------------------------------------- /examples/features/ansible-vault/password_dev.txt: 
-------------------------------------------------------------------------------- 1 | the vault pass for dev; obviously, this password will not be committed to git -------------------------------------------------------------------------------- /examples/features/ansible-vault/password_prod.txt: -------------------------------------------------------------------------------- 1 | the vault pass for prod; obviously, this password will not be committed to git -------------------------------------------------------------------------------- /examples/features/ansible-vault/playbook/example.yaml: -------------------------------------------------------------------------------- 1 | - hosts: web 2 | tasks: 3 | - debug: var=env 4 | - include_vars: "../../vault/vault_{{ env }}.yaml" 5 | 6 | - debug: var=vault_variable -------------------------------------------------------------------------------- /examples/features/ansible-vault/vault/vault_dev.yaml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 66356338633939386334303936373731303130346330643037396534363566626130383035653861 3 | 3633623237383936343762383662663938383466666431610a323738376161653366656163303533 4 | 35613533353833323262623932383737386164643234393532343138393136613335303637393730 5 | 3162633730666532610a633635323535303561373833623664396538663332366333396239613838 6 | 36663736633838626336346131636563333463616633383333396661663136656362 7 | -------------------------------------------------------------------------------- /examples/features/ansible-vault/vault/vault_prod.yaml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 62373963626534663036376563613336396536366536323138326330383339616435353933656438 3 | 6161636330346237643164343964323433316337316535610a626561613465303237663633663930 4 | 30363333656266336634386263623264396264356663336437343666326537623463303035333736 5 | 
3730656364393631660a316264303139633865383738343163623066666162313636393066363065 6 | 65313363656231626162376337653662663432363066373639376465376561653637 7 | -------------------------------------------------------------------------------- /examples/features/inventory/.opsconfig.yaml: -------------------------------------------------------------------------------- 1 | terraform.version: v0.6.3 2 | inventory.max_age: 604800 # 7 days -------------------------------------------------------------------------------- /examples/features/inventory/README.md: -------------------------------------------------------------------------------- 1 | Assuming you have instances already running in AWS, you can use `ops` to manage them or, at the very least, you can list them. 2 | The tool leverages an AWS tag that needs to be present on the EC2 instances called `cluster`. 3 | 4 | The following examples lists nodes for the defined AWS profile, that have the tag `cluster` = `mycluster1`. 5 | Check `my-aws-cluster.yaml` for configuration. 6 | 7 | ```sh 8 | aws configure --profile aam-npe 9 | 10 | AWS Access Key ID [None]: 11 | AWS Secret Access Key [None]: 12 | Default region name [None]: us-east-1 13 | ``` 14 | 15 | ```sh 16 | $ ops my-aws-cluster.yaml inventory 17 | ``` 18 | 19 | This will return the list of instances. 20 | 21 | You can then SSH to one of the nodes. 
For instance: 22 | ```sh 23 | $ ops my-aws-cluster.yaml ssh mywebapp-1 24 | ``` 25 | -------------------------------------------------------------------------------- /examples/features/inventory/local_inventory/hosts: -------------------------------------------------------------------------------- 1 | [use-dcs1:children] 2 | use-prod-dcs-1 3 | 4 | [irl1-dcs1:children] 5 | irl1-prod-dcs-1 -------------------------------------------------------------------------------- /examples/features/inventory/my-aws-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | inventory: 3 | - plugin: cns 4 | args: 5 | clusters: 6 | - region: us-east-1 7 | boto_profile: aam-npe # make sure you have this profile in your ~/.aws/credentials file 8 | names: [mycluster1] # this assumes the EC2 nodes have the Tag Name "cluster" with Value "mycluster1" 9 | -------------------------------------------------------------------------------- /examples/features/inventory/my-azure-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | inventory: 3 | - plugin: azr 4 | args: 5 | tags: environment=prod 6 | locations: westeurope,northeurope 7 | -------------------------------------------------------------------------------- /examples/features/packer/README.md: -------------------------------------------------------------------------------- 1 | In this directory: 2 | ```shell 3 | ops clusters/ubuntu.yaml packer validate 4 | ops clusters/ubuntu.yaml packer build 5 | ``` 6 | -------------------------------------------------------------------------------- /examples/features/packer/clusters/ubuntu.yaml: -------------------------------------------------------------------------------- 1 | packer: 2 | clouds: 3 | aws: 4 | boto_profile: yourbotoprofile 5 | template: 'packer/ubuntu.json' 6 | variables: 7 | ami_name: 'packer-example' 8 | instance_type: t2.micro 9 | region: "us-east-1" 10 | source_ami: 
'ami-fce3c696' 11 | ssh_username: "ubuntu" 12 | -------------------------------------------------------------------------------- /examples/features/packer/packer/ubuntu.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "ami_name": "", 4 | "aws_access_key": "", 5 | "aws_secret_key": "", 6 | "instance_type": "", 7 | "region": "", 8 | "source_ami": "", 9 | "ssh_username": "" 10 | }, 11 | "builders": [{ 12 | "type": "amazon-ebs", 13 | "access_key": "{{user `aws_access_key`}}", 14 | "secret_key": "{{user `aws_secret_key`}}", 15 | "region": "{{user `region`}}", 16 | "source_ami": "{{user `source_ami`}}", 17 | "instance_type": "{{user `instance_type`}}", 18 | "ssh_username": "{{user `ssh_username`}}", 19 | "ami_name": "{{user `ami_name`}} {{timestamp}}" 20 | }] 21 | } 22 | -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/README.md: -------------------------------------------------------------------------------- 1 | # Example terraform and ansible set-up 2 | 3 | ## Quick-start 4 | 5 | Scan clusters/example.yaml and look over what needs to be changed, especially your public key that you will use to ssh to 6 | hosts and your boto_profile 7 | 8 | ``` 9 | # see the resources that will be created in your aws account 10 | ops clusters/example.yaml terraform plan 11 | 12 | # create the resources 13 | # for this example, you will get a cluster with vpc with a bastion box, a nat box, 14 | # 1 web behind a load balancer and 1 db host 15 | ops clusters/example.yaml terraform apply 16 | 17 | # see the inventory 18 | ops clusters/example.yaml inventory 19 | 20 | # configure the cluster 21 | ops clusters/example.yaml 
play ansible/playbooks/site.yaml 22 | 23 | # destroy 24 | ops clusters/example.yaml terraform destroy 25 | ``` 26 | -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | timeout = 30 4 | roles_path = /etc/ansible/roles:ansible/roles 5 | 6 | [privilege_escalation] 7 | become = True 8 | 9 | [ssh_connection] 10 | scp_if_ssh = False 11 | pipelining = True -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/playbooks/site.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | 4 | # include custom variables before each run 5 | pre_tasks: 6 | - name: include os vars 7 | include_vars: "{{ item }}" 8 | with_first_found: 9 | - "./vars/dist/{{ ansible_distribution }}.yaml" 10 | tags: always 11 | 12 | roles: 13 | - { role: common, tags: common} 14 | 15 | - hosts: web 16 | sudo: yes 17 | roles: 18 | - { role: web, tags: web} 19 | 20 | - hosts: db 21 | sudo: yes 22 | roles: 23 | - { role: db, tags: db} -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/roles/common/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hostname: name={{ ec2_tag_hostname | default(ansible_hostname) }} 3 | sudo: yes 4 | tags: hostname -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/roles/db/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks for the db role 3 | - debug: msg="Running db tasks" 4 | 5 | 
-------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/roles/web/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks for the web role 3 | - debug: msg="Running web tasks" -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/ansible/tasks/copy-key.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | sudo: yes 4 | gather_facts: no 5 | vars: 6 | admins_pub_keys: 7 | vlascean: ssh-rsa mykey 8 | tasks: 9 | - file: path=~/.ssh state=directory mode=500 10 | - lineinfile: dest=~/.ssh/authorized_keys line="{{ item.value }}" 11 | with_dict: "{{ admins_pub_keys }}" 12 | 13 | - lineinfile: dest=/home/ec2-user/.ssh/authorized_keys line="{{ item.value }}" 14 | with_dict: "{{ admins_pub_keys }}" 15 | -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/clusters/example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ssh_user: ec2-user 3 | 4 | inventory_settings: 5 | max_age: 60 6 | 7 | inventory: 8 | - plugin: cns 9 | args: 10 | clusters: 11 | - region: us-west-2 12 | boto_profile: debug 13 | names: ['{{ cluster }}'] 14 | 15 | terraform: 16 | path: terraform/main 17 | boto_profile: debug 18 | vars: 19 | cluster_prefix: '{{ cluster }}' 20 | region: us-west-2 21 | 22 | vpc_cidr: "172.16.0.0/19" 23 | number_of_az: 2 24 | dmz_subnets: 25 | 172.16.1.0/25 26 | 172.16.1.128/25 27 | web_subnets: 28 | 172.16.2.0/24 29 | 172.16.3.0/24 30 | db_subnets: 31 | 172.16.4.0/24 32 | 172.16.5.0/24 33 | 34 | environment: 'dev' 35 | 36 | public_key: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDPoftvk6oT5adTX+3aCgS956Jech/9t/ykXUoV63/tXaDbQ+r3anmXxrwcdQyCkbGPkAg9N02DhrrEK5LUuCyCYm66+/DBH0RshM9ZASb6PSTyawJpSM3PomXyCHV0YqmruvGyTnQVY5C809BEWs6bFZzOdu6PKPPkhdA3NBgu5fzslH1BfgR58M1bSAYaVrGhORsGwQeXkfsv59T8+9P2Jaw90y64svYE/ykW6Foh0L5jklGMUEihZIXUeZrm9sFJ4yOnVTvYT2eDI9FQs1A1+tVpW6HDZ0/2zNWKiKjiRuDI684INsc09aZSrPPWGpA/4XZ5637rfAU55x1S7RlH debugger' -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/terraform/main/main.tf.jinja2: -------------------------------------------------------------------------------- 1 | variable "access_key" {} 2 | variable "secret_key" {} 3 | variable "cluster_prefix" {} 4 | variable "environment" {} 5 | variable "region" {} 6 | variable "vpc_cidr" {} 7 | variable "dmz_subnets" {} 8 | variable "public_key" {} 9 | variable "number_of_az" {} 10 | 11 | variable "user_data" { 12 | description = "The user-data passed to the instance. Changing this will re-create the instances." 
13 | default = { 14 | "0" = "{{ 'terraform/user_data' | read_file | escape_new_lines }}" 15 | } 16 | } 17 | 18 | provider "aws" { 19 | access_key = "${var.access_key}" 20 | secret_key = "${var.secret_key}" 21 | region = "${var.region}" 22 | } 23 | 24 | {% include "terraform/modules/vpc/main.tf.jinja2" %} 25 | {% include "terraform/modules/web/main.tf.jinja2" %} 26 | {% include "terraform/modules/db/main.tf.jinja2" %} -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/terraform/main/shared_variables.tf: -------------------------------------------------------------------------------- 1 | variable "eu-west-1" { 2 | description = "This variable contains the list of all AZs available in EU Ireland Region" 3 | default = { 4 | "0" = "eu-west-1a" 5 | "1" = "eu-west-1b" 6 | "2" = "eu-west-1c" 7 | } 8 | } 9 | 10 | variable "us-east-1" { 11 | description = "This variable contains the list of all AZs available in US East N.Virginia Region" 12 | default = { 13 | "0" = "us-east-11" 14 | "1" = "us-east-1c" 15 | "2" = "us-east-1d" 16 | "3" = "us-east-1e" 17 | } 18 | } 19 | 20 | variable "us-west-2" { 21 | description = "This variable contains the list of all AZs available in US West Oregon Region" 22 | default = { 23 | "0" = "us-west-2a" 24 | "1" = "us-west-2b" 25 | "2" = "us-west-2c" 26 | } 27 | } 28 | 29 | variable "sa-east-1" { 30 | description = "This variable contains the list of all AZs available in South America (São Paulo) Region" 31 | default = { 32 | "0" = "sa-east-1a" 33 | "1" = "sa-east-1b" 34 | "2" = "sa-east-1c" 35 | } 36 | } 37 | 38 | variable "ap-southeast-1" { 39 | description = "This variable contains the list of all AZs available in Asia Pacific (Singapore) Region" 40 | default = { 41 | "0" = "ap-southeast-1a" 42 | "1" = "ap-southeast-1b" 43 | } 44 | } 45 | 46 | variable "ap-southeast-2" { 47 | description = "This variable contains the list of all AZs available in Asia Pacific (Sydney) 
Region" 48 | default = { 49 | "0" = "ap-southeast-2a" 50 | "1" = "ap-southeast-2b" 51 | } 52 | } 53 | 54 | variable "ap-northeast-1" { 55 | description = "This variable contains the list of all AZs available in Asia Pacific (Tokyo) Region" 56 | default = { 57 | "0" = "ap-northeast-1a" 58 | "1" = "ap-northeast-1b" 59 | } 60 | } 61 | 62 | variable "amazon_linux_hvm_ami" { 63 | default = { 64 | ap-northeast-1 = "ami-18869819" 65 | ap-southeast-1 = "ami-96bb90c4" 66 | ap-southeast-2 = "ami-d50773ef" 67 | eu-west-1 = "ami-9d23aeea" 68 | sa-east-1 = "ami-af9925b2" 69 | us-east-1 = "ami-146e2a7c" 70 | us-west-1 = "ami-42908907" 71 | us-west-2 = "ami-dfc39aef" 72 | } 73 | } 74 | variable "amazon_linux_nat_ami" { 75 | default = { 76 | ap-northeast-1 = "ami-27d6e626" 77 | ap-southeast-1 = "ami-6aa38238" 78 | ap-southeast-2 = "ami-893f53b3" 79 | eu-west-1 = "ami-14913f63" 80 | sa-east-1 = "ami-8122969c" 81 | us-east-1 = "ami-184dc970" 82 | us-west-1 = "ami-a98396ec" 83 | us-west-2 = "ami-290f4119" 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/terraform/modules/db/instance.tf.jinja2: -------------------------------------------------------------------------------- 1 | resource "aws_instance" "db" { 2 | count = "{{ terraform.db_instance_count | default(1) }}" 3 | ami = "${lookup(var.amazon_linux_hvm_ami, var.region)}" 4 | iam_instance_profile = "${aws_iam_instance_profile.profile_db.name}" 5 | instance_type = "{{ terraform.db_instance_size | default("t2.micro") }}" 6 | key_name = "${aws_key_pair.ssh_key.key_name}" 7 | security_groups = ["${aws_security_group.db_sg.id}"] 8 | subnet_id = "${element(aws_subnet.subnet_db.*.id, count.index)}" 9 | user_data = "${lookup(var.user_data, 0)}" 10 | root_block_device { 11 | volume_size = 50 12 | } 13 | ephemeral_block_device { 14 | device_name = "/dev/xvdb" 15 | virtual_name = "ephemeral0" 16 | } 17 | tags { 18 | Name = 
"${var.cluster_prefix}-${var.environment}-db-${count.index+1}" 19 | hostname = "${var.cluster_prefix}-${var.environment}-db-${count.index+1}.${var.cluster_prefix}.debug.mycompany.com" 20 | role = "db" 21 | group = "TechOps" 22 | cluster = "${var.cluster_prefix}" 23 | environment = "${var.environment}" 24 | } 25 | } -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/terraform/modules/db/main.tf.jinja2: -------------------------------------------------------------------------------- 1 | variable "db_subnets" {} 2 | 3 | {% include "terraform/modules/db/instance.tf.jinja2" %} 4 | {% import "terraform/modules/macros.tf.jinja2" as macros %} 5 | 6 | {{ macros.iam_role("db") }} 7 | {{ macros.private_subnet("db", terraform.vars.db_subnets) }} 8 | 9 | resource "aws_security_group" "db_sg" { 10 | description = "db sg" 11 | name = "${var.cluster_prefix}-${var.environment}-db" 12 | vpc_id = "${aws_vpc.vpc.id}" 13 | tags { 14 | Name = "${var.cluster_prefix}-${var.environment}-db" 15 | } 16 | 17 | ingress { 18 | cidr_blocks = ["${var.vpc_cidr}"] 19 | from_port = 0 20 | protocol = "-1" 21 | to_port = 0 22 | } 23 | 24 | egress { 25 | // Allow all outbound traffic 26 | // TODO: convert this to an aws_security_group_rule when possible to do so 27 | from_port = 0 28 | to_port = 0 29 | protocol = "-1" 30 | cidr_blocks = ["0.0.0.0/0"] 31 | } 32 | } -------------------------------------------------------------------------------- /examples/features/terraform-and-ansible/terraform/modules/macros.tf.jinja2: -------------------------------------------------------------------------------- 1 | {% macro private_subnet(name, cidrs) %} 2 | resource "aws_subnet" "subnet_{{ name }}" { 3 | availability_zone = "${lookup(var.region, count.index)}" 4 | cidr_block = "${element(split(" " , "{{ cidrs }}"), count.index)}" 5 | count = 2 6 | map_public_ip_on_launch = false 7 | vpc_id = "${aws_vpc.vpc.id}" 8 | tags { 9 | Name = 
"${var.cluster_prefix}-${var.environment}-{{ name }}${count.index+1}" 10 | } 11 | } 12 | 13 | resource "aws_route_table_association" "subnet_route_table_association_{{ name }}" { 14 | count = 2 // this must be the same number as count in subnet creation 15 | route_table_id = "${aws_route_table.route_table_private.id}" 16 | subnet_id = "${element(aws_subnet.subnet_{{ name }}.*.id, count.index)}" 17 | } 18 | {% endmacro %} 19 | 20 | 21 | {% macro public_subnet(name, cidrs, public_ip=False) %} 22 | 23 | resource "aws_subnet" "subnet_{{ name }}" { 24 | availability_zone = "${lookup(var.region, count.index)}" 25 | cidr_block = "${element(split(" " , "{{ cidrs }}"), count.index)}" 26 | count = 2 27 | map_public_ip_on_launch = {{ "true" if public_ip else "false" }} 28 | vpc_id = "${aws_vpc.vpc.id}" 29 | tags { 30 | Name = "${var.cluster_prefix}-${var.environment}-{{ name }}${count.index+1}" 31 | } 32 | } 33 | 34 | resource "aws_route_table_association" "subnet_route_table_association_{{ name }}" { 35 | count = 2 // this must be the same number as count in subnet creation 36 | route_table_id = "${aws_route_table.route_table_public.id}" 37 | subnet_id = "${element(aws_subnet.subnet_{{ name }}.*.id, count.index)}" 38 | } 39 | {% endmacro %} 40 | 41 | {% macro iam_role(role_name) %} 42 | 43 | // Roles 44 | resource "aws_iam_role" "role_{{ role_name }}" { 45 | name = "${var.cluster_prefix}-role-{{ role_name }}" 46 | assume_role_policy = < /etc/sudoers.d/ec2-user-tty -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/.opsconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | compositions: 3 | order: 4 | terraform: 5 | - account 6 | - network 7 | - cluster 8 | - spinnaker 9 | helmfile: 10 | - helmfiles 11 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/README.md: 
-------------------------------------------------------------------------------- 1 | Note that you need the `.opsconfig.yaml` file (which is already present in this folder) for this to work. 2 | 3 | 1. Run 'terraform plan' for all compositions for a given cluster: 4 | ```sh 5 | # generates config and runs terraform 6 | ops config/env=dev/cluster=cluster1 terraform plan 7 | ``` 8 | 9 | 2. Run 'terraform apply' for all compositions for a given cluster: 10 | ```sh 11 | ops config/env=dev/cluster=cluster1 terraform apply --skip-plan 12 | ``` 13 | 14 | 3. Run a single composition: 15 | ```sh 16 | ops config/env=dev/cluster=cluster1/composition=network terraform apply --skip-plan 17 | ``` 18 | 19 | 4. If you only want to generate and view the config you can run: 20 | ```sh 21 | ops config/env=dev/cluster=cluster1/composition=network config 22 | ``` 23 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/compositions/terraform/cluster/main.tf: -------------------------------------------------------------------------------- 1 | variable "config" {} 2 | 3 | module "cluster" { 4 | source = "../../../modules/cluster" 5 | config = var.config 6 | } 7 | 8 | output "cluster_name" { 9 | value = var.config.cluster.name 10 | } 11 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/compositions/terraform/network/main.tf: -------------------------------------------------------------------------------- 1 | variable "config" {} 2 | 3 | module "network" { 4 | source = "../../../modules/network" 5 | config = var.config 6 | } 7 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/cluster=cluster1/composition=cluster/conf.yaml: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/adobe/ops-cli/8246f18c9c9d10ba6b7cdbace6a756281cca892b/examples/features/terraform-hierarchical/config/env=dev/cluster=cluster1/composition=cluster/conf.yaml -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/cluster=cluster1/composition=network/conf.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adobe/ops-cli/8246f18c9c9d10ba6b7cdbace6a756281cca892b/examples/features/terraform-hierarchical/config/env=dev/cluster=cluster1/composition=network/conf.yaml -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/cluster=cluster1/conf.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: cluster1 3 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/cluster=cluster2/composition=cluster/conf.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adobe/ops-cli/8246f18c9c9d10ba6b7cdbace6a756281cca892b/examples/features/terraform-hierarchical/config/env=dev/cluster=cluster2/composition=cluster/conf.yaml -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/cluster=cluster2/composition=network/conf.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/adobe/ops-cli/8246f18c9c9d10ba6b7cdbace6a756281cca892b/examples/features/terraform-hierarchical/config/env=dev/cluster=cluster2/composition=network/conf.yaml -------------------------------------------------------------------------------- 
/examples/features/terraform-hierarchical/config/env=dev/cluster=cluster2/conf.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | name: cluster2 3 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/config/env=dev/default.yaml: -------------------------------------------------------------------------------- 1 | account: 2 | cloud_provider: 3 | aws: 4 | profile: test_profile 5 | 6 | env: 7 | name: dev 8 | 9 | region: 10 | location: us-east-1 11 | name: va6 12 | 13 | project: 14 | prefix: ee 15 | 16 | # This value will be overridden 17 | cluster: 18 | name: default 19 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/modules/cluster/main.tf: -------------------------------------------------------------------------------- 1 | variable "config" {} 2 | 3 | output "cluster_name" { 4 | value = var.config.cluster.name 5 | } 6 | -------------------------------------------------------------------------------- /examples/features/terraform-hierarchical/modules/network/main.tf: -------------------------------------------------------------------------------- 1 | variable "config" {} 2 | 3 | locals { 4 | env = var.config["env"] 5 | region = var.config["region"]["location"] 6 | project = var.config["project"]["prefix"] 7 | } 8 | 9 | #resource "aws_s3_bucket" "bucket" { 10 | # bucket = "${local.env}-${local.region}-${local.project}-test-bucket" 11 | # acl = "private" 12 | 13 | # tags = { 14 | # Name = "My bucket" 15 | # Environment = "na" 16 | # } 17 | #} -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:base" 5 | ] 6 | } 7 | 
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | simpledi==0.4.1 2 | awscli==1.32.6 3 | boto3==1.34.6 4 | botocore==1.34.6 5 | urllib3==2.0.7 6 | ansible==8.7.0 7 | azure-common==1.1.28 8 | azure==4.0.0 9 | msrestazure==0.6.4 10 | Jinja2==3.1.4 11 | hashmerge 12 | python-consul 13 | hvac==1.2.1 14 | passgen 15 | inflection==0.5.1 16 | kubernetes==26.1.0 17 | himl==0.15.2 18 | six 19 | GitPython==3.1.* 20 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Adobe. All rights reserved. 2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

import os
import sys

try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup, find_packages

# The PyPI long description is taken verbatim from the README.
with open('README.md') as f:
    _readme = f.read()

# Resolve requirements.txt relative to this setup.py so the build works
# regardless of the current working directory.
_mydir = os.path.abspath(os.path.dirname(sys.argv[0]))
# BUG FIX: the requirements file handle was previously leaked (bare open()
# inside a list comprehension); use a context manager, and os.path.join
# instead of the hand-rolled os.path.sep.join.
with open(os.path.join(_mydir, 'requirements.txt'), 'r') as _req_file:
    _requires = [r for r in _req_file.read().split('\n') if len(r) > 1]

setup(
    name='ops-cli',
    version='2.3.1',
    description='Ops - wrapper for Terraform, Ansible, and SSH for cloud automation',
    long_description=_readme + '\n\n',
    long_description_content_type='text/markdown',
    url='https://github.com/adobe/ops-cli',
    python_requires='>=3.5',
    author='Adobe',
    author_email='noreply@adobe.com',
    license='Apache2',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: 3.11',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing :: Markup :: HTML'
    ],
    package_dir={'': 'src'},
    packages=find_packages('src'),
    package_data={
        '': ['data/ansible/*', 'data/ansible/tasks/*', 'data/ssh/*', 'data/terraform/*']
    },
    install_requires=_requires,
    entry_points={
        'console_scripts': [
            'ops = ops.main:run'
        ]
    }
)
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

import pkg_resources
import re
from distutils.version import StrictVersion
from subprocess import call, Popen, PIPE

from six import PY3

from .cli import display


def validate_ops_version(min_ops_version):
    """Raise when the installed ops-cli is older than ``min_ops_version``.

    The installed version is looked up from the ``ops-cli`` distribution in
    the active working set; IndexError would mean ops-cli is not installed.
    """
    current_ops_version = [
        x.version for x in pkg_resources.working_set if x.project_name == "ops-cli"][0]
    if StrictVersion(current_ops_version) < StrictVersion(min_ops_version):
        raise Exception("The current ops version {0} is lower than the minimum required version {1}. "
                        "Please upgrade by following the instructions seen here: "
                        "https://github.com/adobe/ops-cli#installing".format(current_ops_version, min_ops_version))


class Executor(object):
    """ All cli commands usually return a dict(command=...)
    that will be executed by this handler"""

    def __call__(self, result, pass_trough=True, cwd=None):
        """Execute ``result`` and return the shell exit code.

        Any exception is reported (message plus traceback) rather than
        propagated, in which case None is returned.
        """
        try:
            return self._execute(result, pass_trough, cwd)
        except Exception as ex:
            display(str(ex) if PY3 else ex.message, stderr=True, color='red')
            display(
                '------- TRACEBACK ----------',
                stderr=True,
                color='dark gray')
            import traceback
            traceback.print_exc()
            display(
                '------ END TRACEBACK -------',
                stderr=True,
                color='dark gray')

    def _execute(self, result, pass_trough=True, cwd=None):
        """Run result['command'] in a shell, then fire result['post_actions'].

        With pass_trough the child inherits stdout/stderr; otherwise output
        is captured and echoed (credentials masked).  Returns the exit code,
        or None when no command was present.
        """
        if not result or not isinstance(result, dict):
            return

        # BUG FIX: exit_code used to be unbound (NameError at the return
        # below) whenever the result dict carried only 'post_actions'.
        exit_code = None

        if 'command' in result:
            shell_command = result['command']
            display(
                "%s" %
                self.shadow_credentials(shell_command),
                stderr=True,
                color='yellow')
            if pass_trough:
                exit_code = call(shell_command, shell=True, cwd=cwd)
            else:
                p = Popen(
                    shell_command,
                    shell=True,
                    stdout=PIPE,
                    stderr=PIPE,
                    cwd=cwd)
                output, errors = p.communicate()
                # NOTE(review): output is bytes here, so str() shows the
                # b'...' repr — presumably tolerated downstream; confirm.
                display(str(output))
                if errors:
                    display(
                        "%s" %
                        self.shadow_credentials(errors),
                        stderr=True,
                        color='red')
                exit_code = p.returncode

        if 'post_actions' in result:
            for callback in result['post_actions']:
                callback()

        return exit_code

    def shadow_credentials(self, cmd):
        """Mask AWS secret/access keys in *cmd* before it is displayed."""
        if isinstance(cmd, (bytes, bytearray)):
            cmd = cmd.decode("utf-8")
        cmd = re.sub(r"secret_key=.{20}", "secret_key=****", cmd)
        cmd = re.sub(r"access_key=.{10}", "access_key=****", cmd)

        return cmd


class OpsException(Exception):
    """Base exception for ops-cli errors."""
    pass
2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 10 | -------------------------------------------------------------------------------- /src/ops/ansible/callback_plugins/__init__.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. You may obtain a copy 4 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | #Unless required by applicable law or agreed to in writing, software distributed under 7 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | #OF ANY KIND, either express or implied. See the License for the specific language 9 | #governing permissions and limitations under the License. 10 | 11 | -------------------------------------------------------------------------------- /src/ops/ansible/filter_plugins/__init__.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. 
You may obtain a copy 4 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | #Unless required by applicable law or agreed to in writing, software distributed under 7 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | #OF ANY KIND, either express or implied. See the License for the specific language 9 | #governing permissions and limitations under the License. 10 | 11 | -------------------------------------------------------------------------------- /src/ops/ansible/filter_plugins/commonfilters.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. You may obtain a copy 4 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | #Unless required by applicable law or agreed to in writing, software distributed under 7 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | #OF ANY KIND, either express or implied. See the License for the specific language 9 | #governing permissions and limitations under the License. 
10 | 11 | from __future__ import absolute_import 12 | import os 13 | from ops.cli import display 14 | from six import iteritems 15 | 16 | 17 | def read_file(fname): 18 | if os.path.exists(fname): 19 | with open(fname) as f: 20 | return f.read() 21 | else: 22 | display("read_file: File %s does not exist" % fname, stderr=True, color='red') 23 | return None 24 | 25 | def write_file(fname, contents): 26 | handler = open(fname,'w') 27 | handler.write(contents) 28 | handler.close() 29 | 30 | def escape_new_lines(string): 31 | return string.replace("\n", "\\n") 32 | 33 | def read_consul(key_path, consul_url="http://localhost:8500", recurse=True, show_error=False): 34 | ret = {} 35 | try: 36 | from ops.simpleconsul import SimpleConsul 37 | sc = SimpleConsul(consul_url) 38 | ret = sc.get(key_path,recurse) 39 | except Exception as e: 40 | if show_error: 41 | ret['error'] = e.message 42 | return ret 43 | 44 | def read_envvar(varname, default=None): 45 | import os 46 | return os.getenv(varname,default) 47 | 48 | def read_yaml(fname, show_error=False): 49 | ret = {} 50 | try: 51 | import yaml as y 52 | f = open(fname,"r") 53 | ret = y.safe_load(f.read()) 54 | except Exception as e: 55 | if show_error: 56 | ret['error'] = e.message 57 | return ret 58 | 59 | def flatten_tree(d, parent_key='', sep='/'): 60 | items = [] 61 | for k, v in d.items(): 62 | new_key = parent_key + sep + str(k) if parent_key else str(k) 63 | if isinstance(v, dict): 64 | items.extend(flatten_tree(v, new_key, sep=sep).items()) 65 | else: 66 | items.append((new_key, v)) 67 | return dict(items) 68 | 69 | def check_vault( 70 | secret_path, key='value', vault_user=None, vault_url=None, 71 | token=None, namespace=None, mount_point=None, auto_prompt=True): 72 | 73 | from ops.simplevault import SimpleVault 74 | sv = SimpleVault( 75 | vault_user=vault_user, vault_addr=vault_url, vault_token=token, 76 | namespace=namespace, mount_point=mount_point, auto_prompt=auto_prompt) 77 | check_status = sv.check(secret_path, 
key) 78 | # we want to return these string values because this is what Jinja2 understands 79 | if check_status: 80 | return "true" 81 | return "false" 82 | 83 | def read_vault( 84 | secret_path, key='value', fetch_all=False, vault_user=None, vault_url=None, 85 | token=None, namespace=None, mount_point=None, auto_prompt=True): 86 | 87 | from ops.simplevault import SimpleVault 88 | sv = SimpleVault( 89 | vault_user=vault_user, vault_addr=vault_url, vault_token=token, 90 | namespace=namespace, mount_point=mount_point, auto_prompt=auto_prompt) 91 | return sv.get(path=secret_path, key=key, fetch_all=fetch_all) 92 | 93 | def write_vault( 94 | secret_path, key='value', data="", vault_user=None, vault_url=None, 95 | namespace=None, mount_point=None, token=None, auto_prompt=True): 96 | 97 | from ops.simplevault import SimpleVault 98 | sv = SimpleVault( 99 | vault_user=vault_user, vault_addr=vault_url, vault_token=token, 100 | namespace=None, mount_point=None, auto_prompt=auto_prompt) 101 | new_data = {} 102 | if isinstance(data, dict): 103 | for k,v in iteritems(data): 104 | new_data[k] = str(v) 105 | elif key: 106 | new_data[key] = str(data) 107 | else: 108 | return False 109 | return sv.put(path=secret_path, value=new_data ) 110 | 111 | def read_ssm(key, aws_profile, region_name='us-east-1'): 112 | from ops.simplessm import SimpleSSM 113 | ssm = SimpleSSM(aws_profile, region_name) 114 | return ssm.get(key) 115 | 116 | def managed_vault_secret(secret_path,key='value', 117 | policy={}, 118 | vault_user=None, 119 | vault_addr=None, 120 | vault_token=None, 121 | namespace=None, 122 | mount_point=None, 123 | auto_prompt=True): 124 | from ops.simplevault import ManagedVaultSecret 125 | ms = ManagedVaultSecret(path=secret_path, 126 | key=key, 127 | policy=policy, 128 | vault_user=vault_user, 129 | vault_addr=vault_addr, 130 | vault_token=vault_token, 131 | namespace=namespace, 132 | mount_point=mount_point, 133 | auto_prompt=auto_prompt) 134 | return ms.get() 135 | 136 | def 
escape_json(input): 137 | import json 138 | escaped = json.dumps(input) 139 | if escaped.startswith('"') and escaped.endswith('"'): 140 | # trim double quotes 141 | return escaped[1:-1] 142 | return escaped 143 | 144 | class FilterModule(object): 145 | 146 | def filters(self): 147 | return { 148 | 'escape_new_lines': escape_new_lines, 149 | 'flatten_tree': flatten_tree, 150 | 'read_consul': read_consul, 151 | 'read_envvar': read_envvar, 152 | 'read_file': read_file, 153 | 'read_vault': read_vault, 154 | 'read_yaml': read_yaml, 155 | 'write_vault': write_vault, 156 | 'managed_vault_secret': managed_vault_secret, 157 | 'read_ssm': read_ssm, 158 | 'escape_json': escape_json, 159 | 'check_vault': check_vault 160 | } 161 | -------------------------------------------------------------------------------- /src/ops/ansible/vars_plugins/__init__.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. You may obtain a copy 4 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | #Unless required by applicable law or agreed to in writing, software distributed under 7 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | #OF ANY KIND, either express or implied. See the License for the specific language 9 | #governing permissions and limitations under the License. 10 | 11 | -------------------------------------------------------------------------------- /src/ops/ansible/vars_plugins/clusterconfig.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. 
#Copyright 2019 Adobe. All rights reserved.
#This file is licensed to you under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License. You may obtain a copy
#of the License at http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software distributed under
#the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
#OF ANY KIND, either express or implied. See the License for the specific language
#governing permissions and limitations under the License.

from ansible.errors import AnsibleParserError
from ansible.plugins.vars import BaseVarsPlugin
import os
from ops.main import AppContainer
import logging

logger = logging.getLogger(__name__)

class VarsModule(BaseVarsPlugin):

    """
    Ansible vars plugin that exposes the resolved ops cluster configuration
    (loaded from the path in $OPS_CLUSTER_CONFIG) to all groups and hosts.
    """

    def __init__(self, *args):
        """Boot a throwaway ops AppContainer and cache its cluster config."""
        super(VarsModule, self).__init__(*args)

        logger.debug("Running plugin: %s with cluster config %s",
                     __file__, os.environ['OPS_CLUSTER_CONFIG'])

        # 'noop' keeps the container from executing any real command
        ops_app = AppContainer([os.environ['OPS_CLUSTER_CONFIG'], 'noop'])
        self.config = ops_app.cluster_config.all()

    def get_vars(self, loader, path, entities, cache=True):
        """Return the cached cluster configuration for every entity."""
        super(VarsModule, self).get_vars(loader, path, entities)
        return self.config
#Copyright 2019 Adobe. All rights reserved.
#This file is licensed to you under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License. You may obtain a copy
#of the License at http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software distributed under
#the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
#OF ANY KIND, either express or implied. See the License for the specific language
#governing permissions and limitations under the License.

from ansible.errors import AnsibleParserError
from ansible.plugins.vars import BaseVarsPlugin
import os
from ops.main import AppContainer
import logging

logger = logging.getLogger(__name__)

class VarsModule(BaseVarsPlugin):

    """
    Ansible vars plugin that exposes the local ops installation settings,
    plus the ops package/task directories, to all groups and hosts.
    """

    def __init__(self, *args):
        """Boot a throwaway ops AppContainer and cache its ops config."""
        super(VarsModule, self).__init__(*args)

        logger.debug("Running plugin: %s with cluster config %s",
                     __file__, os.environ['OPS_CLUSTER_CONFIG'])

        # 'noop' keeps the container from executing any real command
        ops_app = AppContainer([os.environ['OPS_CLUSTER_CONFIG'], 'noop'])
        self.config = ops_app.ops_config.config.copy()
        # expose where the ops package (and its bundled ansible tasks) live
        self.config.update({
            'ops_package_dir': ops_app.ops_config.package_dir,
            'ops_ansible_tasks_dir': ops_app.ops_config.package_dir + "/data/ansible/tasks"
        })

    def get_vars(self, loader, path, entities, cache=True):
        """Return the cached ops configuration for every entity."""
        super(VarsModule, self).get_vars(loader, path, entities)
        return self.config
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

import os
from subprocess import Popen, PIPE
import sys


def get_output(command, trim=True):
    """Run *command* in a shell and return its stdout as bytes.

    With trim (the default) surrounding whitespace/newlines are stripped.
    """
    out = Popen(command, shell=True, stdout=PIPE).communicate()[0]
    if trim:
        out = out.strip()

    return out


def display(msg, **kwargs):
    """Print *msg*, preferring Ansible's color-aware pretty printer."""
    # use ansible pretty printer if available
    try:
        from ansible.playbook.play import display
        display.display(msg, **kwargs)
    except ImportError:
        print(msg)


def err(msg):
    """Display *msg* on stderr in red."""
    display(str(msg), stderr=True, color='red')


def get_config_value(config, key):
    """Return config[key]; on a missing key print an actionable error and exit(1)."""
    try:
        return config[key]
    except KeyError:
        # BUG FIX: the old handler referenced e.message, which does not exist
        # on Python 3 KeyError — it raised AttributeError instead of printing
        # this message.  The missing key IS `key`, so use it directly; also
        # avoid a second KeyError when 'cluster' itself is absent.
        err("You must set the %s value in %s.yaml or in the cli as an extra variable: -e %s=value" %
            (key, config.get('cluster', '<cluster>'), key))
        sys.exit(1)
from . import get_output
from shlex import quote


def access_key(profile):
    """Return the aws_access_key_id configured for *profile* via the aws CLI."""
    cmd = 'aws configure get aws_access_key_id --profile {}'.format(quote(profile))
    return get_output(cmd)


def secret_key(profile):
    """Return the aws_secret_access_key configured for *profile* via the aws CLI."""
    cmd = 'aws configure get aws_secret_access_key --profile {}'.format(quote(profile))
    return get_output(cmd)
import logging

from himl.main import ConfigRunner
from ops.cli.parser import SubParserConfig

logger = logging.getLogger(__name__)


class ConfigGeneratorParserConfig(SubParserConfig):
    """CLI wiring for the `config` subcommand (hierarchical config generation)."""

    def get_name(self):
        return 'config'

    def get_help(self):
        return 'Generate configurations based on a hierarchical structure, with templating support'

    def configure(self, parser):
        # himl owns the full argument surface for this subcommand.
        return ConfigRunner().get_parser(parser)

    def get_epilog(self):
        return '''
        Examples:
        # Generate config
        ops data/account=ee-dev/env=dev/region=va6/project=ee/cluster=experiments/composition=helmfiles config --format json --print-data
        '''


class ConfigGeneratorRunner(object):
    """Delegates config generation for a cluster path to himl's ConfigRunner."""

    def __init__(self, cluster_config_path):
        self.cluster_config_path = cluster_config_path

    def run(self, args, extra_args):
        """Run himl against the stored cluster config path."""
        logger.info("Found extra_args %s", extra_args)
        logging.basicConfig(level=logging.INFO)
        args.path = self.cluster_config_path
        # Without an output file the only useful behaviour is printing.
        if args.output_file is None:
            args.print_data = True

        ConfigRunner().do_run(args)
import yaml
import logging

from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.utils.color import stringc
from . import display
from .parser import configure_common_arguments, SubParserConfig

logger = logging.getLogger(__name__)


class InventoryParserConfig(SubParserConfig):
    """CLI wiring for the `inventory` subcommand."""

    def get_name(self):
        return 'inventory'

    def get_help(self):
        return 'Show current inventory data'

    def configure(self, parser):
        configure_common_arguments(parser)
        parser.add_argument(
            '--refresh-cache',
            action='store_true',
            help="Refresh the cache for the inventory")
        parser.add_argument('--limit', type=str,
                            help='Limit run to a specific server subgroup. Eg: --limit newton-dcs')
        parser.add_argument('--facts', default=False, action='store_true',
                            help='Show inventory facts for the given hosts')

        return parser


class InventoryRunner(object):
    """Pretty-prints the generated Ansible inventory for a cluster."""

    def __init__(self, ansible_inventory, cluster_name):
        """
        :type ansible_inventory: ops.inventory.generator.AnsibleInventory
        """
        self.ansible_inventory = ansible_inventory
        self.cluster_name = cluster_name

    def run(self, args, extra_args):
        """Display each matching host with its groups and, optionally, facts."""
        logger.info("Found extra_args %s", extra_args)
        for host in self.get_inventory_hosts(args):
            group_string = ", ".join(sorted(g.name for g in host.get_groups()))
            instance_id = host.vars.get('ec2_InstanceId', '')
            name_and_id = stringc(host.name, 'blue')
            if instance_id != '':
                name_and_id = "%s -- %s" % (name_and_id, stringc(instance_id, 'blue'))
            display("%s (%s)" % (name_and_id, stringc(group_string, 'green')))
            if args.facts:
                display(self.get_host_facts(host))

    def get_inventory_hosts(self, args):
        """Return hosts matching --limit (all hosts when no limit given)."""
        return self.ansible_inventory.get_hosts(args.limit or 'all')

    def get_host_facts(self, host, indent="\t"):
        """Return the host's vars as an indented YAML string."""
        host_vars = host.get_vars()  # avoid shadowing builtin `vars`
        dumped = yaml.dump(
            host_vars,
            indent=4,
            allow_unicode=True,
            default_flow_style=False,
            Dumper=AnsibleDumper)
        return "\n".join(indent + line for line in dumped.split("\n"))
import aws 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | class PackerParserConfig(SubParserConfig): 18 | def get_name(self): 19 | return 'packer' 20 | 21 | def get_help(self): 22 | return 'Wrap common packer tasks and inject variables from a cluster file' 23 | 24 | def configure(self, parser): 25 | parser.add_argument('subcommand', help='build | validate', type=str) 26 | return parser 27 | 28 | def get_epilog(self): 29 | return ''' 30 | Examples: 31 | # Validate a packer file 32 | ops clusters/centos7.yaml packer validate 33 | 34 | # Build a packer file 35 | ops clusters/centos7.yaml packer build 36 | ''' 37 | 38 | 39 | class PackerRunner(object): 40 | def __init__(self, root_dir, cluster_config): 41 | self.cluster_config = cluster_config 42 | self.root_dir = root_dir 43 | 44 | def run(self, args, extra_args): 45 | logger.info("Found extra_args %s", extra_args) 46 | config_all = self.cluster_config.all() 47 | 48 | packer_variables = config_all['packer']['variables'] 49 | 50 | if config_all['packer']['clouds'] is not None: 51 | if 'aws' in config_all['packer']['clouds']: 52 | profile_name = config_all['packer']['clouds']['aws']['boto_profile'] 53 | packer_variables['aws_access_key'] = aws.access_key( 54 | profile_name) 55 | packer_variables['aws_secret_key'] = aws.secret_key( 56 | profile_name) 57 | else: 58 | # add other cloud logic here 59 | pass 60 | 61 | variables = '' 62 | for key, value in packer_variables.items(): 63 | variables += " -var '%s=%s' " % (key, value) 64 | 65 | if args.subcommand == 'build': 66 | command = 'packer build %s %s' % ( 67 | variables, config_all['packer']['template']) 68 | 69 | if args.subcommand == 'validate': 70 | command = 'packer validate %s %s' % ( 71 | variables, config_all['packer']['template']) 72 | 73 | return dict( 74 | command=command 75 | ) 76 | -------------------------------------------------------------------------------- /src/ops/cli/parser.py: 
import argparse

import sys

from six import PY3


class RootParser(object):
    """Builds the top-level argparse parser for the `ops` command line."""

    def __init__(self, sub_parsers=None):
        """
        :type sub_parsers: list[SubParserConfig]
        """
        self.sub_parsers = [] if sub_parsers is None else sub_parsers

    def _get_parser(self):
        """Assemble the argparse parser with one subparser per subcommand."""
        parser = argparse.ArgumentParser(
            description='Run commands against a cluster definition', prog='ops')
        parser.add_argument(
            'cluster_config_path',
            type=str,
            help='The cluster config path cluster.yaml')
        parser.add_argument('--root-dir', type=str, help='The root of the resource tree - '
                            'it can be an absolute path or relative to the current dir')
        parser.add_argument('--verbose', '-v', action='count',
                            help='Get more verbose output from commands')
        configure_common_arguments(parser)

        subparsers = parser.add_subparsers(dest='command')

        for conf in self.sub_parsers:
            sub = subparsers.add_parser(conf.get_name(),
                                        help=conf.get_help(),
                                        epilog=conf.get_epilog(),
                                        formatter_class=conf.get_formatter())
            conf.configure(sub)

        subparsers.add_parser(
            'noop', help='used to initialize the full container for api usage')

        return parser

    @staticmethod
    def _check_args_for_unicode(args):
        """Reject argv entries that cannot be represented in UTF-8."""
        if args is None:
            args = sys.argv

        try:
            for value in args:
                if not PY3 and isinstance(value, unicode):
                    # Python3 or some Python3 compatibility mode can make
                    # arguments to be unicode, not str
                    value.encode('utf-8').encode('utf-8')
                # Python 2 str, check if it can be represented in utf8
                elif isinstance(value, str):
                    value.encode('utf-8')
        except UnicodeDecodeError as e:
            print('Invalid character in argument "{0}", most likely an "en dash", replace it with normal dash -'.format(
                e.args[1]))
            raise

    def parse_args(self, args=None):
        RootParser._check_args_for_unicode(args)
        return self._get_parser().parse_args(args)

    def parse_known_args(self, args=None):
        RootParser._check_args_for_unicode(args)
        return self._get_parser().parse_known_args(args)


class SubParserConfig(object):
    """Base class describing a single `ops` subcommand."""

    def get_name(self):
        pass

    def configure(self, parser):
        pass

    def get_formatter(self):
        return argparse.RawDescriptionHelpFormatter

    def get_help(self):
        return ""

    def get_epilog(self):
        return ""


def configure_common_arguments(parser):
    """Add the flags shared by every subcommand."""
    parser.add_argument('-e', '--extra-vars', type=str, action='append', default=[],
                        help='Extra variables to use. Eg: -e ssh_user=ssh_user')

    return parser


def configure_common_ansible_args(parser):
    """Add the flags shared by the ansible-backed subcommands."""
    parser.add_argument('--ask-sudo-pass', action='store_true',
                        help='Ask sudo pass for commands that need sudo')
    parser.add_argument('--limit', type=str,
                        help='Limit run to a specific server subgroup. Eg: --limit newton-dcs')
    parser.add_argument('--noscb', action='store_false', dest='use_scb',
                        help='Disable use of Shell Control Box (SCB) even if '
                             'it is enabled in the cluster config')

    return parser
from .parser import SubParserConfig
from .parser import configure_common_ansible_args, configure_common_arguments
from ops.inventory.sshconfig import SshConfigGenerator
import getpass
import logging

logger = logging.getLogger(__name__)


class PlaybookParserConfig(SubParserConfig):
    """CLI wiring for the `play` subcommand."""

    def get_name(self):
        return 'play'

    def get_help(self):
        return 'Run an Ansible playbook'

    def configure(self, parser):
        configure_common_arguments(parser)
        configure_common_ansible_args(parser)
        parser.add_argument(
            'playbook_path',
            type=str,
            help='The playbook path')
        parser.add_argument(
            'ansible_args',
            type=str,
            nargs='*',
            help='Extra ansible args')

    def get_epilog(self):
        return '''
        Examples:
        # Run an ansible playbook
        ops clusters/qe1.yaml play ansible/plays/cluster/configure.yaml

        # Limit the run of a playbook to a subgroup
        ops clusters/qe1.yaml play ansible/plays/cluster/configure.yaml -- --limit dcs

        # Overwrite or set a variable
        ops clusters/qe1.yaml play ansible/plays/cluster/configure.yaml -- -e city=paris

        # Filter with tags
        ops clusters/qe1.yaml play ansible/plays/cluster/configure.yaml -- -t common

        # Run a playbook and overwrite the default user
        ops clusters/qe1.yaml play ansible/plays/cluster/configure.yaml -- -u ec2-user
        '''


class PlaybookRunner(object):
    """Assembles the ansible-playbook shell command for a cluster."""

    def __init__(self, ops_config, root_dir, inventory_generator,
                 cluster_config_path, cluster_config):
        """
        :type inventory_generator: ops.inventory.generator.InventoryGenerator
        """
        self.inventory_generator = inventory_generator
        self.root_dir = root_dir
        self.ops_config = ops_config
        self.cluster_config_path = cluster_config_path
        self.cluster_config = cluster_config

    def run(self, args, extra_args):
        """Return a dict with the ansible-playbook command to execute."""
        logger.info("Found extra_args %s", extra_args)
        inventory_path, ssh_config_paths = self.inventory_generator.generate()
        ssh_config_path = SshConfigGenerator.get_ssh_config_path(
            self.cluster_config, ssh_config_paths, args.use_scb)
        ssh_config = f"ANSIBLE_SSH_ARGS='-F {ssh_config_path}'"
        ansible_config = "ANSIBLE_CONFIG=%s" % self.ops_config.ansible_config_path

        # Default user precedence: cluster config, then ops config, then local user.
        default_user = (self.cluster_config.get('ssh_user')
                        or self.ops_config.get('ssh.user')
                        or getpass.getuser())

        if default_user and not has_arg(args.ansible_args, 'u', 'user'):
            args.ansible_args.extend(['-u', default_user])
        if not has_arg(args.ansible_args, 'i', 'inventory-file'):
            args.ansible_args.extend(['-i', inventory_path])

        extra_vars = dict(cluster=self.cluster_config['cluster'])
        if "environment" in self.cluster_config.get("terraform", {}).get("vars", {}):
            extra_vars["environment"] = self.cluster_config["terraform"]["vars"]["environment"]
        extra_vars_args = ' '.join(' -e %s=%s ' % (k, v) for k, v in extra_vars.items())

        play_args = extra_vars_args + ' '.join(args.ansible_args)

        command = "cd {root_dir}; " \
                  "OPS_CLUSTER_CONFIG={cluster_config} " \
                  "ANSIBLE_FILTER_PLUGINS={filter_plugins} " \
                  "ANSIBLE_VARS_PLUGINS={vars_plugins} " \
                  "ANSIBLE_CALLBACK_PLUGINS={callback_plugins} " \
                  "{ansible_config} {ssh_config} ansible-playbook {play} {args}".format(
                      root_dir=self.root_dir,
                      cluster_config=self.cluster_config_path,
                      ansible_config=ansible_config,
                      ssh_config=ssh_config,
                      play=args.playbook_path,
                      args=play_args,
                      filter_plugins=self.ops_config.ansible_filter_plugins,
                      vars_plugins=self.ops_config.ansible_vars_plugins,
                      callback_plugins=self.ops_config.ansible_callback_plugins)

        return dict(command=command)
def has_arg(container, *args):
    """Return True if any of the given option names appears in *container*.

    Single-character names are matched as short options ("-u"); longer
    names are matched as long options ("--user").
    """
    flags = ('-' + name if len(name) == 1 else '--' + name for name in args)
    return any(flag in container for flag in flags)
import logging
from .parser import configure_common_ansible_args, SubParserConfig
from ops.inventory.sshconfig import SshConfigGenerator

logger = logging.getLogger(__name__)


class CommandParserConfig(SubParserConfig):
    """CLI wiring for the `run` subcommand (ad-hoc shell commands)."""

    def get_name(self):
        return 'run'

    def get_help(self):
        return 'Runs a command against hosts in the cluster'

    def configure(self, parser):
        configure_common_ansible_args(parser)
        parser.add_argument(
            'host_pattern',
            type=str,
            help='Limit the run to the following hosts')
        parser.add_argument(
            'shell_command',
            type=str,
            help='Shell command you want to run')
        parser.add_argument(
            'extra_args',
            type=str,
            nargs='*',
            help='Extra ansible arguments')

    def get_epilog(self):
        return '''
        Examples:
        # Last 5 installed packages on each host
        ops qe1.yaml run all 'sudo grep Installed /var/log/yum.log | tail -5'

        # See nodetool status on each cassandra node
        ops qe1.yaml run qe1-cassandra 'nodetool status'

        # Complex limits
        ops qe1.yaml run 'qe1-cassandra,!qe1-cassandra-0' 'nodetool status'

        # Show how to pass other args
        '''


class CommandRunner(object):
    """Assembles the ad-hoc `ansible -m shell` command for a cluster."""

    def __init__(self, ops_config, root_dir, inventory_generator,
                 cluster_config_path, cluster_config):
        self.inventory_generator = inventory_generator
        self.root_dir = root_dir
        self.ops_config = ops_config
        self.cluster_config_path = cluster_config_path
        self.cluster_config = cluster_config

    def run(self, args, extra_args):
        """Return a dict with the ansible shell command to execute."""
        logger.info("Found extra_args %s", extra_args)
        inventory_path, ssh_config_paths = self.inventory_generator.generate()
        ssh_config_path = SshConfigGenerator.get_ssh_config_path(
            self.cluster_config, ssh_config_paths, args.use_scb)
        joined_extra = ' '.join(args.extra_args)

        command = """cd {root_dir}
ANSIBLE_SSH_ARGS='-F {ssh_config}' ANSIBLE_CONFIG={ansible_config_path} ansible -i {inventory_path} '{limit}' \\
-m shell -a '{command}' {extra_args}""".format(
            ssh_config=ssh_config_path,
            ansible_config_path=self.ops_config.ansible_config_path,
            inventory_path=inventory_path,
            command=args.shell_command,
            limit=args.host_pattern,
            root_dir=self.root_dir,
            extra_args=joined_extra)

        if args.verbose:
            command += ' -vvv '

        return dict(command=command)
import * 17 | from ops.inventory.sshconfig import SshConfigGenerator 18 | 19 | logger = logging.getLogger(__name__) 20 | 21 | 22 | class SyncParserConfig(SubParserConfig): 23 | def configure(self, parser): 24 | parser.add_argument( 25 | '-l', 26 | '--user', 27 | type=str, 28 | help='Value for remote user that will be used for ssh') 29 | parser.add_argument('src', type=str, help='Source dir') 30 | parser.add_argument('dest', type=str, help='Dest dir') 31 | parser.add_argument('--noscb', action='store_false', dest='use_scb', 32 | help='Disable use of Shell Control Box (SCB) ' 33 | 'even if it is enabled in the cluster config') 34 | parser.add_argument( 35 | 'opts', 36 | default=['-va --progress'], 37 | nargs='*', 38 | help='Rsync opts') 39 | 40 | def get_help(self): 41 | return 'Sync files from/to a cluster' 42 | 43 | def get_name(self): 44 | return 'sync' 45 | 46 | def get_epilog(self): 47 | return """ 48 | rsync wrapper for ops inventory conventions 49 | 50 | Example: 51 | 52 | # rsync from remote dcs role 53 | ops cluster.yml sync 'dcs[0]:/usr/local/demdex/conf' /tmp/configurator-data --user remote_user 54 | 55 | # extra rsync options 56 | ops cluster.yml sync 'dcs[0]:/usr/local/demdex/conf' /tmp/configurator-data -l remote_user -- --progress 57 | """ 58 | 59 | 60 | class SyncRunner(object): 61 | 62 | def __init__(self, cluster_config, root_dir, 63 | ansible_inventory, inventory_generator, ops_config): 64 | """ 65 | :type ansible_inventory: ops.inventory.generator.AnsibleInventory 66 | """ 67 | 68 | self.inventory_generator = inventory_generator 69 | self.ansible_inventory = ansible_inventory 70 | self.root_dir = root_dir 71 | self.cluster_config = cluster_config 72 | self.ops_config = ops_config 73 | 74 | def run(self, args, extra_args): 75 | logger.info("Found extra_args %s", extra_args) 76 | inventory_path, ssh_config_paths = self.inventory_generator.generate() 77 | src = PathExpr(args.src) 78 | dest = PathExpr(args.dest) 79 | 80 | ssh_config_path = 
SshConfigGenerator.get_ssh_config_path(self.cluster_config, 81 | ssh_config_paths, 82 | args.use_scb) 83 | if src.is_remote and dest.is_remote: 84 | display( 85 | 'Too remote expressions are not allowed', 86 | stderr=True, 87 | color='red') 88 | return 89 | 90 | if src.is_remote: 91 | remote = src 92 | else: 93 | remote = dest 94 | 95 | display( 96 | "Looking for hosts for pattern '%s'" % 97 | remote.pattern, stderr=True) 98 | 99 | remote_hosts = [] 100 | hosts = self.ansible_inventory.get_hosts(remote.pattern) 101 | if not hosts: 102 | bastion = self.ansible_inventory.get_hosts( 103 | 'bastion')[0].vars.get('ansible_ssh_host') 104 | remote_hosts.append('{}--{}'.format(bastion, remote.pattern)) 105 | else: 106 | for host in hosts: 107 | ssh_host = host.get_vars().get('ansible_ssh_host') or host 108 | remote_hosts.append(ssh_host) 109 | 110 | for ssh_host in remote_hosts: 111 | ssh_user = self.cluster_config.get('ssh_user') or self.ops_config.get( 112 | 'ssh.user') or getpass.getuser() 113 | if remote.remote_user: 114 | ssh_user = remote.remote_user 115 | elif args.user: 116 | ssh_user = args.user 117 | 118 | from_path = src.with_user_and_path(ssh_user, ssh_host) 119 | to_path = dest.with_user_and_path(ssh_user, ssh_host) 120 | 121 | command = 'rsync {opts} {from_path} {to_path} -e "ssh -F {ssh_config}"'.format( 122 | opts=" ".join(args.opts), 123 | from_path=from_path, 124 | to_path=to_path, 125 | ssh_config=ssh_config_path 126 | 127 | ) 128 | 129 | return dict(command=command) 130 | 131 | 132 | class PathExpr(object): 133 | 134 | def __init__(self, path): 135 | self._path = path 136 | 137 | @property 138 | def is_remote(self): 139 | return ":" in self._path 140 | 141 | @property 142 | def path(self): 143 | return self._path if not self.is_remote else self._path.split(":")[-1] 144 | 145 | @property 146 | def pattern(self): 147 | if ':' not in self._path: 148 | return None 149 | 150 | return self._path if not self.is_remote else self._path.split(":")[ 151 | 
0].split('@')[-1] 152 | 153 | @property 154 | def remote_user(self): 155 | if '@' not in self._path: 156 | return None 157 | 158 | return self._path.split('@')[0] 159 | 160 | def __str__(self): 161 | return self._path 162 | 163 | def with_user_and_path(self, ssh_user, ssh_host): 164 | if self.is_remote: 165 | user_expr = '' 166 | if ssh_user: 167 | user_expr = ssh_user + '@' 168 | 169 | return PathExpr("{user_expr}{host}:{path}".format( 170 | user_expr=user_expr, host=ssh_host, path=self.path)) 171 | else: 172 | return self 173 | -------------------------------------------------------------------------------- /src/ops/data/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | timeout = 90 4 | roles_path = /etc/ansible/roles:ansible/roles:./roles 5 | stdout_callback = debug 6 | 7 | [ssh_connection] 8 | scp_if_ssh = False 9 | pipelining = True 10 | -------------------------------------------------------------------------------- /src/ops/data/ansible/tasks/deploy_prometheus_alert_rules.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Deploy alert rules on Prometheus 3 | # vars: 4 | # - rules_path: the rpm package to be updated 5 | 6 | # Usage example: 7 | # 8 | #- name: Deploy service alerts 9 | # become: True 10 | # become_user: ddxadmin 11 | # include: {{ ops_tasks_dir }}/deploy_prometheus_alert_rules.yml 12 | # vars: 13 | # rules_path: ../roles/prometheus/rules/ 14 | 15 | - name: Verify rules_path parameter is defined 16 | assert: 17 | that: rules_path is defined 18 | 19 | - name: "Copying alert rules to Prometheus servers" 20 | copy: src="{{ rules_path }}" dest=/etc/prometheus/rules/ owner=prometheus group=prometheus mode=0644 21 | 22 | - name: "Validating configs" 23 | shell: /usr/local/bin/promtool check config /etc/prometheus/prometheus.yaml 24 | register: result 25 | - debug: var=result.stdout_lines 26 | 27 | - 
name: "Reloading Prometheus" 28 | uri: url=http://127.0.0.1:8080/-/reload method=POST return_content=yes status_code=200 timeout=300 29 | register: response 30 | retries: 5 31 | delay: 5 32 | -------------------------------------------------------------------------------- /src/ops/data/ansible/tasks/install_rpm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | ## Update rpm to specific version 4 | # vars: 5 | # - rpm_package: the rpm package to be updated 6 | # - rpm_version: the rpm package version 7 | # - notify: the called handler 8 | 9 | # Usage example: 10 | # 11 | #- name: Update aam-keystone 12 | # become: True 13 | # become_user: ddxadmin 14 | # include: {{ ops_tasks_dir }}/install_rpm.yml 15 | # vars: 16 | # rpm_package: aam-keystone 17 | # rpm_version: "{{ package_version }}" 18 | # notify: "my jar updated" 19 | 20 | - name: Verify rpm package name is defined 21 | assert: 22 | that: rpm_package is defined 23 | 24 | - name: Verify version is defined 25 | assert: 26 | that: rpm_version is defined 27 | 28 | - name: Update rpm {{ rpm_package }} to {{ rpm_version }} 29 | shell: | 30 | new_version="{{ rpm_version }}" 31 | rpm_package="{{ rpm_package }}" 32 | current_version=$(rpm -qa "${rpm_package}" | sed -r "s/${rpm_package}-(.+).(noarch|x86_64)/\1/g") 33 | 34 | if [[ -z $current_version ]]; then 35 | # First install of the rpm 36 | sudo yum -y install ${rpm_package}-${new_version} 37 | else 38 | # If patch version is not specified - check X.Y.Z version only 39 | if ! [[ ${new_version} =~ .*-.* ]]; then 40 | current_version=$(echo $current_version | sed -r "s/(.+)-(.+)/\1/g") 41 | fi 42 | # Updating rpm? 43 | if [[ "${new_version}" == "${current_version}" ]]; then 44 | echo "Package ${rpm_package} version ${new_version} is already installed." 
45 | exit 0 46 | fi 47 | if [[ `printf "${new_version}\n${current_version}" | sort -V | head -1` == "${new_version}" ]]; then 48 | sudo yum -y downgrade ${rpm_package}-${new_version} 49 | else 50 | sudo yum -y update ${rpm_package}-${new_version} 51 | fi 52 | fi 53 | #check installed version 54 | current_version=$(rpm -qa "${rpm_package}" | sed -r "s/${rpm_package}-(.+).(noarch|x86_64)/\1/g") 55 | if ! [[ ${new_version} =~ .*-.* ]]; then 56 | current_version=$(echo $current_version | sed -r "s/(.+)-(.+)/\1/g") 57 | fi 58 | if [ "${new_version}" == "${current_version}" ]; then 59 | echo "Success" 60 | else 61 | echo "Failed." 62 | fi 63 | register: result 64 | failed_when: "'Fail' in result.stdout" 65 | changed_when: "'Success' in result.stdout" 66 | notify: 67 | - "{{ notify | default(None) }}" 68 | 69 | -------------------------------------------------------------------------------- /src/ops/data/ansible/tasks/remove_prometheus_alert_rules.yml: -------------------------------------------------------------------------------- 1 | ## Remove specific rules from Prometheus 2 | # vars: 3 | # - delete_rule_files: a list of glob patterns to match the rule files to be deleted 4 | # - skip_reload: to skip the "Reloading Prometheus" step, set this variable to "yes" 5 | 6 | # Usage example: 7 | # 8 | #- name: Remove stale prometheus rules 9 | # become: True 10 | # include: {{ ops_tasks_dir }}/remove_prometheus_alert_rules.yml 11 | # vars: 12 | # delete_rule_files: '["some_rule*", "some_other_rule*", "specific_rule.rule"]' 13 | # skip_reload: "yes" 14 | 15 | - name: "Verify delete_rule_files parameter is defined" 16 | assert: 17 | that: delete_rule_files is defined 18 | 19 | - name: "List rules files" 20 | find: 21 | paths: "/etc/prometheus/rules/" 22 | patterns: "{{ delete_rule_files }}" 23 | register: rule_files 24 | 25 | - debug: 26 | msg: "delete_rule_files: {{ delete_rule_files }} rule_files: {{ rule_files }}" 27 | verbosity: 1 28 | 29 | - name: "Delete rule files 
matching the patterns" 30 | file: 31 | path: "{{ item.path }}" 32 | state: absent 33 | with_items: "{{ rule_files.files }}" 34 | 35 | - name: "Validating configs" 36 | shell: /usr/local/bin/promtool check config /etc/prometheus/prometheus.yaml 37 | register: result 38 | - debug: var=result.stdout_lines 39 | 40 | - name: "Reloading Prometheus" 41 | uri: 42 | url: http://127.0.0.1:8080/-/reload 43 | method: POST 44 | return_content: yes 45 | status_code: 200 46 | timeout: 300 47 | register: response 48 | retries: 5 49 | delay: 5 50 | vars: 51 | skip_reload: "no" 52 | when: 53 | skip_reload != "yes" 54 | -------------------------------------------------------------------------------- /src/ops/data/ssh/ssh.config: -------------------------------------------------------------------------------- 1 | Host * 2 | #ControlPath ~/.ssh/mux_%h_%p_%r 3 | #ControlMaster auto 4 | #ControlPersist 15m 5 | ForwardAgent yes 6 | SendEnv LANG LC_* 7 | ServerAliveCountMax 2 8 | ServerAliveInterval 30 9 | StrictHostKeyChecking no 10 | TCPKeepAlive yes 11 | 12 | Host *--*--* 13 | ForwardAgent yes 14 | LogLevel QUIET 15 | ProxyCommand ssh -o StrictHostKeyChecking=no -o ForwardAgent=yes -A %r@$(echo %h | sed -e 's/--.*//g')@$(echo %h | sed -e 's/.*--//g') nc $(echo %h | sed -e 's/.*--\(.*\)--.*/\1/') %p 16 | SendEnv LANG LC_* 17 | ServerAliveCountMax 2 18 | ServerAliveInterval 30 19 | StrictHostKeyChecking no 20 | TCPKeepAlive yes 21 | 22 | Host *--* 23 | ForwardAgent yes 24 | LogLevel QUIET 25 | ProxyCommand $(if test -x ${HOME}/bin/sshpass &>/dev/null; then echo "${HOME}/bin/sshpass"; fi) ssh -o StrictHostKeyChecking=no -o ForwardAgent=yes -A %r@$(echo %h | sed -e 's/--.*//g') nc $(echo %h | sed -e 's/.*--//g') %p 26 | SendEnv LANG LC_* 27 | ServerAliveCountMax 2 28 | ServerAliveInterval 30 29 | StrictHostKeyChecking no 30 | TCPKeepAlive yes 31 | -------------------------------------------------------------------------------- /src/ops/data/ssh/ssh.scb.proxy.config.tpl: 
-------------------------------------------------------------------------------- 1 | Host * 2 | ForwardAgent yes 3 | SendEnv LANG LC_* 4 | ServerAliveCountMax 2 5 | ServerAliveInterval 30 6 | StrictHostKeyChecking no 7 | TCPKeepAlive yes 8 | 9 | Host *--* 10 | ForwardAgent yes 11 | LogLevel QUIET 12 | ProxyCommand /usr/bin/nc -X 5 -x 127.0.0.1:{scb_proxy_port} $(echo %h | sed -e 's/.*--//g') %p 13 | SendEnv LANG LC_* 14 | ServerAliveCountMax 2 15 | ServerAliveInterval 30 16 | StrictHostKeyChecking no 17 | TCPKeepAlive yes 18 | -------------------------------------------------------------------------------- /src/ops/data/ssh/ssh.tunnel.config: -------------------------------------------------------------------------------- 1 | Host * 2 | TCPKeepAlive yes 3 | StrictHostKeyChecking no 4 | ServerAliveInterval 30 5 | ServerAliveCountMax 2 6 | SendEnv LANG LC_* 7 | 8 | Host *--* 9 | ForwardAgent yes 10 | ProxyCommand $(if test -x ${HOME}/bin/sshpass &>/dev/null; then echo "${HOME}/bin/sshpass"; fi) ssh -A %r@$(echo %h | sed -e 's/--.*//g') nc $(echo %h | sed -e 's/.*--//g') %p 11 | -------------------------------------------------------------------------------- /src/ops/data/terraform/terraformrc: -------------------------------------------------------------------------------- 1 | plugin_cache_dir = "$HOME/.terraform.d/plugin-cache" 2 | -------------------------------------------------------------------------------- /src/ops/git_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Adobe. All rights reserved. 2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 
def setup_repo(repo_path, upstream_repo):
    """
    Make sure a local clone exists at ``repo_path``; clone it from
    ``upstream_repo`` when the path does not exist yet.
    """
    expanded_path = os.path.expanduser(repo_path)

    # EAFP: let GitPython probe the path instead of checking existence first.
    try:
        git.Repo(expanded_path, search_parent_directories=True)
    except git.NoSuchPathError:
        logger.warning(
            "Repo '%s' not found. Cloning from upstream '%s'",
            expanded_path,
            upstream_repo,
        )
        git.Repo.clone_from(upstream_repo, expanded_path)


def checkout_repo(repo_path, config_path, get_version):
    """
    Fetch the repo at ``repo_path`` and check out the version selected by
    ``get_version(conf)``, where ``conf`` is the YAML mapping loaded from
    ``config_path``.
    """
    with open(os.path.expanduser(config_path)) as config_file:
        conf = yaml.safe_load(config_file)

    target_version = get_version(conf)
    repo = git.Repo(repo_path, search_parent_directories=True)

    repo.git.fetch()
    repo.git.checkout(target_version)

    logger.info(
        "Checked out repo '%s' to version '%s'",
        repo.git.rev_parse("--show-toplevel"),
        target_version,
    )
# Copyright 2019 Adobe. All rights reserved. 2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 10 | 11 | from .ec2inventory import Ec2Inventory 12 | -------------------------------------------------------------------------------- /src/ops/inventory/caching.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Adobe. All rights reserved. 2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 
def cache_callback_result(directory, func, max_age, cache_key_args):
    """Return ``func()``'s result, cached on disk under *directory*.

    The cache file name is derived from *cache_key_args*; a cached result is
    reused while it is younger than *max_age* seconds, otherwise ``func()``
    is invoked and its result written back to the cache.
    """
    directory = os.path.expanduser(directory)
    path = get_cache_path(directory, cache_key_args)
    if is_valid(path, max_age):
        return read(path)

    return write(path, func())


def get_cache_path(dir, args):
    """Build a cache file path inside *dir* keyed by the MD5 of *args*.

    *args* must be JSON-serializable; equal args map to the same path.
    """
    # md5 is used only as a cache key here, not for anything security-related.
    # The repo is Python-3 only, so encode unconditionally (drops the old
    # six.PY3 branch).
    digest = hashlib.md5(json.dumps(args).encode('utf-8')).hexdigest()
    return os.path.join(dir, digest)


def is_valid(filename, max_age):
    """Return True if *filename* exists and is younger than *max_age* seconds."""
    filename = os.path.expanduser(filename)
    if os.path.isfile(filename):
        mod_time = os.path.getmtime(filename)
        if (mod_time + max_age) > time.time():
            return True

    return False


def write(filename, data):
    """Serialize *data* as JSON to *filename* and return *data* unchanged."""
    json_data = json.dumps(data, sort_keys=True, indent=2)
    # "with" guarantees the handle is closed even if the write raises
    # (the previous open()/close() pair leaked the handle on error).
    with open(os.path.expanduser(filename), 'w') as cache:
        cache.write(json_data)

    return data


def read(filename):
    """Load and return the JSON content of *filename*.

    Fixes a resource leak: the file handle was previously never closed.
    """
    with open(os.path.expanduser(filename), 'r') as cache:
        return json.loads(cache.read())
You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 10 | 11 | from .ec2 import ec2 12 | from .cns import cns 13 | from .legacy_pcs import legacy_pcs 14 | from .azr import azr 15 | from .skms import skms 16 | -------------------------------------------------------------------------------- /src/ops/inventory/plugin/azr.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 Adobe. All rights reserved. 2 | # This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. You may obtain a copy 4 | # of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | # Unless required by applicable law or agreed to in writing, software distributed under 7 | # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | # OF ANY KIND, either express or implied. See the License for the specific language 9 | # governing permissions and limitations under the License. 
class DictGlue(object):
    """Adapter exposing a plain dict's entries as object attributes,
    mimicking the argparse-style namespace object that AzureRM expects."""

    # NOTE: the shared mutable default is safe here because the dict is only
    # read (copied into __dict__), never mutated.
    def __init__(self, data={}):
        self.__dict__.update(data)


class EnvironmentMissingException(Exception):
    # Declared for callers to raise/catch; not raised inside this module.
    pass


class OpsAzureInventory(AzureInventory):
    """
    We inherit from the original implementation and override what we need here
    The original implementation is intended to be called by ansible and has different parameter semantics
    we override them here so that in the future we can update the azurerm independently (hopefully)
    """

    def __init__(self, args={}):
        # Defaults mirror the CLI flags of the upstream azure_rm inventory
        # script; callers override any subset through the ``args`` dict.
        self._dict_args = {
            'list': True,
            'debug': False,
            'host': None,
            'pretty': False,
            'profile': None,
            'subscription_id': None,
            'client_id': None,
            'secret': None,
            'tenant': None,
            'ad_user': None,
            'password': None,
            'resource_groups': None,
            'tags': None,
            'locations': None,
            'no_powerstate': False,
            'bastion_tag': 'Adobe:Class'
        }
        # HAS_AZURE / HAS_AZURE_EXC come from the azurerm star import above.
        if not HAS_AZURE:
            raise HAS_AZURE_EXC

        self._dict_args.update(args)
        # The Azure SDK wants the subscription id as a string; YAML configs
        # may have parsed it as a number.
        if self._dict_args['subscription_id'] is not None:
            self._dict_args.update(
                {'subscription_id': str(self._dict_args['subscription_id'])})
        self._args = DictGlue(self._dict_args)
        rm = AzureRM(self._args)

        self._compute_client = rm.compute_client
        self._network_client = rm.network_client
        self._resource_client = rm.rm_client
        self._security_groups = None
        self.resource_groups = []
        self.tags = None
        self.locations = None
        self.replace_dash_in_groups = False
        self.group_by_resource_group = True
        self.group_by_location = True
        self.group_by_security_group = False
        self.group_by_tag = True
        self.include_powerstate = True

        self._inventory = dict(
            _meta=dict(
                hostvars=dict()
            ),
            azure=[]
        )
        self._get_settings()

        # Comma-separated CLI-style filters become lists.
        if self._args.resource_groups:
            self.resource_groups = self._args.resource_groups.split(',')

        if self._args.tags:
            self.tags = self._args.tags.split(',')

        if self._args.locations:
            self.locations = self._args.locations.split(',')

        if self._args.no_powerstate:
            self.include_powerstate = False

        self.get_inventory()

        # First pass: find bastion hosts (tagged role=bastion, or carrying the
        # configured bastion_tag) and record one public bastion IP per location.
        bastions = {}
        for host, hostvars in iteritems(self._inventory['_meta']['hostvars']):
            if ('role' in hostvars['tags'] and hostvars['tags']['role'] == 'bastion') or \
                    (self._args.bastion_tag in hostvars['tags'] and
                     hostvars['tags'][self._args.bastion_tag] == 'bastion'):
                if hostvars['public_ip'] is not None:
                    bastion_ip = hostvars['public_ip']
                    location = hostvars['location']
                    bastions[location] = bastion_ip
                    self._inventory['_meta']['hostvars'][host]['ansible_ssh_host'] = bastion_ip
                else:
                    display.display(
                        "Warning, bastion host found but has no public IP (is the host stopped?)",
                        color='yellow')

        # Second pass: every non-bastion host is addressed as
        # "<bastion_ip>--<private_ip>"; this matches the "Host *--*"
        # ProxyCommand convention in the packaged ssh.config, which splits
        # the hostname on '--' to hop through the bastion.
        if bastions:
            for host, hostvars in iteritems(
                    self._inventory['_meta']['hostvars']):
                if ('role' in hostvars['tags'] and
                        hostvars['tags']['role'] == 'bastion') or \
                        (self._args.bastion_tag in hostvars['tags'] and
                         hostvars['tags'][self._args.bastion_tag] == 'bastion'):
                    pass
                else:
                    private_ip = hostvars['private_ip']
                    self._inventory['_meta']['hostvars'][host]['ansible_ssh_host'] = \
                        bastions[hostvars['location']] + '--' + private_ip

    def get_as_json(self, pretty=False):
        """Return the collected inventory serialized by the base class."""
        return self._json_format_dict(pretty=pretty)

    def _selected_machines(self, virtual_machines):
        """Filter VMs by explicit host name, tags and/or locations.

        NOTE(review): a machine matching several branches (e.g. both the
        explicit host and a tag filter) is appended more than once — confirm
        whether duplicates are acceptable downstream.
        """
        selected_machines = []
        for machine in virtual_machines:
            # explicit chosen host
            if self._args.host and self._args.host == machine.name:
                selected_machines.append(machine)
            # filter only by tags
            if self.tags and not self.locations and self._tags_match(
                    machine.tags, self.tags):
                selected_machines.append(machine)
            # filter only by location
            if self.locations and not self.tags and machine.location in self.locations:
                selected_machines.append(machine)
            # filter by both location and tags
            if self.locations and self.tags and self._tags_match(
                    machine.tags, self.tags) and machine.location in self.locations:
                selected_machines.append(machine)
        return selected_machines


def azr(args={}):
    """Eventual filtering will be done here after we will define how we group and tag resources"""
    return OpsAzureInventory(args).get_as_json(pretty=True)
def cns(args):
    """Build a merged EC2 inventory for one or more CNS clusters.

    ``args`` must contain a ``clusters`` list; each entry names a region, a
    boto profile and the cluster names to query. Returns the combined
    inventory as a pretty-printed JSON string.
    """
    if 'clusters' not in args:
        raise Exception('clusters entry is missing in the cns plugin args')

    merged = {}
    for cluster in args['clusters']:
        region = cluster['region']
        profile = cluster['boto_profile']
        for name in cluster['names']:
            # One ec2 query per cluster name; bastions are the subset of the
            # cluster additionally tagged role=bastion.
            inventory_json = ec2(dict(
                region=region,
                boto_profile=profile,
                cache=args.get('cache', 3600 * 24),
                filters=[
                    {'Name': 'tag:cluster', 'Values': [name]}
                ],
                bastion=[
                    {'Name': 'tag:cluster', 'Values': [name]},
                    {'Name': 'tag:role', 'Values': ['bastion']}
                ]
            ))
            merge_inventories(merged, json.loads(inventory_json))

    return json.dumps(merged, sort_keys=True, indent=2)


def merge_inventories(a, b):
    """Merge inventory ``b`` into ``a`` in place.

    Missing or empty keys are copied over, group host lists are
    concatenated, and ``_meta.hostvars`` dicts are combined; any other
    conflicting value keeps ``a``'s version.
    """
    for key, value in b.items():
        existing = a.get(key)
        if not existing:
            a[key] = value
        elif isinstance(existing, list):
            existing.extend(value)
        elif key == '_meta':
            existing['hostvars'].update(value['hostvars'])
def ec2(args):
    """Return the EC2 inventory JSON described by the plugin ``args``.

    ``args`` carries the boto profile, region(s) and optional EC2 API
    filters; when only a ``cluster`` name is given, default tag filters for
    the cluster (and for its bastion hosts) are derived from it.
    """
    cluster = args.get('cluster')
    filters = args.get('filters', [])
    bastion_filters = args.get('bastion', [])

    # A bare cluster name is shorthand for a tag:cluster filter...
    if cluster and not args.get('filters'):
        filters = [{'Name': 'tag:cluster', 'Values': [cluster]}]

    # ...and bastions default to the cluster members tagged role=bastion.
    if cluster and not args.get('bastion'):
        bastion_filters = [
            {'Name': 'tag:cluster', 'Values': [cluster]},
            {'Name': 'tag:role', 'Values': ['bastion']}
        ]

    inventory = Ec2Inventory(boto_profile=args['boto_profile'],
                             regions=args['region'],
                             filters=filters,
                             bastion_filters=bastion_filters)
    return inventory.get_as_json()
def legacy_pcs(args):
    """Merged EC2 inventory for the legacy 'pcs' and 'tableloader' roles.

    Runs one EC2 query per CMDB_role tag value and combines the results
    into a single pretty-printed JSON inventory.
    """
    merged = {}

    for role in ('pcs', 'tableloader'):
        inventory_json = ec2(dict(
            region=args['region'],
            boto_profile=args['boto_profile'],
            # Dict-form filter (tag name -> value), as also used by the
            # inventory fixtures; the list form is accepted too.
            filters={
                'tag:CMDB_role': role
            },
            bastion=args['bastion']
        ))
        merge_inventories(merged, json.loads(inventory_json))

    return json.dumps(merged, sort_keys=True, indent=2)
class SshConfigGenerator(object):
    """Copies the packaged ssh client configs into a working directory and
    renders the SCB (jump-proxy) variant from its template on demand.

    File-name convention: dots in the packaged names become underscores in
    the generated copies; the rendered SCB config drops the "_tpl" suffix
    and its chosen proxy port is persisted in a sibling "..._port" file.
    """

    SSH_CONFIG_FILE = "ssh.config"
    SSH_SCB_PROXY_TPL_FILE = "ssh.scb.proxy.config.tpl"

    def __init__(self, package_dir):
        # package_dir is the ops package root; stock configs live in data/ssh.
        self.package_dir = package_dir
        self.ssh_data_dir = self.package_dir + '/data/ssh'
        self.ssh_config_files = [self.SSH_CONFIG_FILE, self.SSH_SCB_PROXY_TPL_FILE]

    def generate(self, directory):
        """Copy each packaged config into *directory* and return a mapping of
        packaged file name -> destination path."""
        destinations = {}
        for name, source_path in zip(self.ssh_config_files, self._get_ssh_config()):
            destinations[name] = f"{directory}/{name.replace('.', '_')}"
            copy(source_path, destinations[name])
        return destinations

    def _get_ssh_config(self):
        """Absolute source paths of the packaged config files, in order."""
        return [f"{self.ssh_data_dir}/{name}" for name in self.ssh_config_files]

    @staticmethod
    def get_ssh_config_path(cluster_config, ssh_config_paths, use_scb):
        """Pick the ssh config to use.

        When SCB is enabled both in the cluster config and by the caller,
        render and return the SCB proxy config (using the previously
        persisted proxy port); otherwise return the plain packaged config.
        """
        scb_settings = cluster_config.get('scb', {})
        if scb_settings.get('enabled') and use_scb:
            tpl_path = ssh_config_paths.get(SshConfigGenerator.SSH_SCB_PROXY_TPL_FILE)
            proxy_port = SshConfigGenerator.get_ssh_scb_proxy_port(tpl_path)
            chosen_path = SshConfigGenerator.generate_ssh_scb_config(tpl_path, proxy_port)
            display.display(f"Connecting via scb proxy at 127.0.0.1:{proxy_port}.\n"
                            f"This proxy should have already been started and running "
                            f"in a different terminal window.\n"
                            f"If there are connection issues double check that "
                            f"the proxy is running.",
                            color='blue',
                            stderr=True)
            return chosen_path
        return ssh_config_paths.get(SshConfigGenerator.SSH_CONFIG_FILE)

    @staticmethod
    def generate_ssh_scb_proxy_port(ssh_config_path, auto_scb_port, scb_config_port):
        """Choose the local SCB proxy port, persist it and return it.

        *ssh_config_path* is the directory holding the generated configs.
        With *auto_scb_port* a currently-free port is auto-allocated,
        otherwise *scb_config_port* from the cluster config is used.
        """
        port_file = f"{ssh_config_path}/ssh_scb_proxy_config_port"
        if auto_scb_port:
            # Binding port 0 makes the OS hand us a currently-free port.
            with socketserver.TCPServer(("localhost", 0), None) as server:
                chosen_port = server.server_address[1]
                display.display(f"Using auto generated port {chosen_port} for scb proxy port",
                                color='blue',
                                stderr=True)
        else:
            chosen_port = scb_config_port
            display.display(f"Using port {chosen_port} from cluster config for scb proxy port",
                            color='blue',
                            stderr=True)

        with open(port_file, 'w') as f:
            f.write(str(chosen_port))
            # World-readable so follow-up tooling can read the port back.
            os.fchmod(f.fileno(), 0o644)

        return chosen_port

    @staticmethod
    def get_ssh_scb_proxy_port(ssh_config_path):
        """Read back the port persisted by generate_ssh_scb_proxy_port.

        The port file sits beside the template: "..._tpl" -> "..._port".
        """
        port_file = ssh_config_path.replace("_tpl", "_port")
        return Path(port_file).read_text()

    @staticmethod
    def generate_ssh_scb_config(ssh_config_tpl_path, scb_proxy_port):
        """Render the SCB template with *scb_proxy_port* and write the result
        next to the template (same path minus the "_tpl" suffix)."""
        rendered = Path(ssh_config_tpl_path).read_text().format(
            scb_proxy_port=scb_proxy_port
        )
        target_path = ssh_config_tpl_path.removesuffix("_tpl")
        with open(target_path, 'w') as f:
            f.write(rendered)
            os.fchmod(f.fileno(), 0o644)

        return target_path
class Template(object):
    """Jinja2 rendering helper wired up with ansible filter plugins.

    Templates are looked up first relative to *root_dir* and then from the
    filesystem root, so absolute template paths resolve as well.
    """

    def __init__(self, root_dir, ops_config):
        search_path = ChoiceLoader([
            FileSystemLoader(root_dir),
            FileSystemLoader("/")
        ])

        # Map the configured strictness onto a jinja2 Undefined class;
        # anything unrecognized falls back to the permissive default.
        undefined_by_mode = {
            'StrictUndefined': StrictUndefined,
            'DebugUndefined': DebugUndefined,
        }
        undefined_cls = undefined_by_mode.get(
            ops_config.get('jinja2.undefined'), Undefined)

        self.env = Environment(loader=search_path, undefined=undefined_cls)

        # Load the same filter plugins ansible would, so templates rendered
        # here can use them outside a playbook run.
        self.filter_plugin_loader = PluginLoader(
            'FilterModule',
            'ansible.plugins.filter',
            ops_config.ansible_filter_plugins.split(':'),
            'filter_plugins'
        )

        for plugin in self.filter_plugin_loader.all():
            self.env.filters.update(plugin.filters())

    def render(self, source, vars):
        """Render template *source* with the given mapping of variables."""
        return self.env.get_template(source).render(**vars)
DEFAULT_CONNECT = {
    'host': '127.0.0.1',
    'port': 8500,
    'scheme': 'http'
}

DEFAULT_PARAMS = {
    'token': None,
    'consistency': 'default',
    'dc': None,
    'verify': True
}


class SimpleConsul(object):
    """ Simple wrapper class for interacting with Consul. Focused mainly on KV operations"""

    consul_params = {}
    conn = None

    def __init__(self, consul_url='http://127.0.0.1:8500',
                 token=None, consistency='default',
                 dc=None, verify=True):
        """
        Assemble parameters for connecting to consul, fill in with defaults
        where we do not receive any, then connect to consul.

        :param consul_url: http(s)://host[:port] of the consul agent
        :param token: optional ACL token
        :param consistency: consul consistency mode
        :param dc: optional datacenter name
        :param verify: whether to verify TLS certificates
        """
        self.consul_params = {}
        self.consul_params.update(DEFAULT_CONNECT)
        self.consul_params.update(DEFAULT_PARAMS)
        self.consul_params.update(
            self._parse_connect_url(consul_url))
        if token:
            self.consul_params['token'] = token
        if consistency:
            self.consul_params['consistency'] = consistency
        if dc:
            self.consul_params['dc'] = dc
        # BUG FIX: the old "if verify:" silently ignored verify=False,
        # leaving TLS verification forced on; assign unconditionally.
        self.consul_params['verify'] = verify
        self.conn = consul.Consul(**self.consul_params)
        # Fail fast if the agent is unreachable.
        self.conn.kv.get("just-fail-if-we-cannot-connect-to-consul")

    @staticmethod
    def _parse_connect_url(url):
        """Extract scheme, host and port from *url*.

        Components absent from the url are omitted from the returned dict,
        so callers' defaults survive the update().
        """
        match = re.match(r'(http|https)://([\w\-\.]+)+(:(\d+)){0,1}', str(url))
        if not match:
            return {}
        # Group 3 is the ":port" wrapper around group 4; it is parsed but
        # discarded below.
        keys = ('scheme', 'host', 'skip', 'port')
        parsed = {keys[i]: match.group(i + 1)
                  for i in range(4) if match.group(i + 1)}
        parsed.pop('skip', True)
        return parsed

    def get(self, key, recurse=False):
        """Read *key*; with recurse=True return the nested dict rooted at it
        (falling back to the single value when no children exist)."""
        merger = hashmerge.HashMerge(hashmerge.RIGHT_PRECEDENT)
        index, data = self.conn.kv.get(key, recurse=False)
        if data:
            single_value = data.get('Value', None)
        else:
            single_value = None
        if not recurse:
            return single_value
        aggregated = {}
        keys_dict = {}
        index, keys_list = self.conn.kv.get(key + '/', recurse=recurse)
        if keys_list:
            keys_dict = {i['Key']: i['Value'] for i in keys_list}
        for k, v in keys_dict.items():
            # Rebuild the nested dict for this path, innermost-first, then
            # merge it into the aggregate.
            path_atoms = k.split('/')
            leaf = path_atoms.pop()
            if leaf == '':
                # A trailing slash denotes a "directory" entry with no value.
                tmp = {}
            else:
                tmp = {leaf: v}
            for atom in reversed(path_atoms):
                tmp = {atom: tmp}
            aggregated = merger.merge(aggregated, tmp)
        return aggregated or single_value

    def put(self, key, value):
        """Write *value* under *key*; dicts recurse into sub-keys."""
        if isinstance(value, (int, str)):
            self.conn.kv.put(key, str(value), cas=None,
                             flags=None, acquire=None, release=None,
                             token=None, dc=None)
        elif isinstance(value, list):
            # NOTE(review): every item is written to the same key, with
            # "True" passed positionally as the cas argument — this looks
            # suspicious (cas expects an index); confirm the intended list
            # semantics before relying on it.
            for item in value:
                self.conn.kv.put(key, item, "True")
        elif isinstance(value, dict):
            for k, v in value.items():
                self.put(key + '/' + k, v)
class SimpleSSM(object):
    """Minimal AWS SSM Parameter Store reader bound to a profile and region.

    The boto3 client is created under the requested AWS_PROFILE environment
    variable, which is restored to its previous state after every lookup.
    """

    def __init__(self, aws_profile, region_name):
        # Remember the caller's AWS_PROFILE (None when unset) so it can be
        # restored by release_ssm_client().
        self.initial_aws_profile = os.getenv('AWS_PROFILE', None)
        self.aws_profile = aws_profile
        self.region_name = region_name

    def get(self, key):
        """Return the decrypted parameter value stored under *key*.

        Raises a plain Exception carrying the AWS error code on failure;
        AWS_PROFILE is restored in all cases.
        """
        client = self.get_ssm_client()
        try:
            parameter = client.get_parameter(Name=key, WithDecryption=True)
            return parameter.get("Parameter").get("Value")
        except ClientError as e:
            raise Exception(
                'Error while trying to read SSM value for key: %s - %s' %
                (key, e.response['Error']['Code']))
        finally:
            self.release_ssm_client()

    def get_ssm_client(self):
        """Point AWS_PROFILE at our profile and build an SSM client."""
        os.environ['AWS_PROFILE'] = self.aws_profile
        return boto3.client('ssm', region_name=self.region_name)

    def release_ssm_client(self):
        """Restore AWS_PROFILE to its value from construction time."""
        if self.initial_aws_profile is None:
            del os.environ['AWS_PROFILE']
        else:
            os.environ['AWS_PROFILE'] = self.initial_aws_profile
@pytest.fixture
def test_path():
    """Absolute path of the directory containing this test module."""
    return os.path.dirname(os.path.abspath(__file__))
#!/usr/bin/env python
# Minimal custom Ansible module used by the e2e tests: it accepts a dict
# via the required ``set_facts`` option and publishes it back verbatim as
# Ansible facts, always reporting changed=True.
def main():
    # Declare the module interface: one required dict option.
    module = AnsibleModule(
        argument_spec=dict(
            set_facts=dict(required=True, type='dict')
        )
    )

    result = dict(
        changed=True,
        ansible_facts=module.params['set_facts']
    )

    module.exit_json(**result)


# NOTE(review): the late star-import (which provides AnsibleModule) and the
# unconditional main() call look like classic Ansible module boilerplate —
# do not reorder without checking Ansible's module-loading conventions.
from ansible.module_utils.basic import *

main()
def my_filter(string):
    """Prefix *string* with ``filtered: `` (custom test filter)."""
    prefix = 'filtered: '
    return prefix + string


class FilterModule(object):
    """Expose this package's custom Jinja2 filters to Ansible."""

    def filters(self):
        # Ansible's filter-plugin API: filter name -> callable.
        return dict(my_filter=my_filter)
host: "scb.example.com" 8 | proxy_port: 2222 9 | -------------------------------------------------------------------------------- /tests/e2e/fixture/terraform/.opsconfig.yaml: -------------------------------------------------------------------------------- 1 | terraform_version: v0.6.3 -------------------------------------------------------------------------------- /tests/e2e/fixture/terraform/clusters/prod/test.yaml: -------------------------------------------------------------------------------- 1 | 2 | terraform: 3 | path: terraform/main -------------------------------------------------------------------------------- /tests/e2e/fixture/terraform/terraform/main/main.tf.jinja2: -------------------------------------------------------------------------------- 1 | variable "cluster" {} 2 | 3 | variable "user_data" { 4 | description = "The user-data passed to the instance. Changing this will re-create the instances." 5 | default = { 6 | "0" = "{{ 'terraform/user_data' | read_file | escape_new_lines }}" 7 | } 8 | } 9 | 10 | output "user_data_out" { 11 | value = "${var.user_data}" 12 | } 13 | -------------------------------------------------------------------------------- /tests/e2e/fixture/terraform/terraform/user_data: -------------------------------------------------------------------------------- 1 | my_user_data -------------------------------------------------------------------------------- /tests/e2e/test_inventory.py: -------------------------------------------------------------------------------- 1 | # This Python file uses the following encoding: utf-8 2 | #Copyright 2019 Adobe. All rights reserved. 3 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 4 | #you may not use this file except in compliance with the License. 
You may obtain a copy 5 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 6 | 7 | #Unless required by applicable law or agreed to in writing, software distributed under 8 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 9 | #OF ANY KIND, either express or implied. See the License for the specific language 10 | #governing permissions and limitations under the License. 11 | 12 | # coding=utf-8 13 | import os 14 | import pytest 15 | from six import PY3 16 | 17 | from ops.main import AppContainer 18 | from simpledi import * 19 | 20 | current_dir = os.path.dirname(__file__) 21 | 22 | 23 | def app(*args): 24 | app = AppContainer(args) 25 | 26 | def test_plugin(inventory_opts): 27 | return """ 28 | { 29 | "bastion": ["bastion.host"], 30 | "nat": ["bastion.host"], 31 | "web": [ 32 | "web1.host", 33 | "web2.host" 34 | ], 35 | "backend": ["172.16.0.1--172.16.0.2"] 36 | } 37 | """ 38 | 39 | # we configure the plugin test_plugin 40 | 41 | inventory_plugins = ListInstanceProvider(instance(test_plugin)) 42 | app.inventory_plugins = inventory_plugins 43 | 44 | return app 45 | 46 | 47 | def run(*args): 48 | return app(*args).run() 49 | 50 | 51 | def test_plugin_generator(capsys): 52 | # we run the inventory 53 | run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'inventory') 54 | 55 | # we should have the 3 hosts in the inventory output 56 | out, err = capsys.readouterr() 57 | print(out) 58 | print(err) 59 | assert 'bastion.host' in out 60 | assert 'web1.host' in out 61 | assert 'web2.host' in out 62 | 63 | 64 | def test_inventory_limit(capsys): 65 | # when we run with limit, then we should have only one host 66 | run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'inventory', '--limit', 'bastion') 67 | out, err = capsys.readouterr() 68 | print(out) 69 | print(err) 70 | assert 'bastion.host' in out 71 | assert 'web1.host' not in out 72 | 73 | 74 | if not PY3: 75 | def 
test_inventory_limit_unicode_dash(): 76 | with pytest.raises(UnicodeDecodeError): 77 | run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'inventory', '––limit', 'bastion') 78 | -------------------------------------------------------------------------------- /tests/e2e/test_jinja_filters.py: -------------------------------------------------------------------------------- 1 | #Copyright 2019 Adobe. All rights reserved. 2 | #This file is licensed to you under the Apache License, Version 2.0 (the "License"); 3 | #you may not use this file except in compliance with the License. You may obtain a copy 4 | #of the License at http://www.apache.org/licenses/LICENSE-2.0 5 | 6 | #Unless required by applicable law or agreed to in writing, software distributed under 7 | #the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS 8 | #OF ANY KIND, either express or implied. See the License for the specific language 9 | #governing permissions and limitations under the License. 
import os
import pytest
from ops.main import AppContainer
from simpledi import *


@pytest.fixture
def app():
    """Factory fixture: build an AppContainer from a list of CLI-style args."""
    def _app(args):
        return AppContainer(args)
    return _app


current_dir = os.path.dirname(__file__)


def test_loading_of_modules(capsys, app):
    """Standard, custom and ops-provided Jinja filters resolve in cluster config."""
    root_dir = current_dir + '/fixture/ansible'
    container = app(['-vv', '--root-dir', root_dir, 'clusters/test_filters.yaml', 'play',
                     'playbooks/play_module.yaml'])

    code = container.execute(container.run(), pass_trough=False)
    out, err = capsys.readouterr()
    # Equality, not identity: `code is 0` only worked via CPython's
    # small-int caching and is a SyntaxWarning on modern Pythons.
    assert code == 0

    # These two comparisons previously lacked `assert`, so they were
    # silent no-ops and verified nothing.
    assert container.cluster_config['test_standard_filters'] == '6'
    assert container.cluster_config['test_custom_filters'] == 'filtered: value'
import os
import pytest
from ops import display

from six import PY3

from ops.main import AppContainer
from simpledi import *


@pytest.fixture
def app():
    """Factory fixture: build an AppContainer from a list of CLI-style args."""
    def _app(args):
        return AppContainer(args)

    return _app


current_dir = os.path.dirname(__file__)


def test_loading_of_modules_and_extensions(capsys, app):
    """Custom modules, filter plugins and CLI extra-vars all take effect."""
    root_dir = current_dir + '/fixture/ansible'
    container = app(['-vv', '--root-dir', root_dir, 'clusters/test.yaml', 'play',
                     'playbooks/play_module.yaml', '--', '-e', 'test_cmd_var=true',
                     '-e', '\'{"test_cmd_bool_var": false}\''])
    command = container.run()
    code = container.execute(command, pass_trough=False)
    out, err = capsys.readouterr()
    display(out, color='gray')
    display(err, color='red')
    # Equality, not identity: `code is 0` only worked via CPython's
    # small-int caching and is a SyntaxWarning on modern Pythons.
    assert code == 0
    # the filter plugins work
    assert '"msg": "filtered: filter_this"' in out

    # custom modules are interpreted
    assert '"the_module_works": "yep"' in out

    # cmd extra_vars override playbook vars
    assert '"test_cmd_var = True"' in out

    # cmd extra_vars bool var
    assert '"test_cmd_bool_var = False"' in out

    # cluster is present as a variable in the command line
    assert '-e cluster=test' in command['command']


if not PY3:
    def test_ssh_user_unicode_dash(capsys, app):
        # u'–vv' deliberately contains a U+2013 en dash to exercise the
        # Python 2 UnicodeDecodeError path.
        with pytest.raises(UnicodeDecodeError):
            root_dir = current_dir + '/fixture/ansible'
            app([u'–vv', '--root-dir', root_dir, 'clusters/test.yaml', 'play',
                 'playbooks/play_module.yaml']).run()
# This Python file uses the following encoding: utf-8
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

# End-to-end tests for the `ops ... ssh` command. They assert the shape of
# the ssh command line that WOULD be executed; nothing actually connects.

import os
import re

from six import PY3

import test_inventory
import pytest

# bring in the fixtures (app/run build an AppContainer with a stub inventory)
app = test_inventory.app
run = test_inventory.run

current_dir = os.path.dirname(__file__)


def test_ssh():
    # Args after '--' are forwarded verbatim to ssh.
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'ssh',
                  'bastion', '--', '-ND', '8157')

    assert re.match('ssh -F .+/ssh.config bastion.host -ND 8157', command['command'])


def test_ssh_scb():
    # With SCB enabled in the cluster yaml, the target becomes
    # user@host@scb-gateway.
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  'bastion', '--', '-TD', '8157')

    assert re.match(r'ssh -F .+/ssh.config .+bastion.host@scb\.example.com -TD 8157',
                    command['command'])


def test_ssh_scb_noscb():
    # --noscb bypasses the SCB gateway even when the cluster enables it.
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  '--noscb', 'bastion', '--', '-TD', '8157')

    assert re.match('ssh -F .+/ssh.config bastion.host -TD 8157', command['command'])
    assert "scb.example.com" not in command['command']


def test_ssh_user():
    # -l sets the remote login name.
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'ssh',
                  'bastion', '-l', 'remote_user')

    assert re.match('ssh -F .+/ssh.config bastion.host -l remote_user', command['command'])


def test_ssh_scb_user():
    # The -l user is also folded into the SCB target (user@host@gateway).
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  'bastion', '-l', 'remote_user')

    assert re.match(r'ssh -F .+/ssh.config remote_user@bastion.host@scb\.example.com '
                    r'-l remote_user', command['command'])


def test_ssh_scb_user_ssh_dest_user():
    # "backend" encodes a two-hop pair (ip1--ip2); --ssh-dest-user sets the
    # login used on the final hop.
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  'backend', '--ssh-dest-user', 'ec2-user', '-l', 'remote_user')

    assert re.match(r'ssh -F .+/ssh.config -t remote_user@172.16.0.1@scb\.example.com '
                    r'ssh ec2-user@172.16.0.2 -l remote_user', command['command'])

def test_ssh_scb_user_noscb():
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  'bastion', '--noscb', '-l', 'remote_user')

    assert re.match('ssh -F .+/ssh.config bastion.host -l remote_user', command['command'])
    assert "scb.example.com" not in command['command']


if not PY3:
    def test_ssh_user_unicode_dash():
        # '–l' deliberately uses a U+2013 en dash: Python 2 must fail decoding.
        with pytest.raises(UnicodeDecodeError):
            run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'ssh',
                'bastion', '–l', 'remote_user')


def test_ssh_user_default():
    # we take the default system user
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'ssh',
                  'bastion')

    current_user = os.environ.get("USER") or "root"
    assert '-l %s' % current_user in command['command']


def test_ssh_scb_user_default():
    # we take the default system user
    command = run(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                  'bastion')

    current_user = os.environ.get("USER") or "root"
    assert '-l %s' % current_user in command['command']


def test_ssh_user_opsconfig():
    # we take the value from opsconfig, if present
    container = app(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml',
                    'ssh', 'bastion')
    container.ops_config.config['ssh.user'] = 'test'

    command = container.run()

    assert '-l test' in command['command']


def test_ssh_user_opsconfig_override():
    # the value of the command line argument overrides .opsconfig.yaml

    container = app(current_dir + '/fixture/inventory/clusters/plugin_generator.yaml', 'ssh',
                    'bastion', '-l', 'ec2-user')

    container.ops_config.config['ssh.user'] = 'test'

    command = container.run()

    assert '-l ec2-user' in command['command']


def test_ssh_scb_user_opsconfig():
    # we take the value from opsconfig, if present
    container = app(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml',
                    'ssh',
                    'bastion')
    container.ops_config.config['ssh.user'] = 'test'

    command = container.run()

    assert '-l test' in command['command']


def test_ssh_scb_user_opsconfig_override():
    # the value of the command line argument overrides .opsconfig.yaml

    container = app(current_dir + '/fixture/inventory/clusters/plugin_generator_scb.yaml', 'ssh',
                    'bastion', '-l', 'ec2-user')

    container.ops_config.config['ssh.user'] = 'test'

    command = container.run()

    assert '-l ec2-user' in command['command']
# End-to-end tests for the `ops ... terraform` subcommands, run against the
# fixture project under fixture/terraform.

import os
import pytest
from ops.main import AppContainer
from simpledi import *

current_dir = os.path.dirname(__file__)


@pytest.fixture
def app():
    # Factory fixture: build an AppContainer from a list of CLI-style args.
    def _app(args):
        return AppContainer(args)

    return _app


def test_terraform_templating_for_file_plugin(capsys, app):
    # 'terraform template' renders main.tf.jinja2; the read_file filter must
    # inline the contents of terraform/user_data ('my_user_data').
    app(['--root-dir', current_dir + '/fixture/terraform', 'clusters/prod/test.yaml', 'terraform', 'template']).run()

    out, err = capsys.readouterr()
    print(out)
    print(err)
    assert 'my_user_data' in out


def test_terraform_plan(capsys, app):
    container = app(['--root-dir', current_dir + '/fixture/terraform', 'clusters/prod/test.yaml', 'terraform', 'plan'])
    command = container.run()


    # we have the terraform plan command
    assert 'terraform plan' in command['command']

    # we have a post_action -> the delete command
    assert len(command['post_actions']) == 1

    # when we call the post actions
    container.execute(command)
    # the rendered main.tf must be cleaned up by the post action
    assert not os.path.isfile(current_dir + '/fixture/terraform/terraform/main/main.tf')


def test_terraform_apply(capsys, app):
    def terraform(*args):
        # Helper: run one terraform subcommand against the fixture project.
        a = ['--root-dir', current_dir + '/fixture/terraform', 'clusters/prod/test.yaml']
        a.extend(args)
        container = app(a)
        container.execute(container.run())

    terraform('terraform', 'plan')
    # NOTE(review): apply/output appear intentionally disabled — presumably
    # they would need a real terraform binary and state; confirm before
    # re-enabling.
    # terraform('terraform', 'apply')
    # terraform('terraform', 'output', '--var', 'user_data_out')
from ops.hierarchical.composition_config_generator import CompositionSorter


def test_composition_discovery():
    """Compositions come back in the configured order, regardless of input order."""
    order = ["comp1", "compB", "comp3"]
    sorter = CompositionSorter(composition_order=order)
    result = sorter.get_sorted_compositions(["comp3", "comp1", "compB"])
    assert result == order


def test_unknown_composition_is_ignored():
    """Names missing from the configured order are dropped from the result."""
    order = ["comp1", "comp2"]
    sorter = CompositionSorter(composition_order=order)
    result = sorter.get_sorted_compositions(["comp2", "comp1", "unknown_composition"])
    assert result == order


def test_reverse_order():
    """reverse=True yields the configured order backwards (as a tuple)."""
    order = ["comp1", "comp2"]
    sorter = CompositionSorter(composition_order=order)
    result = sorter.get_sorted_compositions(["comp1", "comp2"], reverse=True)
    assert result == ("comp2", "comp1")
import os
from argparse import Namespace

from ops.opsconfig import OpsConfig

current_dir = os.path.dirname(__file__)


def test_configuration_overrides():
    """The nearest .opsconfig.yaml wins; ancestor directories fill the gaps."""
    prod_path = current_dir + '/fixture/clusters/prod/us-east-1/test.yaml'
    cfg = OpsConfig(Namespace(cluster_config_path=prod_path), '')

    # unknown keys are absent; region/env come from the nearest configs
    assert 'vars.random' not in cfg
    assert cfg['terraform.version'] == 'override_version'
    assert cfg['vars.region'] == 'us-east-1'
    assert cfg['vars.env'] == 'prod'

    dev_path = current_dir + '/fixture/clusters/dev/us-west-1/test.yaml'
    cfg = OpsConfig(Namespace(cluster_config_path=dev_path), '')
    assert cfg['terraform.version'] == 'override_version'
    assert cfg['vars.region'] == 'us-west-1'
    assert cfg['vars.env'] == 'dev'