├── .ansible-lint ├── .bandit.yml ├── .coveragerc ├── .flake8 ├── .github ├── CODEOWNERS ├── dependabot.yml ├── labels.yml ├── lineage.yml └── workflows │ ├── build.yml │ ├── codeql-analysis.yml │ ├── dependency-review.yml │ └── sync-labels.yml ├── .gitignore ├── .isort.cfg ├── .mdl_config.yaml ├── .pre-commit-config.yaml ├── .prettierignore ├── .yamllint ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── bump-version ├── pytest.ini ├── requirements-dev.txt ├── requirements-test.txt ├── requirements.txt ├── setup-env ├── setup.py ├── src └── aws_profile_sync │ ├── __init__.py │ ├── __main__.py │ ├── _version.py │ ├── aws_profile_sync.py │ └── handlers │ ├── __init__.py │ └── ssh_git.py ├── tag.sh └── tests ├── conftest.py ├── credentials-test └── test_aws_profile_sync.py /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | # See https://ansible-lint.readthedocs.io/configuring/ for a list of 3 | # the configuration elements that can exist in this file. 4 | enable_list: 5 | # Useful checks that one must opt-into. See here for more details: 6 | # https://ansible-lint.readthedocs.io/rules/ 7 | - fqcn-builtins 8 | - no-log-password 9 | - no-same-owner 10 | exclude_paths: 11 | # This exclusion is implicit, unless exclude_paths is defined 12 | - .cache 13 | # Seems wise to ignore this too 14 | - .github 15 | kinds: 16 | # This will force our systemd specific molecule configurations to be treated 17 | # as plain yaml files by ansible-lint. This mirrors the default kind 18 | # configuration in ansible-lint for molecule configurations: 19 | # yaml: "**/molecule/*/{base,molecule}.{yaml,yml}" 20 | - yaml: "**/molecule/*/molecule-{no,with}-systemd.yml" 21 | use_default_rules: true 22 | -------------------------------------------------------------------------------- /.bandit.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Configuration file for the Bandit python security scanner 3 | # https://bandit.readthedocs.io/en/latest/config.html 4 | # This config is applied to bandit when scanning the "tests" tree 5 | 6 | # Tests are first included by `tests`, and then excluded by `skips`. 7 | # If `tests` is empty, all tests are considered included.
8 | 9 | tests: 10 | # - B101 11 | # - B102 12 | 13 | skips: 14 | - B101 # skip "assert used" check since assertions are required in pytests 15 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | # This is the configuration for code coverage checks 2 | # https://coverage.readthedocs.io/en/latest/config.html 3 | 4 | [run] 5 | source = src/aws_profile_sync 6 | omit = 7 | branch = true 8 | 9 | [report] 10 | exclude_lines = 11 | if __name__ == "__main__": 12 | show_missing = true 13 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 80 3 | # Select (turn on) 4 | # * Complexity violations reported by mccabe (C) - 5 | # http://flake8.pycqa.org/en/latest/user/error-codes.html#error-violation-codes 6 | # * Documentation conventions compliance reported by pydocstyle (D) - 7 | # http://www.pydocstyle.org/en/stable/error_codes.html 8 | # * Default errors and warnings reported by pycodestyle (E and W) - 9 | # https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes 10 | # * Default errors reported by pyflakes (F) - 11 | # http://flake8.pycqa.org/en/latest/glossary.html#term-pyflakes 12 | # * Default warnings reported by flake8-bugbear (B) - 13 | # https://github.com/PyCQA/flake8-bugbear#list-of-warnings 14 | # * The B950 flake8-bugbear opinionated warning - 15 | # https://github.com/PyCQA/flake8-bugbear#opinionated-warnings 16 | select = C,D,E,F,W,B,B950 17 | # Ignore flake8's default warning about maximum line length, which has 18 | # a hard stop at the configured value. Instead we use 19 | # flake8-bugbear's B950, which allows up to 10% overage. 20 | # 21 | # Also ignore flake8's warning about line breaks before binary 22 | # operators. It no longer agrees with PEP8. See, for example, here: 23 | # https://github.com/ambv/black/issues/21. Guido agrees here: 24 | # https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b. 25 | ignore = E501,W503 26 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Each line is a file pattern followed by one or more owners. 2 | 3 | # These owners will be the default owners for everything in the 4 | # repo. Unless a later match takes precedence, these owners will be 5 | # requested for review when someone opens a pull request. 6 | * @dav3r @felddy @jsf9k @mcdonnnj 7 | 8 | # These folks own any files in the .github directory at the root of 9 | # the repository and any of its subdirectories. 10 | /.github/ @dav3r @felddy @jsf9k @mcdonnnj 11 | 12 | # These folks own all linting configuration files.
13 | /.ansible-lint @dav3r @felddy @jsf9k @mcdonnnj 14 | /.bandit.yml @dav3r @felddy @jsf9k @mcdonnnj 15 | /.flake8 @dav3r @felddy @jsf9k @mcdonnnj 16 | /.isort.cfg @dav3r @felddy @jsf9k @mcdonnnj 17 | /.mdl_config.yaml @dav3r @felddy @jsf9k @mcdonnnj 18 | /.pre-commit-config.yaml @dav3r @felddy @jsf9k @mcdonnnj 19 | /.prettierignore @dav3r @felddy @jsf9k @mcdonnnj 20 | /.yamllint @dav3r @felddy @jsf9k @mcdonnnj 21 | /requirements.txt @dav3r @felddy @jsf9k @mcdonnnj 22 | /requirements-dev.txt @dav3r @felddy @jsf9k @mcdonnnj 23 | /requirements-test.txt @dav3r @felddy @jsf9k @mcdonnnj 24 | /setup-env @dav3r @felddy @jsf9k @mcdonnnj 25 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Any ignore directives should be uncommented in downstream projects to disable 4 | # Dependabot updates for the given dependency. Downstream projects will get 5 | # these updates when the pull request(s) in the appropriate skeleton are merged 6 | # and Lineage processes these changes. 7 | 8 | updates: 9 | - directory: / 10 | ignore: 11 | # Managed by cisagov/skeleton-generic 12 | - dependency-name: actions/cache 13 | - dependency-name: actions/checkout 14 | - dependency-name: actions/dependency-review-action 15 | - dependency-name: actions/setup-go 16 | - dependency-name: actions/setup-python 17 | - dependency-name: cisagov/action-job-preamble 18 | - dependency-name: cisagov/setup-env-github-action 19 | - dependency-name: crazy-max/ghaction-github-labeler 20 | - dependency-name: github/codeql-action 21 | - dependency-name: hashicorp/setup-packer 22 | - dependency-name: hashicorp/setup-terraform 23 | - dependency-name: mxschmitt/action-tmate 24 | # Managed by cisagov/skeleton-python-library 25 | - dependency-name: actions/download-artifact 26 | - dependency-name: actions/upload-artifact 27 | package-ecosystem: github-actions 28 | schedule: 29 | interval: weekly 30 | 31 | - directory: / 32 | package-ecosystem: pip 33 | schedule: 34 | interval: weekly 35 | 36 | - directory: / 37 | package-ecosystem: terraform 38 | schedule: 39 | interval: weekly 40 | version: 2 41 | -------------------------------------------------------------------------------- /.github/labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Rather than breaking up descriptions into multiline strings we disable that 3 | # specific rule in yamllint for this file. 
4 | # yamllint disable rule:line-length 5 | - color: eb6420 6 | description: This issue or pull request is awaiting the outcome of another issue or pull request 7 | name: blocked 8 | - color: "000000" 9 | description: This issue or pull request involves changes to existing functionality 10 | name: breaking change 11 | - color: d73a4a 12 | description: This issue or pull request addresses broken functionality 13 | name: bug 14 | - color: 07648d 15 | description: This issue will be advertised on code.gov's Open Tasks page (https://code.gov/open-tasks) 16 | name: code.gov 17 | - color: 0366d6 18 | description: Pull requests that update a dependency file 19 | name: dependencies 20 | - color: 5319e7 21 | description: This issue or pull request improves or adds to documentation 22 | name: documentation 23 | - color: cfd3d7 24 | description: This issue or pull request already exists or is covered in another issue or pull request 25 | name: duplicate 26 | - color: b005bc 27 | description: A high-level objective issue encompassing multiple issues instead of a specific unit of work 28 | name: epic 29 | - color: "000000" 30 | description: Pull requests that update GitHub Actions code 31 | name: github-actions 32 | - color: 0e8a16 33 | description: This issue or pull request is well-defined and good for newcomers 34 | name: good first issue 35 | - color: ff7518 36 | description: Pull request that should count toward Hacktoberfest participation 37 | name: hacktoberfest-accepted 38 | - color: a2eeef 39 | description: This issue or pull request will add or improve functionality, maintainability, or ease of use 40 | name: improvement 41 | - color: fef2c0 42 | description: This issue or pull request is not applicable, incorrect, or obsolete 43 | name: invalid 44 | - color: ce099a 45 | description: This pull request is ready to merge during the next Lineage Kraken release 46 | name: kraken 🐙 47 | - color: a4fc5d 48 | description: This issue or pull request requires further information 49 | name: need info 50 | - color: fcdb45 51 | description: This pull request is awaiting an action or decision to move forward 52 | name: on hold 53 | - color: 3772a4 54 | description: Pull requests that update Python code 55 | name: python 56 | - color: ef476c 57 | description: This issue is a request for information or needs discussion 58 | name: question 59 | - color: d73a4a 60 | description: This issue or pull request addresses a security issue 61 | name: security 62 | - color: 00008b 63 | description: This issue or pull request adds or otherwise modifies test code 64 | name: test 65 | - color: 1d76db 66 | description: This issue or pull request pulls in upstream updates 67 | name: upstream update 68 | - color: d4c5f9 69 | description: This issue or pull request increments the version number 70 | name: version bump 71 | - color: ffffff 72 | description: This issue will not be incorporated 73 | name: wontfix 74 | -------------------------------------------------------------------------------- /.github/lineage.yml: -------------------------------------------------------------------------------- 1 | --- 2 | lineage: 3 | skeleton: 4 | remote-url: https://github.com/cisagov/skeleton-python-library.git 5 | version: "1" 6 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: build 3 | 4 | on: # yamllint disable-line rule:truthy 5 | merge_group: 6 | types: 7 | - 
checks_requested 8 | pull_request: 9 | push: 10 | repository_dispatch: 11 | types: 12 | - apb 13 | 14 | # Set a default shell for any run steps. The `-Eueo pipefail` sets errtrace, 15 | # nounset, errexit, and pipefail. The `-x` will print all commands as they are 16 | # run. Please see the GitHub Actions documentation for more information: 17 | # https://docs.github.com/en/actions/using-jobs/setting-default-values-for-jobs 18 | defaults: 19 | run: 20 | shell: bash -Eueo pipefail -x {0} 21 | 22 | env: 23 | PIP_CACHE_DIR: ~/.cache/pip 24 | PRE_COMMIT_CACHE_DIR: ~/.cache/pre-commit 25 | RUN_TMATE: ${{ secrets.RUN_TMATE }} 26 | TERRAFORM_DOCS_REPO_BRANCH_NAME: improvement/support_atx_closed_markdown_headers 27 | TERRAFORM_DOCS_REPO_DEPTH: 1 28 | TERRAFORM_DOCS_REPO_URL: https://github.com/mcdonnnj/terraform-docs.git 29 | 30 | jobs: 31 | diagnostics: 32 | name: Run diagnostics 33 | # This job does not need any permissions 34 | permissions: {} 35 | runs-on: ubuntu-latest 36 | steps: 37 | # Note that a duplicate of this step must be added at the top of 38 | # each job. 39 | - name: Apply standard cisagov job preamble 40 | uses: cisagov/action-job-preamble@v1 41 | with: 42 | check_github_status: "true" 43 | # This functionality is poorly implemented and has been 44 | # causing problems due to the MITM implementation hogging or 45 | # leaking memory. As a result we disable it by default. If 46 | # you want to temporarily enable it, simply set 47 | # monitor_permissions equal to "true". 48 | # 49 | # TODO: Re-enable this functionality when practical. See 50 | # cisagov/skeleton-generic#207 for more details. 51 | monitor_permissions: "false" 52 | output_workflow_context: "true" 53 | # Use a variable to specify the permissions monitoring 54 | # configuration. By default this will yield the 55 | # configuration stored in the cisagov organization-level 56 | # variable, but if you want to use a different configuration 57 | # then simply: 58 | # 1. Create a repository-level variable with the name 59 | # ACTIONS_PERMISSIONS_CONFIG. 60 | # 2. Set this new variable's value to the configuration you 61 | # want to use for this repository. 62 | # 63 | # Note in particular that changing the permissions 64 | # monitoring configuration *does not* require you to modify 65 | # this workflow. 66 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 67 | lint: 68 | needs: 69 | - diagnostics 70 | permissions: 71 | # actions/checkout needs this to fetch code 72 | contents: read 73 | runs-on: ubuntu-latest 74 | steps: 75 | - name: Apply standard cisagov job preamble 76 | uses: cisagov/action-job-preamble@v1 77 | with: 78 | # This functionality is poorly implemented and has been 79 | # causing problems due to the MITM implementation hogging or 80 | # leaking memory. As a result we disable it by default. If 81 | # you want to temporarily enable it, simply set 82 | # monitor_permissions equal to "true". 83 | # 84 | # TODO: Re-enable this functionality when practical. See 85 | # cisagov/skeleton-generic#207 for more details. 86 | monitor_permissions: "false" 87 | # Use a variable to specify the permissions monitoring 88 | # configuration. By default this will yield the 89 | # configuration stored in the cisagov organization-level 90 | # variable, but if you want to use a different configuration 91 | # then simply: 92 | # 1. Create a repository-level variable with the name 93 | # ACTIONS_PERMISSIONS_CONFIG. 94 | # 2. 
Set this new variable's value to the configuration you 95 | # want to use for this repository. 96 | # 97 | # Note in particular that changing the permissions 98 | # monitoring configuration *does not* require you to modify 99 | # this workflow. 100 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 101 | - id: setup-env 102 | uses: cisagov/setup-env-github-action@develop 103 | - uses: actions/checkout@v4 104 | - id: setup-python 105 | uses: actions/setup-python@v5 106 | with: 107 | python-version: ${{ steps.setup-env.outputs.python-version }} 108 | # We need the Go version and Go cache location for the actions/cache step, 109 | # so the Go installation must happen before that. 110 | - id: setup-go 111 | uses: actions/setup-go@v5 112 | with: 113 | # There is no expectation for actual Go code so we disable caching as 114 | # it relies on the existence of a go.sum file. 115 | cache: false 116 | go-version: ${{ steps.setup-env.outputs.go-version }} 117 | - id: go-cache 118 | name: Lookup Go cache directory 119 | run: | 120 | echo "dir=$(go env GOCACHE)" >> $GITHUB_OUTPUT 121 | - uses: actions/cache@v4 122 | env: 123 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 124 | py${{ steps.setup-python.outputs.python-version }}-\ 125 | go${{ steps.setup-go.outputs.go-version }}-\ 126 | packer${{ steps.setup-env.outputs.packer-version }}-\ 127 | tf${{ steps.setup-env.outputs.terraform-version }}- 128 | with: 129 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 130 | # file in the root of the repository is used. This is in case a Python 131 | # package were to have a 'setup.py' as part of its internal codebase. 132 | key: ${{ env.BASE_CACHE_KEY }}\ 133 | ${{ hashFiles('**/requirements-test.txt') }}-\ 134 | ${{ hashFiles('**/requirements.txt') }}-\ 135 | ${{ hashFiles('**/.pre-commit-config.yaml') }}-\ 136 | ${{ hashFiles('setup.py') }} 137 | # Note that the .terraform directory IS NOT included in the 138 | # cache because if we were caching, then we would need to use 139 | # the `-upgrade=true` option. This option blindly pulls down the 140 | # latest modules and providers instead of checking to see if an 141 | # update is required. That behavior defeats the benefits of caching. 142 | # so there is no point in doing it for the .terraform directory. 
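# Illustration only (this comment is not part of the upstream skeleton, and
# the version numbers and hashes below are hypothetical): combining the
# BASE_CACHE_KEY defined above with the hashFiles() pieces in the key, the
# resolved cache key for this job takes roughly the form
#   lint-Linux-py3.13.x-go1.x-packer1.x-tf1.x-<hash>-<hash>-<hash>-<hash>
# where each <hash> is the digest hashFiles() computes for the corresponding
# requirements file, pre-commit configuration, or root-level setup.py.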
143 | path: | 144 | ${{ env.PIP_CACHE_DIR }} 145 | ${{ env.PRE_COMMIT_CACHE_DIR }} 146 | ${{ steps.go-cache.outputs.dir }} 147 | restore-keys: | 148 | ${{ env.BASE_CACHE_KEY }} 149 | - uses: hashicorp/setup-packer@v3 150 | with: 151 | version: ${{ steps.setup-env.outputs.packer-version }} 152 | - uses: hashicorp/setup-terraform@v3 153 | with: 154 | terraform_version: ${{ steps.setup-env.outputs.terraform-version }} 155 | - name: Install go-critic 156 | env: 157 | PACKAGE_URL: github.com/go-critic/go-critic/cmd/gocritic 158 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.go-critic-version }} 159 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 160 | - name: Install goimports 161 | env: 162 | PACKAGE_URL: golang.org/x/tools/cmd/goimports 163 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.goimports-version }} 164 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 165 | - name: Install gosec 166 | env: 167 | PACKAGE_URL: github.com/securego/gosec/v2/cmd/gosec 168 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.gosec-version }} 169 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 170 | - name: Install staticcheck 171 | env: 172 | PACKAGE_URL: honnef.co/go/tools/cmd/staticcheck 173 | PACKAGE_VERSION: ${{ steps.setup-env.outputs.staticcheck-version }} 174 | run: go install ${PACKAGE_URL}@${PACKAGE_VERSION} 175 | # TODO: https://github.com/cisagov/skeleton-generic/issues/165 176 | # We are temporarily using @mcdonnnj's forked branch of terraform-docs 177 | # until his PR: https://github.com/terraform-docs/terraform-docs/pull/745 178 | # is approved. This temporary fix will allow for ATX header support when 179 | # terraform-docs is run during linting. 180 | - name: Clone ATX headers branch from terraform-docs fork 181 | run: | 182 | git clone \ 183 | --branch $TERRAFORM_DOCS_REPO_BRANCH_NAME \ 184 | --depth $TERRAFORM_DOCS_REPO_DEPTH \ 185 | --single-branch \ 186 | $TERRAFORM_DOCS_REPO_URL /tmp/terraform-docs 187 | - name: Build and install terraform-docs binary 188 | run: | 189 | go build \ 190 | -C /tmp/terraform-docs \ 191 | -o $(go env GOPATH)/bin/terraform-docs 192 | - name: Install dependencies 193 | run: | 194 | python -m pip install --upgrade pip setuptools wheel 195 | pip install --upgrade --requirement requirements-test.txt 196 | - name: Set up pre-commit hook environments 197 | run: pre-commit install-hooks 198 | - name: Run pre-commit on all files 199 | run: pre-commit run --all-files 200 | - name: Setup tmate debug session 201 | uses: mxschmitt/action-tmate@v3 202 | if: env.RUN_TMATE 203 | test: 204 | name: test source - py${{ matrix.python-version }} 205 | needs: 206 | - diagnostics 207 | permissions: 208 | # actions/checkout needs this to fetch code 209 | contents: read 210 | runs-on: ubuntu-latest 211 | strategy: 212 | fail-fast: false 213 | matrix: 214 | python-version: 215 | - "3.9" 216 | - "3.10" 217 | - "3.11" 218 | - "3.12" 219 | - "3.13" 220 | steps: 221 | - name: Apply standard cisagov job preamble 222 | uses: cisagov/action-job-preamble@v1 223 | with: 224 | # This functionality is poorly implemented and has been 225 | # causing problems due to the MITM implementation hogging or 226 | # leaking memory. As a result we disable it by default. If 227 | # you want to temporarily enable it, simply set 228 | # monitor_permissions equal to "true". 229 | # 230 | # TODO: Re-enable this functionality when practical. See 231 | # cisagov/skeleton-python-library#149 for more details. 
232 | monitor_permissions: "false" 233 | # Use a variable to specify the permissions monitoring 234 | # configuration. By default this will yield the 235 | # configuration stored in the cisagov organization-level 236 | # variable, but if you want to use a different configuration 237 | # then simply: 238 | # 1. Create a repository-level variable with the name 239 | # ACTIONS_PERMISSIONS_CONFIG. 240 | # 2. Set this new variable's value to the configuration you 241 | # want to use for this repository. 242 | # 243 | # Note in particular that changing the permissions 244 | # monitoring configuration *does not* require you to modify 245 | # this workflow. 246 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 247 | - uses: actions/checkout@v4 248 | - id: setup-python 249 | uses: actions/setup-python@v5 250 | with: 251 | python-version: ${{ matrix.python-version }} 252 | - uses: actions/cache@v4 253 | env: 254 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 255 | py${{ steps.setup-python.outputs.python-version }}- 256 | with: 257 | path: ${{ env.PIP_CACHE_DIR }} 258 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 259 | # file in the root of the repository is used. This is in case a Python 260 | # package were to have a 'setup.py' as part of its internal codebase. 261 | key: ${{ env.BASE_CACHE_KEY }}\ 262 | ${{ hashFiles('**/requirements-test.txt') }}-\ 263 | ${{ hashFiles('**/requirements.txt') }}-\ 264 | ${{ hashFiles('setup.py') }} 265 | restore-keys: | 266 | ${{ env.BASE_CACHE_KEY }} 267 | - name: Install dependencies 268 | run: | 269 | python -m pip install --upgrade pip 270 | pip install --upgrade --requirement requirements-test.txt 271 | - name: Run tests 272 | env: 273 | RELEASE_TAG: ${{ github.event.release.tag_name }} 274 | run: pytest 275 | - name: Upload coverage report 276 | uses: coverallsapp/github-action@v2 277 | with: 278 | flag-name: py${{ matrix.python-version }} 279 | parallel: true 280 | if: success() 281 | - name: Setup tmate debug session 282 | uses: mxschmitt/action-tmate@v3 283 | if: env.RUN_TMATE 284 | coveralls-finish: 285 | permissions: 286 | # actions/checkout needs this to fetch code 287 | contents: read 288 | runs-on: ubuntu-latest 289 | needs: 290 | - diagnostics 291 | - test 292 | steps: 293 | - name: Apply standard cisagov job preamble 294 | uses: cisagov/action-job-preamble@v1 295 | with: 296 | # This functionality is poorly implemented and has been 297 | # causing problems due to the MITM implementation hogging or 298 | # leaking memory. As a result we disable it by default. If 299 | # you want to temporarily enable it, simply set 300 | # monitor_permissions equal to "true". 301 | # 302 | # TODO: Re-enable this functionality when practical. See 303 | # cisagov/skeleton-python-library#149 for more details. 304 | monitor_permissions: "false" 305 | # Use a variable to specify the permissions monitoring 306 | # configuration. By default this will yield the 307 | # configuration stored in the cisagov organization-level 308 | # variable, but if you want to use a different configuration 309 | # then simply: 310 | # 1. Create a repository-level variable with the name 311 | # ACTIONS_PERMISSIONS_CONFIG. 312 | # 2. Set this new variable's value to the configuration you 313 | # want to use for this repository. 314 | # 315 | # Note in particular that changing the permissions 316 | # monitoring configuration *does not* require you to modify 317 | # this workflow. 
318 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 319 | - uses: actions/checkout@v4 320 | - name: Finished coveralls reports 321 | uses: coverallsapp/github-action@v2 322 | with: 323 | parallel-finished: true 324 | - name: Setup tmate debug session 325 | uses: mxschmitt/action-tmate@v3 326 | if: env.RUN_TMATE 327 | build: 328 | name: build wheel - py${{ matrix.python-version }} 329 | needs: 330 | - diagnostics 331 | - lint 332 | - test 333 | permissions: 334 | # actions/checkout needs this to fetch code 335 | contents: read 336 | runs-on: ubuntu-latest 337 | strategy: 338 | fail-fast: false 339 | matrix: 340 | python-version: 341 | - "3.9" 342 | - "3.10" 343 | - "3.11" 344 | - "3.12" 345 | - "3.13" 346 | steps: 347 | - name: Apply standard cisagov job preamble 348 | uses: cisagov/action-job-preamble@v1 349 | with: 350 | # This functionality is poorly implemented and has been 351 | # causing problems due to the MITM implementation hogging or 352 | # leaking memory. As a result we disable it by default. If 353 | # you want to temporarily enable it, simply set 354 | # monitor_permissions equal to "true". 355 | # 356 | # TODO: Re-enable this functionality when practical. See 357 | # cisagov/skeleton-python-library#149 for more details. 358 | monitor_permissions: "false" 359 | # Use a variable to specify the permissions monitoring 360 | # configuration. By default this will yield the 361 | # configuration stored in the cisagov organization-level 362 | # variable, but if you want to use a different configuration 363 | # then simply: 364 | # 1. Create a repository-level variable with the name 365 | # ACTIONS_PERMISSIONS_CONFIG. 366 | # 2. Set this new variable's value to the configuration you 367 | # want to use for this repository. 368 | # 369 | # Note in particular that changing the permissions 370 | # monitoring configuration *does not* require you to modify 371 | # this workflow. 372 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 373 | - uses: actions/checkout@v4 374 | - id: setup-python 375 | uses: actions/setup-python@v5 376 | with: 377 | python-version: ${{ matrix.python-version }} 378 | - uses: actions/cache@v4 379 | env: 380 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 381 | py${{ steps.setup-python.outputs.python-version }}- 382 | with: 383 | path: ${{ env.PIP_CACHE_DIR }} 384 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 385 | # file in the root of the repository is used. This is in case a Python 386 | # package were to have a 'setup.py' as part of its internal codebase. 
387 | key: ${{ env.BASE_CACHE_KEY }}\ 388 | ${{ hashFiles('**/requirements.txt') }}-\ 389 | ${{ hashFiles('setup.py') }} 390 | restore-keys: | 391 | ${{ env.BASE_CACHE_KEY }} 392 | - name: Install build dependencies 393 | run: | 394 | python -m pip install --upgrade pip setuptools wheel 395 | python -m pip install --upgrade build 396 | - name: Build artifacts 397 | run: python -m build 398 | - name: Upload artifacts 399 | uses: actions/upload-artifact@v4 400 | with: 401 | name: dist-${{ matrix.python-version }} 402 | path: dist 403 | - name: Setup tmate debug session 404 | uses: mxschmitt/action-tmate@v3 405 | if: env.RUN_TMATE 406 | test-build: 407 | name: test built wheel - py${{ matrix.python-version }} 408 | needs: 409 | - diagnostics 410 | - build 411 | permissions: 412 | # actions/checkout needs this to fetch code 413 | contents: read 414 | runs-on: ubuntu-latest 415 | strategy: 416 | fail-fast: false 417 | matrix: 418 | python-version: 419 | - "3.9" 420 | - "3.10" 421 | - "3.11" 422 | - "3.12" 423 | - "3.13" 424 | steps: 425 | - name: Apply standard cisagov job preamble 426 | uses: cisagov/action-job-preamble@v1 427 | with: 428 | # This functionality is poorly implemented and has been 429 | # causing problems due to the MITM implementation hogging or 430 | # leaking memory. As a result we disable it by default. If 431 | # you want to temporarily enable it, simply set 432 | # monitor_permissions equal to "true". 433 | # 434 | # TODO: Re-enable this functionality when practical. See 435 | # cisagov/skeleton-python-library#149 for more details. 436 | monitor_permissions: "false" 437 | # Use a variable to specify the permissions monitoring 438 | # configuration. By default this will yield the 439 | # configuration stored in the cisagov organization-level 440 | # variable, but if you want to use a different configuration 441 | # then simply: 442 | # 1. Create a repository-level variable with the name 443 | # ACTIONS_PERMISSIONS_CONFIG. 444 | # 2. Set this new variable's value to the configuration you 445 | # want to use for this repository. 446 | # 447 | # Note in particular that changing the permissions 448 | # monitoring configuration *does not* require you to modify 449 | # this workflow. 450 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 451 | - uses: actions/checkout@v4 452 | - id: setup-python 453 | uses: actions/setup-python@v5 454 | with: 455 | python-version: ${{ matrix.python-version }} 456 | - uses: actions/cache@v4 457 | env: 458 | BASE_CACHE_KEY: ${{ github.job }}-${{ runner.os }}-\ 459 | py${{ steps.setup-python.outputs.python-version }}- 460 | with: 461 | path: ${{ env.PIP_CACHE_DIR }} 462 | # We do not use '**/setup.py' in the cache key so only the 'setup.py' 463 | # file in the root of the repository is used. This is in case a Python 464 | # package were to have a 'setup.py' as part of its internal codebase. 
465 | key: ${{ env.BASE_CACHE_KEY }}\ 466 | ${{ hashFiles('**/requirements.txt') }}-\ 467 | ${{ hashFiles('setup.py') }} 468 | restore-keys: | 469 | ${{ env.BASE_CACHE_KEY }} 470 | - name: Retrieve the built wheel 471 | uses: actions/download-artifact@v4 472 | with: 473 | name: dist-${{ matrix.python-version }} 474 | path: dist 475 | - id: find-wheel 476 | name: Get the name of the retrieved wheel (there should only be one) 477 | run: echo "wheel=$(ls dist/*whl)" >> $GITHUB_OUTPUT 478 | - name: Update core Python packages 479 | run: python -m pip install --upgrade pip setuptools wheel 480 | - name: Install the built wheel (along with testing dependencies) 481 | run: python -m pip install ${{ steps.find-wheel.outputs.wheel }}[test] 482 | - name: Run tests 483 | env: 484 | RELEASE_TAG: ${{ github.event.release.tag_name }} 485 | run: pytest 486 | - name: Setup tmate debug session 487 | uses: mxschmitt/action-tmate@v3 488 | if: env.RUN_TMATE 489 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # For most projects, this workflow file will not need changing; you simply need 3 | # to commit it to your repository. 4 | # 5 | # You may wish to alter this file to override the set of languages analyzed, 6 | # or to provide custom queries or build logic. 7 | name: CodeQL 8 | 9 | # The use of on here as a key is part of the GitHub actions syntax. 10 | # yamllint disable-line rule:truthy 11 | on: 12 | merge_group: 13 | types: 14 | - checks_requested 15 | pull_request: 16 | # The branches here must be a subset of the ones in the push key 17 | branches: 18 | - develop 19 | push: 20 | # Dependabot-triggered push events have read-only access, but uploading code 21 | # scanning requires write access. 22 | branches-ignore: 23 | - dependabot/** 24 | schedule: 25 | - cron: 0 14 * * 6 26 | 27 | jobs: 28 | diagnostics: 29 | name: Run diagnostics 30 | # This job does not need any permissions 31 | permissions: {} 32 | runs-on: ubuntu-latest 33 | steps: 34 | # Note that a duplicate of this step must be added at the top of 35 | # each job. 36 | - name: Apply standard cisagov job preamble 37 | uses: cisagov/action-job-preamble@v1 38 | with: 39 | check_github_status: "true" 40 | # This functionality is poorly implemented and has been 41 | # causing problems due to the MITM implementation hogging or 42 | # leaking memory. As a result we disable it by default. If 43 | # you want to temporarily enable it, simply set 44 | # monitor_permissions equal to "true". 45 | # 46 | # TODO: Re-enable this functionality when practical. See 47 | # cisagov/skeleton-generic#207 for more details. 48 | monitor_permissions: "false" 49 | output_workflow_context: "true" 50 | # Use a variable to specify the permissions monitoring 51 | # configuration. By default this will yield the 52 | # configuration stored in the cisagov organization-level 53 | # variable, but if you want to use a different configuration 54 | # then simply: 55 | # 1. Create a repository-level variable with the name 56 | # ACTIONS_PERMISSIONS_CONFIG. 57 | # 2. Set this new variable's value to the configuration you 58 | # want to use for this repository. 59 | # 60 | # Note in particular that changing the permissions 61 | # monitoring configuration *does not* require you to modify 62 | # this workflow. 
63 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 64 | analyze: 65 | name: Analyze 66 | needs: 67 | - diagnostics 68 | permissions: 69 | # actions/checkout needs this to fetch code 70 | contents: read 71 | # required for all workflows 72 | security-events: write 73 | runs-on: ubuntu-latest 74 | strategy: 75 | fail-fast: false 76 | matrix: 77 | # Override automatic language detection by changing the below 78 | # list 79 | # 80 | # Supported options are actions, c-cpp, csharp, go, 81 | # java-kotlin, javascript-typescript, python, ruby, and swift. 82 | language: 83 | - actions 84 | - python 85 | # Learn more... 86 | # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection 87 | 88 | steps: 89 | - name: Apply standard cisagov job preamble 90 | uses: cisagov/action-job-preamble@v1 91 | with: 92 | # This functionality is poorly implemented and has been 93 | # causing problems due to the MITM implementation hogging or 94 | # leaking memory. As a result we disable it by default. If 95 | # you want to temporarily enable it, simply set 96 | # monitor_permissions equal to "true". 97 | # 98 | # TODO: Re-enable this functionality when practical. See 99 | # cisagov/skeleton-generic#207 for more details. 100 | monitor_permissions: "false" 101 | # Use a variable to specify the permissions monitoring 102 | # configuration. By default this will yield the 103 | # configuration stored in the cisagov organization-level 104 | # variable, but if you want to use a different configuration 105 | # then simply: 106 | # 1. Create a repository-level variable with the name 107 | # ACTIONS_PERMISSIONS_CONFIG. 108 | # 2. Set this new variable's value to the configuration you 109 | # want to use for this repository. 110 | # 111 | # Note in particular that changing the permissions 112 | # monitoring configuration *does not* require you to modify 113 | # this workflow. 114 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 115 | 116 | - name: Checkout repository 117 | uses: actions/checkout@v4 118 | 119 | # Initializes the CodeQL tools for scanning. 120 | - name: Initialize CodeQL 121 | uses: github/codeql-action/init@v3 122 | with: 123 | languages: ${{ matrix.language }} 124 | 125 | # Autobuild attempts to build any compiled languages (C/C++, C#, or 126 | # Java). If this step fails, then you should remove it and run the build 127 | # manually (see below). 128 | - name: Autobuild 129 | uses: github/codeql-action/autobuild@v3 130 | 131 | # ℹ️ Command-line programs to run using the OS shell. 132 | # 📚 https://git.io/JvXDl 133 | 134 | # ✏️ If the Autobuild fails above, remove it and uncomment the following 135 | # three lines and modify them (or add more) to build your code if your 136 | # project uses a compiled language 137 | 138 | # - run: | 139 | # make bootstrap 140 | # make release 141 | 142 | - name: Perform CodeQL Analysis 143 | uses: github/codeql-action/analyze@v3 144 | -------------------------------------------------------------------------------- /.github/workflows/dependency-review.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Dependency review 3 | 4 | on: # yamllint disable-line rule:truthy 5 | merge_group: 6 | types: 7 | - checks_requested 8 | pull_request: 9 | 10 | # Set a default shell for any run steps. The `-Eueo pipefail` sets errtrace, 11 | # nounset, errexit, and pipefail. 
The `-x` will print all commands as they are 12 | # run. Please see the GitHub Actions documentation for more information: 13 | # https://docs.github.com/en/actions/using-jobs/setting-default-values-for-jobs 14 | defaults: 15 | run: 16 | shell: bash -Eueo pipefail -x {0} 17 | 18 | jobs: 19 | diagnostics: 20 | name: Run diagnostics 21 | # This job does not need any permissions 22 | permissions: {} 23 | runs-on: ubuntu-latest 24 | steps: 25 | # Note that a duplicate of this step must be added at the top of 26 | # each job. 27 | - name: Apply standard cisagov job preamble 28 | uses: cisagov/action-job-preamble@v1 29 | with: 30 | check_github_status: "true" 31 | # This functionality is poorly implemented and has been 32 | # causing problems due to the MITM implementation hogging or 33 | # leaking memory. As a result we disable it by default. If 34 | # you want to temporarily enable it, simply set 35 | # monitor_permissions equal to "true". 36 | # 37 | # TODO: Re-enable this functionality when practical. See 38 | # cisagov/skeleton-generic#207 for more details. 39 | monitor_permissions: "false" 40 | output_workflow_context: "true" 41 | # Use a variable to specify the permissions monitoring 42 | # configuration. By default this will yield the 43 | # configuration stored in the cisagov organization-level 44 | # variable, but if you want to use a different configuration 45 | # then simply: 46 | # 1. Create a repository-level variable with the name 47 | # ACTIONS_PERMISSIONS_CONFIG. 48 | # 2. Set this new variable's value to the configuration you 49 | # want to use for this repository. 50 | # 51 | # Note in particular that changing the permissions 52 | # monitoring configuration *does not* require you to modify 53 | # this workflow. 54 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 55 | dependency-review: 56 | name: Dependency review 57 | needs: 58 | - diagnostics 59 | permissions: 60 | # actions/checkout needs this to fetch code 61 | contents: read 62 | runs-on: ubuntu-latest 63 | steps: 64 | - name: Apply standard cisagov job preamble 65 | uses: cisagov/action-job-preamble@v1 66 | with: 67 | # This functionality is poorly implemented and has been 68 | # causing problems due to the MITM implementation hogging or 69 | # leaking memory. As a result we disable it by default. If 70 | # you want to temporarily enable it, simply set 71 | # monitor_permissions equal to "true". 72 | # 73 | # TODO: Re-enable this functionality when practical. See 74 | # cisagov/skeleton-generic#207 for more details. 75 | monitor_permissions: "false" 76 | # Use a variable to specify the permissions monitoring 77 | # configuration. By default this will yield the 78 | # configuration stored in the cisagov organization-level 79 | # variable, but if you want to use a different configuration 80 | # then simply: 81 | # 1. Create a repository-level variable with the name 82 | # ACTIONS_PERMISSIONS_CONFIG. 83 | # 2. Set this new variable's value to the configuration you 84 | # want to use for this repository. 85 | # 86 | # Note in particular that changing the permissions 87 | # monitoring configuration *does not* require you to modify 88 | # this workflow. 
89 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 90 | - id: checkout-repo 91 | name: Checkout the repository 92 | uses: actions/checkout@v4 93 | - id: dependency-review 94 | name: Review dependency changes for vulnerabilities and license changes 95 | uses: actions/dependency-review-action@v4 96 | -------------------------------------------------------------------------------- /.github/workflows/sync-labels.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: sync-labels 3 | 4 | on: # yamllint disable-line rule:truthy 5 | push: 6 | paths: 7 | - .github/labels.yml 8 | - .github/workflows/sync-labels.yml 9 | workflow_dispatch: 10 | 11 | permissions: 12 | contents: read 13 | 14 | jobs: 15 | diagnostics: 16 | name: Run diagnostics 17 | # This job does not need any permissions 18 | permissions: {} 19 | runs-on: ubuntu-latest 20 | steps: 21 | # Note that a duplicate of this step must be added at the top of 22 | # each job. 23 | - name: Apply standard cisagov job preamble 24 | uses: cisagov/action-job-preamble@v1 25 | with: 26 | check_github_status: "true" 27 | # This functionality is poorly implemented and has been 28 | # causing problems due to the MITM implementation hogging or 29 | # leaking memory. As a result we disable it by default. If 30 | # you want to temporarily enable it, simply set 31 | # monitor_permissions equal to "true". 32 | # 33 | # TODO: Re-enable this functionality when practical. See 34 | # cisagov/skeleton-generic#207 for more details. 35 | monitor_permissions: "false" 36 | output_workflow_context: "true" 37 | # Use a variable to specify the permissions monitoring 38 | # configuration. By default this will yield the 39 | # configuration stored in the cisagov organization-level 40 | # variable, but if you want to use a different configuration 41 | # then simply: 42 | # 1. Create a repository-level variable with the name 43 | # ACTIONS_PERMISSIONS_CONFIG. 44 | # 2. Set this new variable's value to the configuration you 45 | # want to use for this repository. 46 | # 47 | # Note in particular that changing the permissions 48 | # monitoring configuration *does not* require you to modify 49 | # this workflow. 50 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 51 | labeler: 52 | needs: 53 | - diagnostics 54 | permissions: 55 | # actions/checkout needs this to fetch code 56 | contents: read 57 | # crazy-max/ghaction-github-labeler needs this to manage repository labels 58 | issues: write 59 | runs-on: ubuntu-latest 60 | steps: 61 | - name: Apply standard cisagov job preamble 62 | uses: cisagov/action-job-preamble@v1 63 | with: 64 | # This functionality is poorly implemented and has been 65 | # causing problems due to the MITM implementation hogging or 66 | # leaking memory. As a result we disable it by default. If 67 | # you want to temporarily enable it, simply set 68 | # monitor_permissions equal to "true". 69 | # 70 | # TODO: Re-enable this functionality when practical. See 71 | # cisagov/skeleton-generic#207 for more details. 72 | monitor_permissions: "false" 73 | # Use a variable to specify the permissions monitoring 74 | # configuration. By default this will yield the 75 | # configuration stored in the cisagov organization-level 76 | # variable, but if you want to use a different configuration 77 | # then simply: 78 | # 1. Create a repository-level variable with the name 79 | # ACTIONS_PERMISSIONS_CONFIG. 80 | # 2. 
Set this new variable's value to the configuration you 81 | # want to use for this repository. 82 | # 83 | # Note in particular that changing the permissions 84 | # monitoring configuration *does not* require you to modify 85 | # this workflow. 86 | permissions_monitoring_config: ${{ vars.ACTIONS_PERMISSIONS_CONFIG }} 87 | - uses: actions/checkout@v4 88 | - name: Sync repository labels 89 | if: success() 90 | uses: crazy-max/ghaction-github-labeler@v5 91 | with: 92 | # This is a hideous ternary equivalent so we only do a dry run unless 93 | # this workflow is triggered by the develop branch. 94 | dry-run: ${{ github.ref_name == 'develop' && 'false' || 'true' }} 95 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # This file specifies intentionally untracked files that Git should ignore. 2 | # Files already tracked by Git are not affected. 3 | # See: https://git-scm.com/docs/gitignore 4 | 5 | ## Python ## 6 | __pycache__ 7 | .coverage 8 | .mypy_cache 9 | .pytest_cache 10 | .python-version 11 | *.egg-info 12 | dist 13 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | combine_star=true 3 | force_sort_within_sections=true 4 | 5 | import_heading_stdlib=Standard Python Libraries 6 | import_heading_thirdparty=Third-Party Libraries 7 | import_heading_firstparty=cisagov Libraries 8 | 9 | # Run isort under the black profile to align with our other Python linting 10 | profile=black 11 | -------------------------------------------------------------------------------- /.mdl_config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Default state for all rules 4 | default: true 5 | 6 | # MD003/heading-style/header-style - Heading style 7 | MD003: 8 | # Enforce the ATX-closed style of header 9 | style: atx_closed 10 | 11 | # MD004/ul-style - Unordered list style 12 | MD004: 13 | # Enforce dashes for unordered lists 14 | style: dash 15 | 16 | # MD013/line-length - Line length 17 | MD013: 18 | # Do not enforce for code blocks 19 | code_blocks: false 20 | # Do not enforce for tables 21 | tables: false 22 | 23 | # MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the 24 | # same content 25 | MD024: 26 | # Allow headers with the same content as long as they are not in the same 27 | # parent heading 28 | allow_different_nesting: true 29 | 30 | # MD029/ol-prefix - Ordered list item prefix 31 | MD029: 32 | # Enforce the `1.` style for ordered lists 33 | style: one 34 | 35 | # MD033/no-inline-html - Inline HTML 36 | MD033: 37 | # The h1 and img elements are allowed to permit header images 38 | allowed_elements: 39 | - h1 40 | - img 41 | 42 | # MD035/hr-style - Horizontal rule style 43 | MD035: 44 | # Enforce dashes for horizontal rules 45 | style: --- 46 | 47 | # MD046/code-block-style - Code block style 48 | MD046: 49 | # Enforce the fenced style for code blocks 50 | style: fenced 51 | 52 | # MD049/emphasis-style - Emphasis style should be consistent 53 | MD049: 54 | # Enforce asterisks as the style to use for emphasis 55 | style: asterisk 56 | 57 | # MD050/strong-style - Strong style should be consistent 58 | MD050: 59 | # Enforce asterisks as the style to use for strong 60 | style: asterisk 61 | 
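As a quick illustration of the `.isort.cfg` settings shown earlier (the per-section comment headings and the black profile), imports in this repository's Python modules end up grouped as sketched below. This is a minimal, hypothetical module: `docopt` is used as the third-party example only because `types-docopt` appears in the mypy hook configuration later in this dump, and the first-party import simply mirrors the `src/aws_profile_sync` layout from the directory tree.

```python
"""Hypothetical module showing the isort-enforced import grouping."""

# Standard Python Libraries
import os
import sys

# Third-Party Libraries
import docopt

# cisagov Libraries
from aws_profile_sync import aws_profile_sync
```

Running `isort` (directly or via the pre-commit hook configured below) inserts these section heading comments automatically; they do not need to be written by hand.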
-------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | default_language_version: 3 | # force all unspecified python hooks to run python3 4 | python: python3 5 | 6 | repos: 7 | # Check the pre-commit configuration 8 | - repo: meta 9 | hooks: 10 | - id: check-useless-excludes 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v5.0.0 14 | hooks: 15 | - id: check-case-conflict 16 | - id: check-executables-have-shebangs 17 | - id: check-json 18 | - id: check-merge-conflict 19 | - id: check-shebang-scripts-are-executable 20 | - id: check-symlinks 21 | - id: check-toml 22 | - id: check-vcs-permalinks 23 | - id: check-xml 24 | - id: debug-statements 25 | - id: destroyed-symlinks 26 | - id: detect-aws-credentials 27 | args: 28 | - --allow-missing-credentials 29 | - id: detect-private-key 30 | - id: end-of-file-fixer 31 | - id: mixed-line-ending 32 | args: 33 | - --fix=lf 34 | - id: pretty-format-json 35 | args: 36 | - --autofix 37 | - id: requirements-txt-fixer 38 | - id: trailing-whitespace 39 | 40 | # Text file hooks 41 | - repo: https://github.com/igorshubovych/markdownlint-cli 42 | rev: v0.44.0 43 | hooks: 44 | - id: markdownlint 45 | args: 46 | - --config=.mdl_config.yaml 47 | - repo: https://github.com/rbubley/mirrors-prettier 48 | rev: v3.5.3 49 | hooks: 50 | - id: prettier 51 | - repo: https://github.com/adrienverge/yamllint 52 | rev: v1.37.0 53 | hooks: 54 | - id: yamllint 55 | args: 56 | - --strict 57 | 58 | # GitHub Actions hooks 59 | - repo: https://github.com/python-jsonschema/check-jsonschema 60 | rev: 0.32.1 61 | hooks: 62 | - id: check-github-actions 63 | - id: check-github-workflows 64 | 65 | # pre-commit hooks 66 | - repo: https://github.com/pre-commit/pre-commit 67 | rev: v4.2.0 68 | hooks: 69 | - id: validate_manifest 70 | 71 | # Go hooks 72 | - repo: https://github.com/TekWizely/pre-commit-golang 73 | rev: v1.0.0-rc.1 74 | hooks: 75 | # Go Build 76 | - id: go-build-repo-mod 77 | # Style Checkers 78 | - id: go-critic 79 | # goimports 80 | - id: go-imports-repo 81 | args: 82 | # Write changes to files 83 | - -w 84 | # Go Mod Tidy 85 | - id: go-mod-tidy-repo 86 | # GoSec 87 | - id: go-sec-repo-mod 88 | # StaticCheck 89 | - id: go-staticcheck-repo-mod 90 | # Go Test 91 | - id: go-test-repo-mod 92 | # Go Vet 93 | - id: go-vet-repo-mod 94 | # Nix hooks 95 | - repo: https://github.com/nix-community/nixpkgs-fmt 96 | rev: v1.3.0 97 | hooks: 98 | - id: nixpkgs-fmt 99 | 100 | # Shell script hooks 101 | - repo: https://github.com/scop/pre-commit-shfmt 102 | rev: v3.11.0-1 103 | hooks: 104 | - id: shfmt 105 | args: 106 | # List files that will be formatted 107 | - --list 108 | # Write result to file instead of stdout 109 | - --write 110 | # Indent by two spaces 111 | - --indent 112 | - "2" 113 | # Binary operators may start a line 114 | - --binary-next-line 115 | # Switch cases are indented 116 | - --case-indent 117 | # Redirect operators are followed by a space 118 | - --space-redirects 119 | - repo: https://github.com/shellcheck-py/shellcheck-py 120 | rev: v0.10.0.1 121 | hooks: 122 | - id: shellcheck 123 | 124 | # Python hooks 125 | # Run bandit on the "tests" tree with a configuration 126 | - repo: https://github.com/PyCQA/bandit 127 | rev: 1.8.3 128 | hooks: 129 | - id: bandit 130 | name: bandit (tests tree) 131 | files: tests 132 | args: 133 | - --config=.bandit.yml 134 | # Run bandit on everything except the "tests" 
tree 135 | - repo: https://github.com/PyCQA/bandit 136 | rev: 1.8.3 137 | hooks: 138 | - id: bandit 139 | name: bandit (everything else) 140 | exclude: tests 141 | - repo: https://github.com/psf/black-pre-commit-mirror 142 | rev: 25.1.0 143 | hooks: 144 | - id: black 145 | - repo: https://github.com/PyCQA/flake8 146 | rev: 7.1.2 147 | hooks: 148 | - id: flake8 149 | additional_dependencies: 150 | - flake8-docstrings==1.7.0 151 | - repo: https://github.com/PyCQA/isort 152 | rev: 6.0.1 153 | hooks: 154 | - id: isort 155 | - repo: https://github.com/pre-commit/mirrors-mypy 156 | rev: v1.15.0 157 | hooks: 158 | - id: mypy 159 | # IMPORTANT: Keep type hinting-related dependencies of the 160 | # mypy pre-commit hook additional_dependencies in sync with 161 | # the dev section of setup.py to avoid discrepancies in type 162 | # checking between environments. 163 | additional_dependencies: 164 | - types-docopt 165 | - types-setuptools 166 | - repo: https://github.com/pypa/pip-audit 167 | rev: v2.8.0 168 | hooks: 169 | - id: pip-audit 170 | args: 171 | # Add any pip requirements files to scan 172 | - --requirement 173 | - requirements-dev.txt 174 | - --requirement 175 | - requirements-test.txt 176 | - --requirement 177 | - requirements.txt 178 | - repo: https://github.com/asottile/pyupgrade 179 | rev: v3.19.1 180 | hooks: 181 | - id: pyupgrade 182 | 183 | # Ansible hooks 184 | - repo: https://github.com/ansible/ansible-lint 185 | rev: v25.1.3 186 | hooks: 187 | - id: ansible-lint 188 | additional_dependencies: 189 | # On its own ansible-lint does not pull in ansible, only 190 | # ansible-core. Therefore, if an Ansible module lives in 191 | # ansible instead of ansible-core, the linter will complain 192 | # that the module is unknown. In these cases it is 193 | # necessary to add the ansible package itself as an 194 | # additional dependency, with the same pinning as is done in 195 | # requirements-test.txt of cisagov/skeleton-ansible-role. 196 | # 197 | # Version 10 is required because the pip-audit pre-commit 198 | # hook identifies a vulnerability in ansible-core 2.16.13, 199 | # but all versions of ansible 9 have a dependency on 200 | # ~=2.16.X. 201 | # 202 | # It is also a good idea to go ahead and upgrade to version 203 | # 10 since version 9 is going EOL at the end of November: 204 | # https://endoflife.date/ansible 205 | # - ansible>=10,<11 206 | # ansible-core 2.16.3 through 2.16.6 suffer from the bug 207 | # discussed in ansible/ansible#82702, which breaks any 208 | # symlinked files in vars, tasks, etc. for any Ansible role 209 | # installed via ansible-galaxy. Hence we never want to 210 | # install those versions. 211 | # 212 | # Note that the pip-audit pre-commit hook identifies a 213 | # vulnerability in ansible-core 2.16.13. The pin of 214 | # ansible-core to >=2.17 effectively also pins ansible to 215 | # >=10. 216 | # 217 | # It is also a good idea to go ahead and upgrade to 218 | # ansible-core 2.17 since security support for ansible-core 219 | # 2.16 ends this month: 220 | # https://docs.ansible.com/ansible/devel/reference_appendices/release_and_maintenance.html#ansible-core-support-matrix 221 | # 222 | # Note that any changes made to this dependency must also be 223 | # made in requirements.txt in cisagov/skeleton-packer and 224 | # requirements-test.txt in cisagov/skeleton-ansible-role. 
225 | - ansible-core>=2.17 226 | 227 | # Terraform hooks 228 | - repo: https://github.com/antonbabenko/pre-commit-terraform 229 | rev: v1.98.0 230 | hooks: 231 | - id: terraform_fmt 232 | - id: terraform_validate 233 | 234 | # Docker hooks 235 | - repo: https://github.com/IamTheFij/docker-pre-commit 236 | rev: v3.0.1 237 | hooks: 238 | - id: docker-compose-check 239 | 240 | # Packer hooks 241 | - repo: https://github.com/cisagov/pre-commit-packer 242 | rev: v0.3.0 243 | hooks: 244 | - id: packer_fmt 245 | - id: packer_validate 246 | -------------------------------------------------------------------------------- /.prettierignore: -------------------------------------------------------------------------------- 1 | # Already being linted by pretty-format-json 2 | *.json 3 | # Already being linted by mdl 4 | *.md 5 | # Already being linted by yamllint 6 | *.yaml 7 | *.yml 8 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | braces: 6 | # Do not allow non-empty flow mappings 7 | forbid: non-empty 8 | # Allow up to one space inside braces. This is required for Ansible compatibility. 9 | max-spaces-inside: 1 10 | 11 | brackets: 12 | # Do not allow non-empty flow sequences 13 | forbid: non-empty 14 | 15 | comments: 16 | # Ensure that inline comments have at least one space before the preceding content. 17 | # This is required for Ansible compatibility. 18 | min-spaces-from-content: 1 19 | 20 | # yamllint does not like it when you comment out different parts of 21 | # dictionaries in a list. You can see 22 | # https://github.com/adrienverge/yamllint/issues/384 for some examples of 23 | # this behavior. 24 | comments-indentation: disable 25 | 26 | indentation: 27 | # Ensure that block sequences inside of a mapping are indented 28 | indent-sequences: true 29 | # Enforce a specific number of spaces 30 | spaces: 2 31 | 32 | # yamllint does not allow inline mappings that exceed the line length by 33 | # default. There are many scenarios where the inline mapping may be a key, 34 | # hash, or other long value that would exceed the line length but cannot 35 | # reasonably be broken across lines. 36 | line-length: 37 | # This rule implies the allow-non-breakable-words rule 38 | allow-non-breakable-inline-mappings: true 39 | # Allows a 10% overage from the default limit of 80 40 | max: 88 41 | 42 | # Using anything other than strings to express octal values can lead to unexpected 43 | # and potentially unsafe behavior. Ansible strongly recommends against such practices 44 | # and these rules are needed for Ansible compatibility. Please see the following for 45 | # more information: 46 | # https://ansible.readthedocs.io/projects/lint/rules/risky-octal/ 47 | octal-values: 48 | # Do not allow explicit octal values (those beginning with a leading 0o). 49 | forbid-explicit-octal: true 50 | # Do not allow implicit octal values (those beginning with a leading 0). 51 | forbid-implicit-octal: true 52 | 53 | quoted-strings: 54 | # Allow disallowed quotes (single quotes) for strings that contain allowed quotes 55 | # (double quotes). 
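# As an illustration (hypothetical key and value, not part of this
# configuration): with the option below enabled, a line such as
#   motto: 'say "hello" politely'
# is accepted, because the single-quoted string contains the preferred
# double quotes.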
56 | allow-quoted-quotes: true 57 | # Apply these rules to keys in mappings as well 58 | check-keys: true 59 | # We prefer double quotes for strings when they are needed 60 | quote-type: double 61 | # Only require quotes when they are necessary for proper processing 62 | required: only-when-needed 63 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Welcome # 2 | 3 | We're so glad you're thinking about contributing to this open source 4 | project! If you're unsure or afraid of anything, just ask or submit 5 | the issue or pull request anyway. The worst that can happen is that 6 | you'll be politely asked to change something. We appreciate any sort 7 | of contribution, and don't want a wall of rules to get in the way of 8 | that. 9 | 10 | Before contributing, we encourage you to read our CONTRIBUTING policy 11 | (you are here), our [LICENSE](LICENSE), and our [README](README.md), 12 | all of which should be in this repository. 13 | 14 | ## Issues ## 15 | 16 | If you want to report a bug or request a new feature, the most direct 17 | method is to [create an 18 | issue](https://github.com/cisagov/aws-profile-sync/issues) in 19 | this repository. We recommend that you first search through existing 20 | issues (both open and closed) to check if your particular issue has 21 | already been reported. If it has then you might want to add a comment 22 | to the existing issue. If it hasn't then feel free to create a new 23 | one. 24 | 25 | ## Pull requests ## 26 | 27 | If you choose to [submit a pull 28 | request](https://github.com/cisagov/aws-profile-sync/pulls), 29 | you will notice that our continuous integration (CI) system runs a 30 | fairly extensive set of linters, syntax checkers, system, and unit tests. 31 | Your pull request may fail these checks, and that's OK. If you want 32 | you can stop there and wait for us to make the necessary corrections 33 | to ensure your code passes the CI checks. 34 | 35 | If you want to make the changes yourself, or if you want to become a 36 | regular contributor, then you will want to set up 37 | [pre-commit](https://pre-commit.com/) on your local machine. Once you 38 | do that, the CI checks will run locally before you even write your 39 | commit message. This speeds up your development cycle considerably. 40 | 41 | ### Setting up pre-commit ### 42 | 43 | There are a few ways to do this, but we prefer to use 44 | [`pyenv`](https://github.com/pyenv/pyenv) and 45 | [`pyenv-virtualenv`](https://github.com/pyenv/pyenv-virtualenv) to 46 | create and manage a Python virtual environment specific to this 47 | project. 48 | 49 | We recommend using the `setup-env` script located in this repository, 50 | as it automates the entire environment configuration process. The 51 | dependencies required to run this script are 52 | [GNU `getopt`](https://github.com/util-linux/util-linux/blob/master/misc-utils/getopt.1.adoc), 53 | [`pyenv`](https://github.com/pyenv/pyenv), and [`pyenv-virtualenv`](https://github.com/pyenv/pyenv-virtualenv). 54 | If these tools are already configured on your system, you can simply run the 55 | following command: 56 | 57 | ```console 58 | ./setup-env 59 | ``` 60 | 61 | Otherwise, follow the steps below to manually configure your 62 | environment. 63 | 64 | #### Installing and using GNU `getopt`, `pyenv`, and `pyenv-virtualenv` #### 65 | 66 | On macOS, we recommend installing [brew](https://brew.sh/). 
Then 67 | installation is as simple as `brew install gnu-getopt pyenv pyenv-virtualenv` and 68 | adding this to your profile: 69 | 70 | ```bash 71 | # GNU getopt must be explicitly added to the path since it is 72 | # keg-only (https://docs.brew.sh/FAQ#what-does-keg-only-mean) 73 | export PATH="$(brew --prefix)/opt/gnu-getopt/bin:$PATH" 74 | 75 | # Setup pyenv 76 | export PYENV_ROOT="$HOME/.pyenv" 77 | export PATH="$PYENV_ROOT/bin:$PATH" 78 | eval "$(pyenv init --path)" 79 | eval "$(pyenv init -)" 80 | eval "$(pyenv virtualenv-init -)" 81 | ``` 82 | 83 | For Linux, Windows Subsystem for Linux (WSL), or macOS (if you 84 | don't want to use `brew`) you can use 85 | [pyenv/pyenv-installer](https://github.com/pyenv/pyenv-installer) to 86 | install the necessary tools. Before running this ensure that you have 87 | installed the prerequisites for your platform according to the 88 | [`pyenv` wiki 89 | page](https://github.com/pyenv/pyenv/wiki/common-build-problems). 90 | GNU `getopt` is included in most Linux distributions as part of the 91 | [`util-linux`](https://github.com/util-linux/util-linux) package. 92 | 93 | On WSL you should treat your platform as whatever Linux distribution 94 | you've chosen to install. 95 | 96 | Once you have installed `pyenv` you will need to add the following 97 | lines to your `.bash_profile` (or `.profile`): 98 | 99 | ```bash 100 | export PYENV_ROOT="$HOME/.pyenv" 101 | export PATH="$PYENV_ROOT/bin:$PATH" 102 | eval "$(pyenv init --path)" 103 | ``` 104 | 105 | and then add the following lines to your `.bashrc`: 106 | 107 | ```bash 108 | eval "$(pyenv init -)" 109 | eval "$(pyenv virtualenv-init -)" 110 | ``` 111 | 112 | If you want more information about setting up `pyenv` once installed, please run 113 | 114 | ```console 115 | pyenv init 116 | ``` 117 | 118 | and 119 | 120 | ```console 121 | pyenv virtualenv-init 122 | ``` 123 | 124 | for the current configuration instructions. 125 | 126 | If you are using a shell other than `bash` you should follow the 127 | instructions that the `pyenv-installer` script outputs. 128 | 129 | You will need to reload your shell for these changes to take effect so 130 | you can begin to use `pyenv`. 131 | 132 | For a list of Python versions that are already installed and ready to 133 | use with `pyenv`, use the command `pyenv versions`. To see a list of 134 | the Python versions available to be installed and used with `pyenv` 135 | use the command `pyenv install --list`. You can read more 136 | [here](https://github.com/pyenv/pyenv/blob/master/COMMANDS.md) about 137 | the many things that `pyenv` can do. See 138 | [here](https://github.com/pyenv/pyenv-virtualenv#usage) for the 139 | additional capabilities that pyenv-virtualenv adds to the `pyenv` 140 | command. 141 | 142 | #### Creating the Python virtual environment #### 143 | 144 | Once `pyenv` and `pyenv-virtualenv` are installed on your system, you 145 | can create and configure the Python virtual environment with these 146 | commands: 147 | 148 | ```console 149 | cd aws-profile-sync 150 | pyenv virtualenv aws-profile-sync 151 | pyenv local aws-profile-sync 152 | pip install --requirement requirements-dev.txt 153 | ``` 154 | 155 | #### Installing the pre-commit hook #### 156 | 157 | Now setting up pre-commit is as simple as: 158 | 159 | ```console 160 | pre-commit install 161 | ``` 162 | 163 | At this point the pre-commit checks will run against any files that 164 | you attempt to commit. 
If you want to run the checks against the 165 | entire repo, just execute `pre-commit run --all-files`. 166 | 167 | ### Running unit and system tests ### 168 | 169 | In addition to the pre-commit checks the CI system will run the suite 170 | of unit and system tests that are included with this project. To run 171 | these tests locally execute `pytest` from the root of the project. 172 | 173 | We encourage any updates to these tests to improve the overall code 174 | coverage. If your pull request adds new functionality we would 175 | appreciate it if you extend existing test cases, or add new ones to 176 | exercise the newly added code. 177 | 178 | ## Public domain ## 179 | 180 | This project is in the public domain within the United States, and 181 | copyright and related rights in the work worldwide are waived through 182 | the [CC0 1.0 Universal public domain 183 | dedication](https://creativecommons.org/publicdomain/zero/1.0/). 184 | 185 | All contributions to this project will be released under the CC0 186 | dedication. By submitting a pull request, you are agreeing to comply 187 | with this waiver of copyright interest. 188 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | CC0 1.0 Universal 2 | 3 | Statement of Purpose 4 | 5 | The laws of most jurisdictions throughout the world automatically confer 6 | exclusive Copyright and Related Rights (defined below) upon the creator and 7 | subsequent owner(s) (each and all, an "owner") of an original work of 8 | authorship and/or a database (each, a "Work"). 9 | 10 | Certain owners wish to permanently relinquish those rights to a Work for the 11 | purpose of contributing to a commons of creative, cultural and scientific 12 | works ("Commons") that the public can reliably and without fear of later 13 | claims of infringement build upon, modify, incorporate in other works, reuse 14 | and redistribute as freely as possible in any form whatsoever and for any 15 | purposes, including without limitation commercial purposes. These owners may 16 | contribute to the Commons to promote the ideal of a free culture and the 17 | further production of creative, cultural and scientific works, or to gain 18 | reputation or greater distribution for their Work in part through the use and 19 | efforts of others. 20 | 21 | For these and/or other purposes and motivations, and without any expectation 22 | of additional consideration or compensation, the person associating CC0 with a 23 | Work (the "Affirmer"), to the extent that he or she is an owner of Copyright 24 | and Related Rights in the Work, voluntarily elects to apply CC0 to the Work 25 | and publicly distribute the Work under its terms, with knowledge of his or her 26 | Copyright and Related Rights in the Work and the meaning and intended legal 27 | effect of CC0 on those rights. 28 | 29 | 1. Copyright and Related Rights. A Work made available under CC0 may be 30 | protected by copyright and related or neighboring rights ("Copyright and 31 | Related Rights"). Copyright and Related Rights include, but are not limited 32 | to, the following: 33 | 34 | i. the right to reproduce, adapt, distribute, perform, display, communicate, 35 | and translate a Work; 36 | 37 | ii. moral rights retained by the original author(s) and/or performer(s); 38 | 39 | iii. publicity and privacy rights pertaining to a person's image or likeness 40 | depicted in a Work; 41 | 42 | iv. 
rights protecting against unfair competition in regards to a Work, 43 | subject to the limitations in paragraph 4(a), below; 44 | 45 | v. rights protecting the extraction, dissemination, use and reuse of data in 46 | a Work; 47 | 48 | vi. database rights (such as those arising under Directive 96/9/EC of the 49 | European Parliament and of the Council of 11 March 1996 on the legal 50 | protection of databases, and under any national implementation thereof, 51 | including any amended or successor version of such directive); and 52 | 53 | vii. other similar, equivalent or corresponding rights throughout the world 54 | based on applicable law or treaty, and any national implementations thereof. 55 | 56 | 2. Waiver. To the greatest extent permitted by, but not in contravention of, 57 | applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and 58 | unconditionally waives, abandons, and surrenders all of Affirmer's Copyright 59 | and Related Rights and associated claims and causes of action, whether now 60 | known or unknown (including existing as well as future claims and causes of 61 | action), in the Work (i) in all territories worldwide, (ii) for the maximum 62 | duration provided by applicable law or treaty (including future time 63 | extensions), (iii) in any current or future medium and for any number of 64 | copies, and (iv) for any purpose whatsoever, including without limitation 65 | commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes 66 | the Waiver for the benefit of each member of the public at large and to the 67 | detriment of Affirmer's heirs and successors, fully intending that such Waiver 68 | shall not be subject to revocation, rescission, cancellation, termination, or 69 | any other legal or equitable action to disrupt the quiet enjoyment of the Work 70 | by the public as contemplated by Affirmer's express Statement of Purpose. 71 | 72 | 3. Public License Fallback. Should any part of the Waiver for any reason be 73 | judged legally invalid or ineffective under applicable law, then the Waiver 74 | shall be preserved to the maximum extent permitted taking into account 75 | Affirmer's express Statement of Purpose. In addition, to the extent the Waiver 76 | is so judged Affirmer hereby grants to each affected person a royalty-free, 77 | non transferable, non sublicensable, non exclusive, irrevocable and 78 | unconditional license to exercise Affirmer's Copyright and Related Rights in 79 | the Work (i) in all territories worldwide, (ii) for the maximum duration 80 | provided by applicable law or treaty (including future time extensions), (iii) 81 | in any current or future medium and for any number of copies, and (iv) for any 82 | purpose whatsoever, including without limitation commercial, advertising or 83 | promotional purposes (the "License"). The License shall be deemed effective as 84 | of the date CC0 was applied by Affirmer to the Work. Should any part of the 85 | License for any reason be judged legally invalid or ineffective under 86 | applicable law, such partial invalidity or ineffectiveness shall not 87 | invalidate the remainder of the License, and in such case Affirmer hereby 88 | affirms that he or she will not (i) exercise any of his or her remaining 89 | Copyright and Related Rights in the Work or (ii) assert any associated claims 90 | and causes of action with respect to the Work, in either case contrary to 91 | Affirmer's express Statement of Purpose. 92 | 93 | 4. Limitations and Disclaimers. 94 | 95 | a. 
No trademark or patent rights held by Affirmer are waived, abandoned, 96 | surrendered, licensed or otherwise affected by this document. 97 | 98 | b. Affirmer offers the Work as-is and makes no representations or warranties 99 | of any kind concerning the Work, express, implied, statutory or otherwise, 100 | including without limitation warranties of title, merchantability, fitness 101 | for a particular purpose, non infringement, or the absence of latent or 102 | other defects, accuracy, or the present or absence of errors, whether or not 103 | discoverable, all to the greatest extent permissible under applicable law. 104 | 105 | c. Affirmer disclaims responsibility for clearing rights of other persons 106 | that may apply to the Work or any use thereof, including without limitation 107 | any person's Copyright and Related Rights in the Work. Further, Affirmer 108 | disclaims responsibility for obtaining any necessary consents, permissions 109 | or other rights required for any use of the Work. 110 | 111 | d. Affirmer understands and acknowledges that Creative Commons is not a 112 | party to this document and has no duty or obligation with respect to this 113 | CC0 or use of the Work. 114 | 115 | For more information, please see 116 | <https://creativecommons.org/publicdomain/zero/1.0/> 117 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # aws-profile-sync ☁️🧻🚰 # 2 | 3 | [![GitHub Build Status](https://github.com/cisagov/aws-profile-sync/workflows/build/badge.svg)](https://github.com/cisagov/aws-profile-sync/actions) 4 | [![CodeQL](https://github.com/cisagov/aws-profile-sync/workflows/CodeQL/badge.svg)](https://github.com/cisagov/aws-profile-sync/actions/workflows/codeql-analysis.yml) 5 | [![Coverage Status](https://coveralls.io/repos/github/cisagov/aws-profile-sync/badge.svg?branch=develop)](https://coveralls.io/github/cisagov/aws-profile-sync?branch=develop) 6 | [![Known Vulnerabilities](https://snyk.io/test/github/cisagov/aws-profile-sync/develop/badge.svg)](https://snyk.io/test/github/cisagov/aws-profile-sync) 7 | 8 | `aws-profile-sync` is a command line utility that simplifies the synchronization 9 | of 10 | [AWS credential profiles](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) 11 | across groups of users. 12 | 13 | ## Requirements ## 14 | 15 | - Python versions 3.9 and above. Note that Python 2 *is not* supported. 16 | - Git version 2.23 and above if using the Git handler. 17 | 18 | ## Installation ## 19 | 20 | From source: 21 | 22 | ```console 23 | git clone https://github.com/cisagov/aws-profile-sync.git 24 | cd aws-profile-sync 25 | pip install -r requirements.txt 26 | ``` 27 | 28 | ## Usage ## 29 | 30 | The utility reads a credentials file looking for magic `#!profile-sync` comments. 31 | It will then fetch the remote content and intelligently integrate it into a new 32 | credentials file. 33 | 34 | ```gitconfig 35 | [cool-user] 36 | aws_access_key_id = XXXXXXXXXXXXXXXXXXXX 37 | aws_secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 38 | 39 | #!profile-sync ssh://git@github.com/aceofspades/aws-profiles.git branch=master filename=roles -- source_profile=cool-user role_session_name=lemmy-is-god mfa_serial=arn:aws:iam::123456789012:mfa/ian.kilmister 40 | 41 | # This line will get replaced 42 | 43 | #!profile-sync-stop 44 | 45 | # These lines won't be modified by the utility.
46 | # That was a great time, the summer of '71 - I can't remember it, but I'll never forget it! 47 | ``` 48 | 49 | The utility will replace all the content between the `#!profile-sync` and 50 | `#!profile-sync-stop` lines in the above example. To do this it will: 51 | 52 | - Clone the repository that lives at `git@github.com/aceofspades/aws-profiles.git`. 53 | - Switch to the `master` branch. 54 | - Read the file `roles`. 55 | - Override and replace any values specified after the `--` in the magic line. 56 | 57 | A copy of your previous `credentials` file is stored next to it as `credentials.backup`. 58 | 59 | For detailed usage instructions see: `aws-profile-sync --help` 60 | 61 | ## Contributing ## 62 | 63 | We welcome contributions! Please see [`CONTRIBUTING.md`](CONTRIBUTING.md) for 64 | details. 65 | 66 | ## License ## 67 | 68 | This project is in the worldwide [public domain](LICENSE). 69 | 70 | This project is in the public domain within the United States, and 71 | copyright and related rights in the work worldwide are waived through 72 | the [CC0 1.0 Universal public domain 73 | dedication](https://creativecommons.org/publicdomain/zero/1.0/). 74 | 75 | All contributions to this project will be released under the CC0 76 | dedication. By submitting a pull request, you are agreeing to comply 77 | with this waiver of copyright interest. 78 | -------------------------------------------------------------------------------- /bump-version: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # bump-version [--push] [--label LABEL] (major | minor | patch | prerelease | build | finalize | show) 4 | # bump-version --list-files 5 | 6 | set -o nounset 7 | set -o errexit 8 | set -o pipefail 9 | 10 | # Stores the canonical version for the project. 11 | VERSION_FILE=src/aws_profile_sync/_version.py 12 | # Files that should be updated with the new version. 13 | VERSION_FILES=("$VERSION_FILE") 14 | 15 | USAGE=$( 16 | cat << END_OF_LINE 17 | Update the version of the project. 18 | 19 | Usage: 20 | ${0##*/} [--push] [--label LABEL] (major | minor | patch | prerelease | build | finalize | show) 21 | ${0##*/} --list-files 22 | ${0##*/} (-h | --help) 23 | 24 | Options: 25 | -h | --help Show this message. 26 | --push Perform a \`git push\` after updating the version. 27 | --label LABEL Specify the label to use when updating the build or prerelease version. 28 | --list-files List the files that will be updated when the version is bumped. 29 | END_OF_LINE 30 | ) 31 | 32 | old_version=$(sed -n "s/^__version__ = \"\(.*\)\"$/\1/p" $VERSION_FILE) 33 | # Escape periods so they are interpreted as literal periods and don't 34 | # just match any character 35 | old_version_regex=${old_version//\./\\\.} 36 | new_version="$old_version" 37 | 38 | bump_part="" 39 | label="" 40 | commit_prefix="Bump" 41 | with_push=false 42 | commands_with_label=("build" "prerelease") 43 | commands_with_prerelease=("major" "minor" "patch") 44 | with_prerelease=false 45 | 46 | ####################################### 47 | # Display an error message, the help information, and exit with a non-zero status. 48 | # Arguments: 49 | # Error message. 50 | ####################################### 51 | function invalid_option() { 52 | echo "$1" 53 | echo "$USAGE" 54 | exit 1 55 | } 56 | 57 | ####################################### 58 | # Bump the version using the provided command. 59 | # Arguments: 60 | # The version to bump. 61 | # The command to bump the version.
62 | # Returns: 63 | # The new version. 64 | ####################################### 65 | function bump_version() { 66 | local temp_version 67 | temp_version=$(python -c "import semver; print(semver.parse_version_info('$1').${2})") 68 | echo "$temp_version" 69 | } 70 | 71 | if [ $# -eq 0 ]; then 72 | echo "$USAGE" 73 | exit 1 74 | else 75 | while [ $# -gt 0 ]; do 76 | case $1 in 77 | --push) 78 | if [ "$with_push" = true ]; then 79 | invalid_option "Push has already been set." 80 | fi 81 | 82 | with_push=true 83 | shift 84 | ;; 85 | --label) 86 | if [ -n "$label" ]; then 87 | invalid_option "Label has already been set." 88 | fi 89 | 90 | label="$2" 91 | shift 2 92 | ;; 93 | build | finalize | major | minor | patch) 94 | if [ -n "$bump_part" ]; then 95 | invalid_option "Only one version part should be bumped at a time." 96 | fi 97 | 98 | bump_part="$1" 99 | shift 100 | ;; 101 | prerelease) 102 | with_prerelease=true 103 | shift 104 | ;; 105 | show) 106 | echo "$old_version" 107 | exit 0 108 | ;; 109 | -h | --help) 110 | echo "$USAGE" 111 | exit 0 112 | ;; 113 | --list-files) 114 | printf '%s\n' "${VERSION_FILES[@]}" 115 | exit 0 116 | ;; 117 | *) 118 | invalid_option "Invalid option: $1" 119 | ;; 120 | esac 121 | done 122 | fi 123 | 124 | if [ -n "$label" ] && [ "$with_prerelease" = false ] && [[ ! " ${commands_with_label[*]} " =~ [[:space:]]${bump_part}[[:space:]] ]]; then 125 | invalid_option "Setting the label is only allowed for the following commands: ${commands_with_label[*]}" 126 | fi 127 | 128 | if [ "$with_prerelease" = true ] && [ -n "$bump_part" ] && [[ ! " ${commands_with_prerelease[*]} " =~ [[:space:]]${bump_part}[[:space:]] ]]; then 129 | invalid_option "Changing the prerelease is only allowed in conjunction with the following commands: ${commands_with_prerelease[*]}" 130 | fi 131 | 132 | label_option="" 133 | if [ -n "$label" ]; then 134 | label_option="token='$label'" 135 | fi 136 | 137 | if [ -n "$bump_part" ]; then 138 | if [ "$bump_part" = "finalize" ]; then 139 | commit_prefix="Finalize" 140 | bump_command="finalize_version()" 141 | elif [ "$bump_part" = "build" ]; then 142 | bump_command="bump_${bump_part}($label_option)" 143 | else 144 | bump_command="bump_${bump_part}()" 145 | fi 146 | new_version=$(bump_version "$old_version" "$bump_command") 147 | echo Changing version from "$old_version" to "$new_version" 148 | fi 149 | 150 | if [ "$with_prerelease" = true ]; then 151 | bump_command="bump_prerelease($label_option)" 152 | temp_version=$(bump_version "$new_version" "$bump_command") 153 | echo Changing version from "$new_version" to "$temp_version" 154 | new_version="$temp_version" 155 | fi 156 | 157 | tmp_file=/tmp/version.$$ 158 | for version_file in "${VERSION_FILES[@]}"; do 159 | if [ ! 
-f "$version_file" ]; then 160 | echo Missing expected file: "$version_file" 161 | exit 1 162 | fi 163 | sed "s/$old_version_regex/$new_version/" "$version_file" > $tmp_file 164 | mv $tmp_file "$version_file" 165 | done 166 | 167 | git add "${VERSION_FILES[@]}" 168 | git commit --message "$commit_prefix version from $old_version to $new_version" 169 | 170 | if [ "$with_push" = true ]; then 171 | git push 172 | fi 173 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | # Increase verbosity, display extra test summary info for tests that did not pass, 3 | # display code coverage results, and enable debug logging 4 | addopts = --verbose -ra --cov --log-cli-level=DEBUG 5 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | --editable .[dev] 2 | --requirement requirements-test.txt 3 | ipython 4 | mypy 5 | # The bump-version script requires at least version 3 of semver. 6 | semver>=3 7 | -------------------------------------------------------------------------------- /requirements-test.txt: -------------------------------------------------------------------------------- 1 | --editable .[test] 2 | --requirement requirements.txt 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Note: Add any additional requirements to setup.py's install_requires field 2 | --editable . 3 | wheel 4 | -------------------------------------------------------------------------------- /setup-env: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o nounset 4 | set -o errexit 5 | set -o pipefail 6 | 7 | USAGE=$( 8 | cat << 'END_OF_LINE' 9 | Configure a development environment for this repository. 10 | 11 | It does the following: 12 | - Allows the user to specify the Python version to use for the virtual environment. 13 | - Allows the user to specify a name for the virtual environment. 14 | - Verifies pyenv and pyenv-virtualenv are installed. 15 | - Creates the Python virtual environment. 16 | - Configures the activation of the virtual enviroment for the repo directory. 17 | - Installs the requirements needed for development (including mypy type stubs). 18 | - Installs git pre-commit hooks. 19 | - Configures git remotes for upstream "lineage" repositories. 20 | 21 | Usage: 22 | setup-env [--venv-name venv_name] [--python-version python_version] 23 | setup-env (-h | --help) 24 | 25 | Options: 26 | -f | --force Delete virtual enviroment if it already exists. 27 | -h | --help Show this message. 28 | -i | --install-hooks Install hook environments for all environments in the 29 | pre-commit config file. 30 | -l | --list-versions List available Python versions and select one interactively. 31 | -v | --venv-name Specify the name of the virtual environment. 32 | -p | --python-version Specify the Python version for the virtual environment. 33 | 34 | END_OF_LINE 35 | ) 36 | 37 | # Display pyenv's installed Python versions 38 | python_versions() { 39 | pyenv versions --bare --skip-aliases --skip-envs 40 | } 41 | 42 | check_python_version() { 43 | local version=$1 44 | 45 | # This is a valid regex for semantically correct Python version strings. 
46 | # For more information see here: 47 | # https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string 48 | # Break down the regex into readable parts major.minor.patch 49 | local major="0|[1-9]\d*" 50 | local minor="0|[1-9]\d*" 51 | local patch="0|[1-9]\d*" 52 | 53 | # Splitting the prerelease part for readability 54 | # Start of the prerelease 55 | local prerelease="(?:-" 56 | # Numeric or alphanumeric identifiers 57 | local prerelease+="(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)" 58 | # Additional dot-separated identifiers 59 | local prerelease+="(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*" 60 | # End of the prerelease, making it optional 61 | local prerelease+=")?" 62 | # Optional build metadata 63 | local build="(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?" 64 | 65 | # Final regex composed of parts 66 | local regex="^($major)\.($minor)\.($patch)$prerelease$build$" 67 | 68 | # This checks if the Python version does not match the regex pattern specified in $regex, 69 | # using Perl for regex matching. If the pattern is not found, then prompt the user with 70 | # the invalid version message. 71 | if ! echo "$version" | perl -ne "exit(!/$regex/)"; then 72 | echo "Invalid version of Python: Python follows semantic versioning," \ 73 | "so any version string that is not a valid semantic version is an" \ 74 | "invalid version of Python." 75 | exit 1 76 | # Else if the Python version isn't installed then notify the user. 77 | # grep -E is used for searching through text lines that match the 78 | # specific version. 79 | elif ! python_versions | grep -E "^${version}$" > /dev/null; then 80 | echo "Error: Python version $version is not installed." 81 | echo "Installed Python versions are:" 82 | python_versions 83 | exit 1 84 | else 85 | echo "Using Python version $version" 86 | fi 87 | } 88 | 89 | # Flag to force deletion and creation of virtual environment 90 | FORCE=0 91 | 92 | # Initialize the other flags 93 | INSTALL_HOOKS=0 94 | LIST_VERSIONS=0 95 | PYTHON_VERSION="" 96 | VENV_NAME="" 97 | 98 | # Define long options 99 | LONGOPTS="force,help,install-hooks,list-versions,python-version:,venv-name:" 100 | 101 | # Define short options for getopt 102 | SHORTOPTS="fhilp:v:" 103 | 104 | # Check for GNU getopt by matching a specific pattern ("getopt from util-linux") 105 | # in its version output. This approach presumes the output format remains stable. 106 | # Be aware that format changes could invalidate this check. 107 | if [[ $(getopt --version 2> /dev/null) != *"getopt from util-linux"* ]]; then 108 | cat << 'END_OF_LINE' 109 | 110 | Please note, this script requires GNU getopt due to its enhanced 111 | functionality and compatibility with certain script features that 112 | are not supported by the POSIX getopt found in some systems, particularly 113 | those with a non-GNU version of getopt. This distinction is crucial 114 | as a system might have a non-GNU version of getopt installed by default, 115 | which could lead to unexpected behavior. 116 | 117 | On macOS, we recommend installing brew (https://brew.sh/). Then installation 118 | is as simple as `brew install gnu-getopt` and adding this to your 119 | profile: 120 | 121 | export PATH="$(brew --prefix)/opt/gnu-getopt/bin:$PATH" 122 | 123 | GNU getopt must be explicitly added to the PATH since it 124 | is keg-only (https://docs.brew.sh/FAQ#what-does-keg-only-mean). 
125 | 126 | END_OF_LINE 127 | exit 1 128 | fi 129 | 130 | # Check to see if pyenv is installed 131 | if [ -z "$(command -v pyenv)" ] || { [ -z "$(command -v pyenv-virtualenv)" ] && [ ! -f "$(pyenv root)/plugins/pyenv-virtualenv/bin/pyenv-virtualenv" ]; }; then 132 | echo "pyenv and pyenv-virtualenv are required." 133 | if [[ "$OSTYPE" == "darwin"* ]]; then 134 | cat << 'END_OF_LINE' 135 | 136 | On macOS, we recommend installing brew, https://brew.sh/. Then installation 137 | is as simple as `brew install pyenv pyenv-virtualenv` and adding this to your 138 | profile: 139 | 140 | eval "$(pyenv init -)" 141 | eval "$(pyenv virtualenv-init -)" 142 | 143 | END_OF_LINE 144 | 145 | fi 146 | cat << 'END_OF_LINE' 147 | For Linux, Windows Subsystem for Linux (WSL), or macOS (if you don't want 148 | to use "brew") you can use https://github.com/pyenv/pyenv-installer to install 149 | the necessary tools. Before running this ensure that you have installed the 150 | prerequisites for your platform according to the pyenv wiki page, 151 | https://github.com/pyenv/pyenv/wiki/common-build-problems. 152 | 153 | On WSL you should treat your platform as whatever Linux distribution you've 154 | chosen to install. 155 | 156 | Once you have installed "pyenv" you will need to add the following lines to 157 | your ".bashrc": 158 | 159 | export PATH="$PATH:$HOME/.pyenv/bin" 160 | eval "$(pyenv init -)" 161 | eval "$(pyenv virtualenv-init -)" 162 | END_OF_LINE 163 | exit 1 164 | fi 165 | 166 | # Use GNU getopt to parse options 167 | if ! PARSED=$(getopt --options $SHORTOPTS --longoptions $LONGOPTS --name "$0" -- "$@"); then 168 | echo "Error parsing options" 169 | exit 1 170 | fi 171 | eval set -- "$PARSED" 172 | 173 | while true; do 174 | case "$1" in 175 | -f | --force) 176 | FORCE=1 177 | shift 178 | ;; 179 | -h | --help) 180 | echo "$USAGE" 181 | exit 0 182 | ;; 183 | -i | --install-hooks) 184 | INSTALL_HOOKS=1 185 | shift 186 | ;; 187 | -l | --list-versions) 188 | LIST_VERSIONS=1 189 | shift 190 | ;; 191 | -p | --python-version) 192 | PYTHON_VERSION="$2" 193 | shift 2 194 | # Check the Python version being passed in. 195 | check_python_version "$PYTHON_VERSION" 196 | ;; 197 | -v | --venv-name) 198 | VENV_NAME="$2" 199 | shift 2 200 | ;; 201 | --) 202 | shift 203 | break 204 | ;; 205 | *) 206 | # Unreachable due to GNU getopt handling all options 207 | echo "Programming error" 208 | exit 64 209 | ;; 210 | esac 211 | done 212 | 213 | # Determine the virtual environment name 214 | if [ -n "$VENV_NAME" ]; then 215 | # Use the user-provided environment name 216 | env_name="$VENV_NAME" 217 | else 218 | # Set the environment name to the last part of the working directory. 219 | env_name=${PWD##*/} 220 | fi 221 | 222 | # List Python versions and select one interactively. 223 | if [ $LIST_VERSIONS -ne 0 ]; then 224 | echo Available Python versions: 225 | python_versions 226 | # Read the user's desired Python version. 227 | # -r: treat backslashes as literal, -p: display prompt before input. 228 | read -r -p "Enter the desired Python version: " PYTHON_VERSION 229 | # Check the Python version being passed in. 230 | check_python_version "$PYTHON_VERSION" 231 | fi 232 | 233 | # Remove any lingering local configuration. 234 | if [ $FORCE -ne 0 ]; then 235 | rm -f .python-version 236 | pyenv virtualenv-delete --force "${env_name}" || true 237 | elif [[ -f .python-version ]]; then 238 | cat << 'END_OF_LINE' 239 | An existing .python-version file was found. 
Either remove this file yourself 240 | or re-run with the --force option to have it deleted along with the associated 241 | virtual environment. 242 | 243 | rm .python-version 244 | 245 | END_OF_LINE 246 | exit 1 247 | fi 248 | 249 | # Create a new virtual environment for this project 250 | # 251 | # If $PYTHON_VERSION is undefined then the current pyenv Python version will be used. 252 | # 253 | # We can't quote ${PYTHON_VERSION:=} below since if the variable is 254 | # undefined then we want nothing to appear; this is the reason for the 255 | # "shellcheck disable" line below. 256 | # 257 | # shellcheck disable=SC2086 258 | if ! pyenv virtualenv ${PYTHON_VERSION:=} "${env_name}"; then 259 | cat << END_OF_LINE 260 | An existing virtual environment named $env_name was found. Either delete this 261 | environment yourself or re-run with the --force option to have it deleted. 262 | 263 | pyenv virtualenv-delete ${env_name} 264 | 265 | END_OF_LINE 266 | exit 1 267 | fi 268 | 269 | # Set the local application-specific Python version(s) by writing the 270 | # version name to a file named `.python-version'. 271 | pyenv local "${env_name}" 272 | 273 | # Upgrade pip and friends 274 | python3 -m pip install --upgrade pip setuptools wheel 275 | 276 | # Find a requirements file (if possible) and install 277 | for req_file in "requirements-dev.txt" "requirements-test.txt" "requirements.txt"; do 278 | if [[ -f $req_file ]]; then 279 | pip install --requirement $req_file 280 | break 281 | fi 282 | done 283 | 284 | # Install git pre-commit hooks now or later. 285 | pre-commit install ${INSTALL_HOOKS:+"--install-hooks"} 286 | 287 | # Setup git remotes from lineage configuration 288 | # This could fail if the remotes are already setup, but that is ok. 289 | set +o errexit 290 | 291 | eval "$( 292 | python3 << 'END_OF_LINE' 293 | from pathlib import Path 294 | import yaml 295 | import sys 296 | 297 | LINEAGE_CONFIG = Path(".github/lineage.yml") 298 | 299 | if not LINEAGE_CONFIG.exists(): 300 | print("No lineage configuration found.", file=sys.stderr) 301 | sys.exit(0) 302 | 303 | with LINEAGE_CONFIG.open("r") as f: 304 | lineage = yaml.safe_load(stream=f) 305 | 306 | if lineage["version"] == "1": 307 | for parent_name, v in lineage["lineage"].items(): 308 | remote_url = v["remote-url"] 309 | print(f"git remote add {parent_name} {remote_url};") 310 | print(f"git remote set-url --push {parent_name} no_push;") 311 | else: 312 | print(f'Unsupported lineage version: {lineage["version"]}', file=sys.stderr) 313 | END_OF_LINE 314 | )" 315 | 316 | # Install all necessary mypy type stubs 317 | mypy --install-types --non-interactive src/ 318 | 319 | # Qapla' 320 | echo "Success!" 321 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is the setup module for the aws_profile_sync project. 
3 | 4 | Based on: 5 | 6 | - https://packaging.python.org/distributing/ 7 | - https://github.com/pypa/sampleproject/blob/master/setup.py 8 | - https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure 9 | """ 10 | 11 | # Standard Python Libraries 12 | import codecs 13 | from glob import glob 14 | from os.path import abspath, basename, dirname, join, splitext 15 | 16 | # Third-Party Libraries 17 | from setuptools import find_packages, setup 18 | 19 | 20 | def readme(): 21 | """Read in and return the contents of the project's README.md file.""" 22 | with open("README.md", encoding="utf-8") as f: 23 | return f.read() 24 | 25 | 26 | # Below two methods were pulled from: 27 | # https://packaging.python.org/guides/single-sourcing-package-version/ 28 | def read(rel_path): 29 | """Open a file for reading from a given relative path.""" 30 | here = abspath(dirname(__file__)) 31 | with codecs.open(join(here, rel_path), "r") as fp: 32 | return fp.read() 33 | 34 | 35 | def get_version(version_file): 36 | """Extract a version number from the given file path.""" 37 | for line in read(version_file).splitlines(): 38 | if line.startswith("__version__"): 39 | delim = '"' if '"' in line else "'" 40 | return line.split(delim)[1] 41 | raise RuntimeError("Unable to find version string.") 42 | 43 | 44 | setup( 45 | name="aws_profile_sync", 46 | # Versions should comply with PEP440 47 | version=get_version("src/aws_profile_sync/_version.py"), 48 | description="AWS Profile Sync utility", 49 | long_description=readme(), 50 | long_description_content_type="text/markdown", 51 | # Landing page for CISA's cybersecurity mission 52 | url="https://www.cisa.gov/cybersecurity", 53 | # Additional URLs for this project per 54 | # https://packaging.python.org/guides/distributing-packages-using-setuptools/#project-urls 55 | project_urls={ 56 | "Source": "https://github.com/cisagov/aws-profile-sync", 57 | "Tracker": "https://github.com/cisagov/aws-profile-sync/issues", 58 | }, 59 | # Author details 60 | author="Cybersecurity and Infrastructure Security Agency", 61 | author_email="github@cisa.dhs.gov", 62 | license="License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", 63 | # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 64 | classifiers=[ 65 | # How mature is this project? Common values are 66 | # 3 - Alpha 67 | # 4 - Beta 68 | # 5 - Production/Stable 69 | "Development Status :: 3 - Alpha", 70 | # Indicate who your project is intended for 71 | "Intended Audience :: Developers", 72 | # Pick your license as you wish (should match "license" above) 73 | "License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", 74 | # Specify the Python versions you support here. In particular, ensure 75 | # that you indicate whether you support Python 2, Python 3 or both. 76 | "Programming Language :: Python :: 3", 77 | "Programming Language :: Python :: 3 :: Only", 78 | "Programming Language :: Python :: 3.9", 79 | "Programming Language :: Python :: 3.10", 80 | "Programming Language :: Python :: 3.11", 81 | "Programming Language :: Python :: 3.12", 82 | "Programming Language :: Python :: 3.13", 83 | "Programming Language :: Python :: Implementation :: CPython", 84 | ], 85 | python_requires=">=3.9", 86 | # What does your project relate to? 
87 | keywords="aws profile sync", 88 | packages=find_packages(where="src"), 89 | package_dir={"": "src"}, 90 | package_data={}, 91 | py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")], 92 | include_package_data=True, 93 | install_requires=["docopt", "more-itertools", "schema", "setuptools"], 94 | extras_require={ 95 | # IMPORTANT: Keep type hinting-related dependencies of the dev section 96 | # in sync with the mypy pre-commit hook configuration (see 97 | # .pre-commit-config.yaml). Any changes to type hinting-related 98 | # dependencies here should be reflected in the additional_dependencies 99 | # field of the mypy pre-commit hook to avoid discrepancies in type 100 | # checking between environments. 101 | "dev": [ 102 | "types-docopt", 103 | "types-setuptools", 104 | ], 105 | "test": [ 106 | "coverage", 107 | "coveralls", 108 | "pre-commit", 109 | "pytest-cov", 110 | "pytest", 111 | ], 112 | }, 113 | # Conveniently allows one to run the CLI tool as `aws-profile-sync` 114 | entry_points={ 115 | "console_scripts": ["aws-profile-sync = aws_profile_sync.aws_profile_sync:main"] 116 | }, 117 | ) 118 | -------------------------------------------------------------------------------- /src/aws_profile_sync/__init__.py: -------------------------------------------------------------------------------- 1 | """The aws_profile_sync library.""" 2 | 3 | from . import aws_profile_sync 4 | from ._version import __version__ # noqa: F401 5 | 6 | __all__ = ["aws_profile_sync"] 7 | -------------------------------------------------------------------------------- /src/aws_profile_sync/__main__.py: -------------------------------------------------------------------------------- 1 | """Code to run if this package is used as a Python module.""" 2 | 3 | from .aws_profile_sync import main 4 | 5 | main() 6 | -------------------------------------------------------------------------------- /src/aws_profile_sync/_version.py: -------------------------------------------------------------------------------- 1 | """This file defines the version of this module.""" 2 | 3 | __version__ = "3.0.0" 4 | -------------------------------------------------------------------------------- /src/aws_profile_sync/aws_profile_sync.py: -------------------------------------------------------------------------------- 1 | """Synchronize AWS CLI named profiles from a remote source. 2 | 3 | This utility will fetch shared named profiles from a remote source 4 | and then update an aws credentials file. 5 | 6 | EXIT STATUS 7 | This utility exits with one of the following values: 8 | 0 Update was successful. 9 | >0 An error occurred. 10 | 11 | Usage: 12 | aws-profile-sync [options] 13 | aws-profile-sync (-h | --help) 14 | 15 | Options: 16 | -c --credentials-file=FILENAME The credentials file to update. 17 | [default: ~/.aws/credentials] 18 | -d --dry-run Show what would be changed, but don't modify anything 19 | on disk. 20 | -h --help Show this message. 21 | --log-level=LEVEL If specified, then the log level will be set to 22 | the specified value. Valid values are "debug", "info", 23 | "warning", "error", and "critical". [default: info] 24 | -w --warn-missing Treat missing overrides as a warning instead of an error. 25 | """ 26 | 27 | # Standard Python Libraries 28 | import hashlib 29 | import logging 30 | from pathlib import Path 31 | import sys 32 | 33 | # Third-Party Libraries 34 | import docopt 35 | from more_itertools import peekable 36 | from schema import And, Schema, SchemaError, Use 37 | 38 | from . 
import handlers 39 | from ._version import __version__ 40 | 41 | MAGIC_START = "#!profile-sync " 42 | MAGIC_STOP = "#!profile-sync-stop" 43 | PROFILE_START = "[" 44 | SYNC_PATH = "sync" 45 | 46 | 47 | def generate_profile(line_gen, config_overrides, missing_override_level=logging.ERROR): 48 | """Generate a profile block with applied overrides. 49 | 50 | Args: 51 | line_gen: A peekable generator that will yield lines of a single profile. 52 | The first line yielded must be the profile header in the form: [name] 53 | line_gen will be read until a new profile header is peeked or the end of 54 | file. 55 | config_overrides: A dictonary mapping configuration names to their values. 56 | Any matching configuration names read from the line_gen will be overriden 57 | with the assocated value from this dictionary. Only overrides that match 58 | will be yielded by the generator. 59 | 60 | Yields: 61 | Modified lines read from line_gen. 62 | 63 | """ 64 | try: 65 | # The first line is the profile name in brackets 66 | yield next(line_gen) 67 | # Read until the next profile start or EOF 68 | while not line_gen.peek().startswith(PROFILE_START): 69 | line = next(line_gen) 70 | # Output configurations after applying overrides 71 | if "=" in line: 72 | key, value = line.split("=", 1) 73 | key = key.strip() 74 | value = value.strip() 75 | if not value and key not in config_overrides: 76 | logging.log( 77 | missing_override_level, 78 | f"No override provided for an empty external configuration line: {key}", 79 | ) 80 | if missing_override_level >= logging.ERROR: 81 | raise ValueError(f"Missing override: {key}") 82 | yield f"{key} = {config_overrides.get(key, value)}" 83 | else: 84 | # Comment or whitespace pass through 85 | yield line 86 | 87 | except StopIteration: 88 | pass 89 | return 90 | 91 | 92 | def read_external(line_gen, config_overrides, missing_override_level=logging.ERROR): 93 | """Read an external source for profiles and apply configuration overrides. 94 | 95 | Args: 96 | line_gen: A peekable generator that will yield lines of one or more profiles. 97 | config_overrides: A dictonary mapping configuration names to their values. 98 | 99 | Yields: 100 | Modified lines read from line_gen. 101 | 102 | """ 103 | try: 104 | while True: 105 | if line_gen.peek().startswith(PROFILE_START): 106 | yield from generate_profile( 107 | line_gen, 108 | config_overrides, 109 | missing_override_level=missing_override_level, 110 | ) 111 | else: 112 | yield next(line_gen) 113 | except StopIteration: 114 | pass 115 | return 116 | 117 | 118 | def parse_magic(line): 119 | """Parse a magic config line and return the associated parameters. 120 | 121 | Parses a magic line of the following form: 122 | 123 | #!profile-sync url [handler-param=value...] -- [config-override=value...] 124 | 125 | Args: 126 | line: A magic string containing a URL, handler-specific parameters, and a list 127 | of key / value configuration overrides. 128 | 129 | Returns: 130 | A tuple containing the URL, handler parameter dictonary, and a configuration 131 | overrides dictionary. 
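```python
# Editor's illustration (not part of this module): what parse_magic() returns
# for a shortened version of the magic line shown in the README. The values
# below follow directly from the splitting logic implemented in this function.
from aws_profile_sync.aws_profile_sync import parse_magic

url, handler_params, config_overrides = parse_magic(
    "#!profile-sync ssh://git@github.com/aceofspades/aws-profiles.git "
    "branch=master filename=roles -- source_profile=cool-user"
)
assert url == "ssh://git@github.com/aceofspades/aws-profiles.git"
assert handler_params == {"branch": "master", "filename": "roles"}
assert config_overrides == {"source_profile": "cool-user"}
```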
132 | 133 | """ 134 | logging.debug(f"Parsing magic: {line}") 135 | if "--" in line: 136 | # Split the line into handler and override sections 137 | handler_line, overrides_line = line.split("--") 138 | else: 139 | handler_line = line 140 | overrides_line = "" 141 | # Split the line into terms 142 | handler_terms = handler_line.split() 143 | # Discard the magic 144 | handler_terms.pop(0) 145 | url = handler_terms.pop(0) 146 | # Remaining handler_terms are params to the handler 147 | handler_params = dict(map(lambda x: x.split("="), handler_terms)) 148 | 149 | # Process override line 150 | override_terms = overrides_line.split() 151 | # Split remaining terms into key value pairs 152 | config_overrides = dict(map(lambda x: x.split("="), override_terms)) 153 | 154 | return url, handler_params, config_overrides 155 | 156 | 157 | def handle_magic(magic_line, work_path, missing_override_level=logging.ERROR): 158 | """Handle the magic line and route it to the correct fetcher. 159 | 160 | Args: 161 | magic_line: A magic string to handle. 162 | work_path: A directory where the handler can store state. 163 | 164 | Returns: 165 | A generator that will access the external resource referenced in the 166 | magic line. 167 | 168 | """ 169 | url, handler_params, config_overrides = parse_magic(magic_line) 170 | logging.debug(f"Processing remote: {url}") 171 | clazz = handlers.find_handler(url) 172 | if not clazz: 173 | raise ValueError(f"Could not find a handler that can fetch: {url}") 174 | logging.debug(f"Using {clazz} to fetch external data.") 175 | # Instanciate the handler 176 | handler = clazz(work_path) 177 | external_profile_gen = peekable(handler.fetch(url, **handler_params)) 178 | return read_external(external_profile_gen, config_overrides, missing_override_level) 179 | 180 | 181 | def generate_credentials_file(credentials_file, missing_override_level=logging.ERROR): 182 | """Generate lines for a credentials file by expanding external references. 183 | 184 | Args: 185 | credentials_file: The credentials file to read. 186 | 187 | Returns: 188 | A generator that will return updated lines based on the input credentials file. 189 | 190 | """ 191 | logging.info(f"Reading credentials file located at: {credentials_file}") 192 | in_magic_block = False 193 | work_path = credentials_file.parent / SYNC_PATH 194 | line_gen = (line for line in open(credentials_file)) 195 | while True: 196 | try: 197 | line = next(line_gen) 198 | except StopIteration: 199 | break 200 | if line.startswith(MAGIC_START): 201 | yield line + "\n" 202 | for external_line in handle_magic(line, work_path, missing_override_level): 203 | yield external_line 204 | if not external_line.endswith("\n"): 205 | yield "\n" 206 | yield "\n" + MAGIC_STOP + "\n" 207 | in_magic_block = True 208 | continue 209 | if line.startswith(MAGIC_STOP): 210 | in_magic_block = False 211 | continue 212 | if not in_magic_block: 213 | yield line 214 | 215 | 216 | def files_identical(path1, path2): 217 | """Compare two files to see if they are identical. 218 | 219 | Args: 220 | path1: A pathlib.Path to the first file. 221 | path2: A pathlib.Path to the second file. 222 | 223 | Returns: 224 | True if the files have the same hash, False otherwise. 
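```python
# Editor's illustration (not part of this module): how main() further below
# uses this comparison to decide whether anything changed. The paths are
# hypothetical; comparing SHA-256 digests here is equivalent to comparing the
# files' raw bytes.
from pathlib import Path

from aws_profile_sync.aws_profile_sync import files_identical

credentials = Path("~/.aws/credentials").expanduser()
temp = credentials.with_suffix(".temp")
if credentials.exists() and temp.exists():
    unchanged = files_identical(temp, credentials)
```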
225 | 226 | """ 227 | hash1 = hashlib.sha256() 228 | hash2 = hashlib.sha256() 229 | hash1.update(path1.read_bytes()) 230 | hash2.update(path2.read_bytes()) 231 | return hash1.digest() == hash2.digest() 232 | 233 | 234 | def main() -> None: 235 | """Set up logging and generate a new credentials file.""" 236 | args = docopt.docopt(__doc__, version=__version__) 237 | # Validate and convert arguments as needed 238 | schema = Schema( 239 | { 240 | "--log-level": And( 241 | str, 242 | Use(str.lower), 243 | lambda n: n in ("debug", "info", "warning", "error", "critical"), 244 | error="Possible values for --log-level are " 245 | + "debug, info, warning, error, and critical.", 246 | ), 247 | str: object, # Don't care about other keys, if any 248 | } 249 | ) 250 | 251 | try: 252 | args = schema.validate(args) 253 | except SchemaError as err: 254 | # Exit because one or more of the arguments were invalid 255 | print(err, file=sys.stderr) 256 | sys.exit(1) 257 | 258 | # Assign validated arguments to variables 259 | credentials_file = Path(args["--credentials-file"]).expanduser() 260 | dry_run = args["--dry-run"] 261 | log_level = args["--log-level"] 262 | missing_override_level = ( 263 | logging.WARNING if args["--warn-missing"] else logging.ERROR 264 | ) 265 | 266 | # Set up logging 267 | logging.basicConfig( 268 | format="%(asctime)-15s %(levelname)s %(message)s", level=log_level.upper() 269 | ) 270 | 271 | if dry_run: 272 | # The user requested a dry-run. Just output the new file to stdout 273 | logging.info("Dry run. Outputting credentials file to standard out:") 274 | for line in generate_credentials_file(credentials_file, missing_override_level): 275 | sys.stdout.write(line) 276 | else: 277 | # Carefully craft a new credentials file on disk. 278 | temp_file = credentials_file.with_suffix(".temp") 279 | backup_file = credentials_file.with_suffix(".backup") 280 | logging.info(f"Writing new credentials file to: {temp_file}") 281 | with open(temp_file, "w") as out: 282 | for line in generate_credentials_file( 283 | credentials_file, missing_override_level 284 | ): 285 | out.write(line) 286 | 287 | # Check to see if the new files differs from the original. 288 | if files_identical(temp_file, credentials_file): 289 | # Nothing has changed. Delete the temp file. Preserve our backup. 290 | logging.info("No changes applied.") 291 | temp_file.unlink() 292 | else: 293 | # If everything has succeeded we swap in the new file and backup the original 294 | logging.info(f"Backing up previous credentials file to: {backup_file}") 295 | credentials_file.replace(backup_file) 296 | logging.info(f"Installing new credentials file to: {credentials_file}") 297 | temp_file.replace(credentials_file) 298 | 299 | # Stop logging and clean up 300 | logging.shutdown() 301 | -------------------------------------------------------------------------------- /src/aws_profile_sync/handlers/__init__.py: -------------------------------------------------------------------------------- 1 | """This module contains handlers for the various supported URLs.""" 2 | 3 | from .ssh_git import SSHGitHandler 4 | 5 | __all__ = ["SSHGitHandler"] 6 | 7 | 8 | def find_handler(url): 9 | """Find a handler that will support a URL. 10 | 11 | Args: 12 | url: The URL to find a handler for. 13 | 14 | Returns: 15 | If a handler is found a class will be returned, None otherwise. 
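```python
# Editor's illustration (not part of this module): with SSHGitHandler as the
# only entry in __all__, dispatch behaves as follows. The URLs are examples.
from aws_profile_sync import handlers

cls = handlers.find_handler("ssh://git@github.com/example/aws-profiles.git")
assert cls is handlers.SSHGitHandler  # starts with "ssh://" and ends with ".git"
assert handlers.find_handler("https://example.com/profiles.git") is None  # no handler matches
```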
16 | 17 | """ 18 | for handler in __all__: 19 | # Get the symbol for handler 20 | mod = globals()[handler] 21 | # Ask handler if it can handle the url 22 | if getattr(mod, "can_handle")(url): 23 | return mod 24 | return None 25 | -------------------------------------------------------------------------------- /src/aws_profile_sync/handlers/ssh_git.py: -------------------------------------------------------------------------------- 1 | """A Git repository over secure shell handler.""" 2 | 3 | # Standard Python Libraries 4 | import logging 5 | from pathlib import Path 6 | import subprocess # nosec: Security of subprocess has been considered 7 | 8 | 9 | class SSHGitHandler: 10 | """A Git repository over secure shell handler. 11 | 12 | This class can clone or update an existing clone of a remote repository that is 13 | served over ssh. 14 | """ 15 | 16 | CLONE_PATH = Path("git") 17 | 18 | @staticmethod 19 | def can_handle(url): 20 | """Determine if this class can handle a specified URL. 21 | 22 | Args: 23 | url: A URL of any format. 24 | 25 | Returns: 26 | True if the URL can be handled. False otherwise. 27 | 28 | """ 29 | return url.startswith("ssh://") and url.endswith(".git") 30 | 31 | def __init__(self, work_path): 32 | """Instantiate a new SSHGitHandler. 33 | 34 | The class will create a directory structure it requires to store cloned 35 | repositories within the working path. 36 | 37 | Args: 38 | work_path: A pathlib.Path pointing to a work directory. 39 | 40 | """ 41 | super().__init__() 42 | self.work_path = work_path / SSHGitHandler.CLONE_PATH 43 | self.work_path.mkdir(parents=True, exist_ok=True) 44 | 45 | def fetch(self, url, branch="master", filename="roles"): 46 | """Generate lines from the retrieved repository file. 47 | 48 | Args: 49 | url: A git-style URL pointing to a repository with profile formatted files. 50 | filename: The file to read from the repository. 51 | 52 | Yields: 53 | Lines read from the specified repository file. 54 | 55 | Raises: 56 | subprocess.CalledProcessError: If a subprocess returns a non-zero exit code. 57 | 58 | """ 59 | repo_name = url.split("/")[-1].split(".")[0] 60 | repo_path = self.work_path / repo_name 61 | read_file = repo_path / filename 62 | 63 | if repo_path.exists(): 64 | logging.info(f"Pulling {url}") 65 | subprocess.run(["git", "pull"], check=True, cwd=repo_path) # nosec 66 | else: 67 | logging.info(f"Cloning {url}") 68 | subprocess.run( # nosec 69 | ["git", "clone", url], check=True, cwd=self.work_path 70 | ) 71 | # Switch to the requested branch 72 | logging.debug(f"Switching to branch {branch}") 73 | subprocess.run(["git", "switch", branch], check=True, cwd=repo_path) # nosec 74 | 75 | logging.debug(f"Reading from repo: {read_file}") 76 | with read_file.open() as f: 77 | yield from f 78 | return 79 | -------------------------------------------------------------------------------- /tag.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o nounset 4 | set -o errexit 5 | set -o pipefail 6 | 7 | version=$(./bump-version show) 8 | 9 | git tag "v$version" && git push --tags 10 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | """pytest plugin configuration.
2 | 3 | https://docs.pytest.org/en/latest/writing_plugins.html#conftest-py-plugins 4 | """ 5 | 6 | # Third-Party Libraries 7 | import pytest 8 | 9 | 10 | def pytest_addoption(parser): 11 | """Add new commandline options to pytest.""" 12 | parser.addoption( 13 | "--runslow", action="store_true", default=False, help="run slow tests" 14 | ) 15 | 16 | 17 | def pytest_configure(config): 18 | """Register new markers.""" 19 | config.addinivalue_line("markers", "slow: mark test as slow") 20 | 21 | 22 | def pytest_collection_modifyitems(config, items): 23 | """Modify collected tests based on custom marks and commandline options.""" 24 | if config.getoption("--runslow"): 25 | # --runslow given in cli: do not skip slow tests 26 | return 27 | skip_slow = pytest.mark.skip(reason="need --runslow option to run") 28 | for item in items: 29 | if "slow" in item.keywords: 30 | item.add_marker(skip_slow) 31 | -------------------------------------------------------------------------------- /tests/credentials-test: -------------------------------------------------------------------------------- 1 | [test-user] 2 | aws_access_key_id = XXXXXXXXXXXXXXXXXXXX 3 | aws_secret_access_key = XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 4 | -------------------------------------------------------------------------------- /tests/test_aws_profile_sync.py: -------------------------------------------------------------------------------- 1 | """Tests for aws_profile_sync.""" 2 | 3 | # Standard Python Libraries 4 | import logging 5 | import os 6 | import sys 7 | from unittest.mock import patch 8 | 9 | # Third-Party Libraries 10 | import pytest 11 | 12 | # cisagov Libraries 13 | import aws_profile_sync 14 | 15 | log_levels = ( 16 | "debug", 17 | "info", 18 | "warning", 19 | "error", 20 | "critical", 21 | ) 22 | 23 | # define sources of version strings 24 | RELEASE_TAG = os.getenv("RELEASE_TAG") 25 | PROJECT_VERSION = aws_profile_sync.__version__ 26 | 27 | 28 | def test_stdout_version(capsys): 29 | """Verify that version string sent to stdout agrees with the module version.""" 30 | with pytest.raises(SystemExit): 31 | with patch.object(sys, "argv", ["bogus", "--version"]): 32 | aws_profile_sync.aws_profile_sync.main() 33 | captured = capsys.readouterr() 34 | assert ( 35 | captured.out == f"{PROJECT_VERSION}\n" 36 | ), "standard output by '--version' should agree with module.__version__" 37 | 38 | 39 | def test_running_as_module(capsys): 40 | """Verify that the __main__.py file loads correctly.""" 41 | with pytest.raises(SystemExit): 42 | with patch.object(sys, "argv", ["bogus", "--version"]): 43 | # F401 is a "Module imported but unused" warning. This import 44 | # emulates how this project would be run as a module. The only thing 45 | # being done by __main__ is importing the main entrypoint of the 46 | # package and running it, so there is nothing to use from this 47 | # import. As a result, we can safely ignore this warning. 
48 | # cisagov Libraries 49 | import aws_profile_sync.__main__ # noqa: F401 50 | captured = capsys.readouterr() 51 | assert ( 52 | captured.out == f"{PROJECT_VERSION}\n" 53 | ), "standard output by '--version' should agree with module.__version__" 54 | 55 | 56 | @pytest.mark.skipif( 57 | RELEASE_TAG in [None, ""], reason="this is not a release (RELEASE_TAG not set)" 58 | ) 59 | def test_release_version(): 60 | """Verify that release tag version agrees with the module version.""" 61 | assert ( 62 | RELEASE_TAG == f"v{PROJECT_VERSION}" 63 | ), "RELEASE_TAG does not match the project version" 64 | 65 | 66 | @pytest.mark.parametrize("level", log_levels) 67 | def test_log_levels(level): 68 | """Validate commandline log-level arguments.""" 69 | with patch.object( 70 | sys, 71 | "argv", 72 | ["bogus", f"--log-level={level}", "--credentials-file=tests/credentials-test"], 73 | ): 74 | with patch.object(logging.root, "handlers", []): 75 | assert ( 76 | logging.root.hasHandlers() is False 77 | ), "root logger should not have handlers yet" 78 | return_code = None 79 | try: 80 | aws_profile_sync.aws_profile_sync.main() 81 | except SystemExit as sys_exit: 82 | return_code = sys_exit.code 83 | assert ( 84 | logging.root.hasHandlers() is True 85 | ), "root logger should now have a handler" 86 | assert ( 87 | logging.getLevelName(logging.root.getEffectiveLevel()) == level.upper() 88 | ), f"root logger level should be set to {level.upper()}" 89 | assert return_code is None, "main() should return success" 90 | 91 | 92 | def test_bad_log_level(): 93 | """Validate bad log-level argument returns error.""" 94 | with patch.object( 95 | sys, 96 | "argv", 97 | ["bogus", "--log-level=emergency", "--credentials-file=tests/credentials-test"], 98 | ): 99 | return_code = None 100 | try: 101 | aws_profile_sync.aws_profile_sync.main() 102 | except SystemExit as sys_exit: 103 | return_code = sys_exit.code 104 | assert return_code == 1, "main() should return failure" 105 | --------------------------------------------------------------------------------
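CONTRIBUTING.md asks that new functionality come with new or extended tests. As a minimal sketch of what such an addition could look like (not part of the repository; the test name and assertions are illustrative), the following test exercises the `--dry-run` flag against the bundled `tests/credentials-test` file in the same style as the tests above:

```python
# Standard Python Libraries
import sys
from unittest.mock import patch

# cisagov Libraries
import aws_profile_sync


def test_dry_run(capsys):
    """Verify that --dry-run writes the resulting credentials file to stdout."""
    with patch.object(
        sys,
        "argv",
        ["bogus", "--dry-run", "--credentials-file=tests/credentials-test"],
    ):
        return_code = None
        try:
            aws_profile_sync.aws_profile_sync.main()
        except SystemExit as sys_exit:
            return_code = sys_exit.code
    captured = capsys.readouterr()
    assert return_code is None, "main() should return success"
    assert "[test-user]" in captured.out, "the test profile should appear on stdout"
```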