├── .github
│   ├── dependabot.yml
│   └── workflows
│       ├── publish.yml
│       └── test.yml
├── .gitignore
├── .readthedocs.yaml
├── LICENSE
├── README.md
├── docs
│   ├── .gitignore
│   ├── Makefile
│   ├── conf.py
│   ├── configuration.md
│   ├── contributing.md
│   ├── create.md
│   ├── help.md
│   ├── index.md
│   ├── other-commands.md
│   ├── policy-documents.md
│   └── requirements.txt
├── s3_credentials
│   ├── __init__.py
│   ├── cli.py
│   └── policies.py
├── setup.py
└── tests
    ├── conftest.py
    ├── test_dry_run.py
    ├── test_integration.py
    └── test_s3_credentials.py

/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "pip"
4 |   directory: "/"
5 |   schedule:
6 |     interval: "daily"
7 | 
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python Package
2 | 
3 | on:
4 |   release:
5 |     types: [created]
6 | 
7 | permissions:
8 |   contents: read
9 | 
10 | jobs:
11 |   test:
12 |     runs-on: ubuntu-latest
13 |     strategy:
14 |       matrix:
15 |         python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
16 |     steps:
17 |     - uses: actions/checkout@v4
18 |     - name: Set up Python ${{ matrix.python-version }}
19 |       uses: actions/setup-python@v5
20 |       with:
21 |         python-version: ${{ matrix.python-version }}
22 |         cache: pip
23 |         cache-dependency-path: setup.py
24 |     - name: Install dependencies
25 |       run: |
26 |         pip install '.[test]'
27 |     - name: Run tests
28 |       run: |
29 |         pytest
30 |   deploy:
31 |     runs-on: ubuntu-latest
32 |     needs: [test]
33 |     environment: release
34 |     permissions:
35 |       id-token: write
36 |     steps:
37 |     - uses: actions/checkout@v4
38 |     - name: Set up Python
39 |       uses: actions/setup-python@v5
40 |       with:
41 |         python-version: "3.12"
42 |         cache: pip
43 |         cache-dependency-path: setup.py
44 |     - name: Install dependencies
45 |       run: |
46 |         pip install setuptools wheel build
47 |     - name: Build
48 |       run: |
49 |         python -m build
50 |     - name: Publish
51 |       uses: pypa/gh-action-pypi-publish@release/v1
52 | 
53 | 
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 | 
3 | on: [push, pull_request]
4 | 
5 | permissions:
6 |   contents: read
7 | 
8 | jobs:
9 |   test:
10 |     runs-on: ubuntu-latest
11 |     strategy:
12 |       matrix:
13 |         python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
14 |     steps:
15 |     - uses: actions/checkout@v4
16 |     - name: Set up Python ${{ matrix.python-version }}
17 |       uses: actions/setup-python@v5
18 |       with:
19 |         python-version: ${{ matrix.python-version }}
20 |         cache: pip
21 |         cache-dependency-path: setup.py
22 |     - name: Install dependencies
23 |       run: |
24 |         pip install '.[test]'
25 |     - name: Run tests
26 |       run: |
27 |         pytest
28 |     - name: Check if cog needs to run
29 |       run: |
30 |         cog --check README.md
31 |         cog --check docs/*.md
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .venv
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 | venv
6 | .eggs
7 | .pytest_cache
8 | *.egg-info
9 | .DS_Store
10 | 
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 | 
3 | build:
4 |   os: ubuntu-22.04
5 |   tools:
6 |     python: 
"3.11" 7 | 8 | sphinx: 9 | configuration: docs/conf.py 10 | 11 | formats: 12 | - pdf 13 | - epub 14 | 15 | python: 16 | install: 17 | - requirements: docs/requirements.txt 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # s3-credentials
2 | 
3 | [![PyPI](https://img.shields.io/pypi/v/s3-credentials.svg)](https://pypi.org/project/s3-credentials/)
4 | [![Changelog](https://img.shields.io/github/v/release/simonw/s3-credentials?include_prereleases&label=changelog)](https://github.com/simonw/s3-credentials/releases)
5 | [![Tests](https://github.com/simonw/s3-credentials/workflows/Test/badge.svg)](https://github.com/simonw/s3-credentials/actions?query=workflow%3ATest)
6 | [![Documentation Status](https://readthedocs.org/projects/s3-credentials/badge/?version=latest)](https://s3-credentials.readthedocs.org/)
7 | [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/simonw/s3-credentials/blob/master/LICENSE)
8 | 
9 | A tool for creating credentials for accessing S3 buckets
10 | 
11 | For project background, see [s3-credentials: a tool for creating credentials for S3 buckets](https://simonwillison.net/2021/Nov/3/s3-credentials/) on my blog.
12 | 
13 | ## Installation
14 | 
15 |     pip install s3-credentials
16 | 
17 | ## Basic usage
18 | 
19 | To create a new S3 bucket and output credentials that can be used with only that bucket:
20 | ```
21 | % s3-credentials create my-new-s3-bucket --create-bucket
22 | Created bucket: my-new-s3-bucket
23 | Created user: s3.read-write.my-new-s3-bucket with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess
24 | Attached policy s3.read-write.my-new-s3-bucket to user s3.read-write.my-new-s3-bucket
25 | Created access key for user: s3.read-write.my-new-s3-bucket
26 | {
27 |     "UserName": "s3.read-write.my-new-s3-bucket",
28 |     "AccessKeyId": "AKIAWXFXAIOZOYLZAEW5",
29 |     "Status": "Active",
30 |     "SecretAccessKey": "...",
31 |     "CreateDate": "2021-11-03 01:38:24+00:00"
32 | }
33 | ```
34 | The tool can do a lot more than this. See the [documentation](https://s3-credentials.readthedocs.io/) for details.
35 | 
36 | ## Documentation
37 | 
38 | - [Full documentation](https://s3-credentials.readthedocs.io/)
39 | - [Command help reference](https://s3-credentials.readthedocs.io/en/stable/help.html)
40 | - [Release notes](https://github.com/simonw/s3-credentials/releases)
41 | 
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | _build
2 | 
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | SPHINXPROJ    = s3-credentials
8 | SOURCEDIR     = .
9 | BUILDDIR      = _build
10 | 
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | 	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 | 
15 | .PHONY: help Makefile
16 | 
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | 	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 | 
22 | livehtml:
23 | 	sphinx-autobuild -b html "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
24 | 
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | 
4 | from subprocess import PIPE, Popen
5 | 
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 | 
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | #
19 | # import os
20 | # import sys
21 | # sys.path.insert(0, os.path.abspath('.'))
22 | 
23 | 
24 | # -- General configuration ------------------------------------------------
25 | 
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #
28 | # needs_sphinx = '1.0'
29 | 
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = ["myst_parser"]
34 | 
35 | # Add any paths that contain templates here, relative to this directory.
36 | templates_path = ["_templates"]
37 | 
38 | # The suffix(es) of source filenames.
39 | # You can specify multiple suffixes as a list of strings:
40 | #
41 | # source_suffix = ['.rst', '.md']
42 | source_suffix = ".rst"
43 | 
44 | # The master toctree document.
45 | master_doc = "index"
46 | 
47 | # General information about the project.
48 | project = "s3-credentials"
49 | copyright = "2022, Simon Willison"
50 | author = "Simon Willison"
51 | 
52 | # The version info for the project you're documenting, acts as replacement for
53 | # |version| and |release|, also used in various other places throughout the
54 | # built documents.
55 | #
56 | # The short X.Y version.
57 | pipe = Popen("git describe --tags --always", stdout=PIPE, shell=True)
58 | git_version = pipe.stdout.read().decode("utf8")
59 | 
60 | if git_version:
61 |     version = git_version.rsplit("-", 1)[0]
62 |     release = git_version
63 | else:
64 |     version = ""
65 |     release = ""
66 | 
67 | # The language for content autogenerated by Sphinx. Refer to documentation
68 | # for a list of supported languages.
69 | #
70 | # This is also used if you do content translation via gettext catalogs.
71 | # Usually you set "language" from the command line for these cases.
72 | language = "en"
73 | 
74 | # List of patterns, relative to source directory, that match files and
75 | # directories to ignore when looking for source files.
76 | # These patterns also affect html_static_path and html_extra_path
77 | exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
78 | 
79 | # The name of the Pygments (syntax highlighting) style to use.
80 | pygments_style = "sphinx"
81 | 
82 | # If true, `todo` and `todoList` produce output, else they produce nothing.
83 | todo_include_todos = False
84 | 
85 | 
86 | # -- Options for HTML output ----------------------------------------------
87 | 
88 | # The theme to use for HTML and HTML Help pages. See the documentation for
89 | # a list of builtin themes.
90 | #
91 | html_theme = "furo"
92 | 
93 | # Theme options are theme-specific and customize the look and feel of a theme
94 | # further. For a list of options available for each theme, see the
95 | # documentation.
96 | 
97 | html_theme_options = {}
98 | html_title = "s3-credentials"
99 | 
100 | # Add any paths that contain custom static files (such as style sheets) here,
101 | # relative to this directory. They are copied after the builtin static files,
102 | # so a file named "default.css" will overwrite the builtin "default.css".
103 | html_static_path = ["_static"]
104 | 
105 | 
106 | # -- Options for HTMLHelp output ------------------------------------------
107 | 
108 | # Output file base name for HTML help builder.
109 | htmlhelp_basename = "s3-credentials-doc"
110 | 
111 | 
112 | # -- Options for LaTeX output ---------------------------------------------
113 | 
114 | latex_elements = {
115 |     # The paper size ('letterpaper' or 'a4paper').
116 |     #
117 |     # 'papersize': 'letterpaper',
118 |     # The font size ('10pt', '11pt' or '12pt').
119 |     #
120 |     # 'pointsize': '10pt',
121 |     # Additional stuff for the LaTeX preamble.
122 |     #
123 |     # 'preamble': '',
124 |     # Latex figure (float) alignment
125 |     #
126 |     # 'figure_align': 'htbp',
127 | }
128 | 
129 | # Grouping the document tree into LaTeX files. List of tuples
130 | # (source start file, target name, title,
131 | #  author, documentclass [howto, manual, or own class]).
132 | latex_documents = [
133 |     (
134 |         master_doc,
135 |         "s3-credentials.tex",
136 |         "s3-credentials documentation",
137 |         "Simon Willison",
138 |         "manual",
139 |     )
140 | ]
141 | 
142 | 
143 | # -- Options for manual page output ---------------------------------------
144 | 
145 | # One entry per manual page. List of tuples
146 | # (source start file, name, description, authors, manual section).
147 | man_pages = [
148 |     (
149 |         master_doc,
150 |         "s3-credentials",
151 |         "s3-credentials documentation",
152 |         [author],
153 |         1,
154 |     )
155 | ]
156 | 
157 | 
158 | # -- Options for Texinfo output -------------------------------------------
159 | 
160 | # Grouping the document tree into Texinfo files. List of tuples
161 | # (source start file, target name, title, author,
162 | #  dir menu entry, description, category)
163 | texinfo_documents = [
164 |     (
165 |         master_doc,
166 |         "s3-credentials",
167 |         "s3-credentials documentation",
168 |         author,
169 |         "s3-credentials",
170 |         "A tool for creating credentials for accessing S3 buckets",
171 |         "Miscellaneous",
172 |     )
173 | ]
174 | 
--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------
1 | # Configuration
2 | 
3 | This tool uses [boto3](https://boto3.amazonaws.com/) under the hood, which supports [a number of different ways](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) of providing your AWS credentials.
4 | 
5 | If you have an existing `~/.aws/config` or `~/.aws/credentials` file the tool will use that.
6 | 
7 | One way to create those files is using the `aws configure` command, available if you first run `pip install awscli`.
8 | 
9 | Alternatively, you can set the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables before calling this tool.
10 | 
11 | You can also use the `--access-key`, `--secret-key`, `--session-token` and `--auth` options documented below.
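
For example - a quick way to check which identity the tool will pick up is to set the two environment variables and then run the `whoami` command; the key values here are placeholders:

```bash
# Supply credentials through the environment (one of the mechanisms boto3 reads)
export AWS_ACCESS_KEY_ID=AKIA...              # placeholder - use your real access key ID
export AWS_SECRET_ACCESS_KEY=your-secret-key  # placeholder - use your real secret key

# Confirm which AWS identity s3-credentials will authenticate as
s3-credentials whoami
```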
12 | 
13 | ## Common command options
14 | 
15 | All of the `s3-credentials` commands also accept the following options for authenticating against AWS:
16 | 
17 | - `--access-key`: AWS access key ID
18 | - `--secret-key`: AWS secret access key
19 | - `--session-token`: AWS session token
20 | - `--endpoint-url`: Custom endpoint URL
21 | - `--auth`: file (or `-` for standard input) containing credentials to use
22 | 
23 | The file passed to `--auth` can be either a JSON file or an INI file. JSON files should contain the following:
24 | 
25 | ```json
26 | {
27 |     "AccessKeyId": "AKIAWXFXAIOZA5IR5PY4",
28 |     "SecretAccessKey": "g63..."
29 | }
30 | ```
31 | The JSON file can also optionally include a session token in a `"SessionToken"` key.
32 | 
33 | The INI format variant of this file should look like this:
34 | 
35 | ```ini
36 | [default]
37 | aws_access_key_id=AKIAWXFXAIOZNCR2ST7S
38 | aws_secret_access_key=g63...
39 | ```
40 | Any section headers will do - the tool will use the information from the first section it finds in the file that has an `aws_access_key_id` key.
41 | 
42 | These auth file formats are the same as those that can be created using the `create` command.
43 | 
--------------------------------------------------------------------------------
/docs/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 | 
3 | To contribute to this tool, first check out [the code](https://github.com/simonw/s3-credentials). Then create a new virtual environment:
4 | 
5 |     cd s3-credentials
6 |     python -m venv venv
7 |     source venv/bin/activate
8 | 
9 | Or if you are using `pipenv`:
10 | 
11 |     pipenv shell
12 | 
13 | Now install the dependencies and test dependencies:
14 | 
15 |     pip install -e '.[test]'
16 | 
17 | To run the tests:
18 | 
19 |     pytest
20 | 
21 | Any changes to the generated policies require an update to the README using [Cog](https://github.com/nedbat/cog):
22 | 
23 |     cog -r README.md
24 | 
25 | ## Integration tests
26 | 
27 | The main tests all use stubbed interfaces to AWS, so they will not make any outbound API calls.
28 | 
29 | There is also a suite of integration tests in `tests/test_integration.py` which DO make API calls to AWS, using credentials from your environment variables or `~/.aws/credentials` file.
30 | 
31 | These tests are skipped by default. If you have AWS configured with an account that has permission to run the actions required by `s3-credentials` (create users, roles, buckets etc.) you can run these tests using:
32 | 
33 |     pytest --integration
34 | 
35 | The tests will create a number of different users and buckets and should then delete them once they finish running.
36 | 
--------------------------------------------------------------------------------
/docs/create.md:
--------------------------------------------------------------------------------
1 | # Creating S3 credentials
2 | 
3 | The `s3-credentials create` command is the core feature of this tool. Pass it one or more S3 bucket names, specify a policy (read-write, read-only or write-only) and it will return AWS credentials that can be used to access those buckets.
4 | 
5 | These credentials can be **temporary** or **permanent**.
6 | 
7 | - Temporary credentials can last for between 15 minutes and 12 hours. They are created using [STS.AssumeRole()](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html).
8 | - Permanent credentials never expire. They are created by first creating a dedicated AWS user, then assigning a policy to that user and creating and returning an access key for it.
9 | 
10 | Make sure to record the `SecretAccessKey` because it will only be displayed once and cannot be recreated later on.
11 | 
12 | In this example I create permanent credentials for reading and writing files in my `static.niche-museums.com` S3 bucket:
13 | 
14 | ```
15 | % s3-credentials create static.niche-museums.com
16 | 
17 | Created user: s3.read-write.static.niche-museums.com with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess
18 | Attached policy s3.read-write.static.niche-museums.com to user s3.read-write.static.niche-museums.com
19 | Created access key for user: s3.read-write.static.niche-museums.com
20 | {
21 |     "UserName": "s3.read-write.static.niche-museums.com",
22 |     "AccessKeyId": "AKIAWXFXAIOZOYLZAEW5",
23 |     "Status": "Active",
24 |     "SecretAccessKey": "...",
25 |     "CreateDate": "2021-11-03 01:38:24+00:00"
26 | }
27 | ```
28 | If you add `--format ini`, the credentials will be output in INI format, suitable for pasting into a `~/.aws/credentials` file:
29 | ```
30 | % s3-credentials create static.niche-museums.com --format ini > ini.txt
31 | Created user: s3.read-write.static.niche-museums.com with permissions boundary: arn:aws:iam::aws:policy/AmazonS3FullAccess
32 | Attached policy s3.read-write.static.niche-museums.com to user s3.read-write.static.niche-museums.com
33 | Created access key for user: s3.read-write.static.niche-museums.com
34 | % cat ini.txt
35 | [default]
36 | aws_access_key_id=AKIAWXFXAIOZKGXI4PVO
37 | aws_secret_access_key=...
38 | ```
39 | 
40 | To create temporary credentials, add `--duration 15m` (or `1h` or `1200s`). The specified duration must be between 15 minutes and 12 hours.
41 | 
42 | ```
43 | % s3-credentials create static.niche-museums.com --duration 15m
44 | Assume role against arn:aws:iam::462092780466:role/s3-credentials.AmazonS3FullAccess for 900s
45 | {
46 |     "AccessKeyId": "ASIAWXFXAIOZPAHAYHUG",
47 |     "SecretAccessKey": "Nrnoc...",
48 |     "SessionToken": "FwoGZXIvYXd...mr9Fjs=",
49 |     "Expiration": "2021-11-11 03:24:07+00:00"
50 | }
51 | ```
52 | When using temporary credentials, the session token must be passed in addition to the access key and secret key.
53 | 
54 | The `create` command has a number of options:
55 | 
56 | - `--format TEXT`: The output format to use. Defaults to `json`, but can also be `ini`.
57 | - `--duration 15m`: For temporary credentials, how long should they last? This can be specified in seconds, minutes or hours using a suffix of `s`, `m` or `h` - but must be between 15 minutes and 12 hours.
58 | - `--username TEXT`: The username to use for the user that is created by the command (or the username of an existing user if you do not want to create a new one). If omitted, a default such as `s3.read-write.static.niche-museums.com` will be used.
59 | - `-c, --create-bucket`: Create the buckets if they do not exist. Without this, any missing buckets will be treated as an error.
60 | - `--prefix my-prefix/`: Credentials should only allow access to keys in the S3 bucket that start with this prefix.
61 | - `--public`: When creating a bucket, set it so that any file uploaded to that bucket can be downloaded by anyone who knows its filename. This attaches the {ref}`public_bucket_policy` and sets the `PublicAccessBlockConfiguration` to `false` for [every option](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PublicAccessBlockConfiguration.html).
62 | - `--website`: Sets the bucket to public and configures it to act as a website, with `index.html` treated as an index page and `error.html` used to display custom errors. The URL for the website will be `http://<bucket-name>.s3-website.<region>.amazonaws.com/` - the region defaults to `us-east-1` unless you specify a `--bucket-region`.
63 | - `--read-only`: The user should only be allowed to read files from the bucket.
64 | - `--write-only`: The user should only be allowed to write files to the bucket, but not read them. This can be useful for logging and backups.
65 | - `--policy filepath-or-string`: A custom policy document (as a file path, literal JSON string or `-` for standard input) - see below.
66 | - `--statement json-statement`: Custom JSON statement block to be added to the generated policy.
67 | - `--bucket-region`: If creating buckets, the region in which they should be created.
68 | - `--silent`: Don't output details of what is happening, just output the JSON for the created access credentials at the end.
69 | - `--dry-run`: Output details of AWS changes that would have been made without applying them.
70 | - `--user-permissions-boundary`: Custom [permissions boundary](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) to use for users created by this tool. The default is to restrict those users to only interacting with S3, taking the `--read-only` option into account. Use `none` to create users without any permissions boundary at all.
71 | 
72 | ## Changes that will be made to your AWS account
73 | 
74 | How the tool works varies depending on whether you are creating temporary or permanent credentials.
75 | 
76 | For permanent credentials, the steps are as follows:
77 | 
78 | 1. Confirm that each of the specified buckets exists. If they do not exist and `--create-bucket` was passed, create them - otherwise exit with an error.
79 | 2. If a username was not specified, derive a username using the `s3.$permission.$buckets` format.
80 | 3. If a user with that username does not exist, create one with an S3 permissions boundary of [AmazonS3ReadOnlyAccess](https://github.com/glassechidna/trackiam/blob/master/policies/AmazonS3ReadOnlyAccess.json) for `--read-only` or [AmazonS3FullAccess](https://github.com/glassechidna/trackiam/blob/master/policies/AmazonS3FullAccess.json) otherwise - unless `--user-permissions-boundary=none` or a custom permissions boundary was passed.
81 | 4. For each specified bucket, add an inline IAM policy to the user that gives them permission to either read-only, write-only or read-write against that bucket.
82 | 5. Create a new access key for that user and output the key and its secret to the console.
83 | 
84 | For temporary credentials:
85 | 
86 | 1. Confirm or create buckets, in the same way as for permanent credentials.
87 | 2. Check if an AWS role called `s3-credentials.AmazonS3FullAccess` exists. If it does not exist, create it, configured to allow the user's AWS account to assume it and with the `arn:aws:iam::aws:policy/AmazonS3FullAccess` policy attached.
88 | 3. Use `STS.AssumeRole()` to return temporary credentials that are restricted to just the specified buckets and specified read-only/read-write/write-only policy.
89 | 
90 | You can run the `create` command with the `--dry-run` option to see a summary of changes that would be applied, including details of generated policy documents, without actually applying those changes.
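
For example, this dry run previews every step that would be taken for a read-only setup (the bucket name is illustrative):

```bash
# Prints the users, policies and access keys that would be created,
# then exits without modifying the AWS account
s3-credentials create my-existing-bucket --read-only --dry-run
```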
91 | 
92 | ## Using a custom policy
93 | 
94 | The policy documents applied by this tool [are listed here](policy-documents.md).
95 | 
96 | If you want to use a custom policy document you can do so using the `--policy` option.
97 | 
98 | First, create your policy document as a JSON file that looks something like this:
99 | 
100 | ```json
101 | {
102 |     "Version": "2012-10-17",
103 |     "Statement": [
104 |         {
105 |             "Effect": "Allow",
106 |             "Action": ["s3:GetObject*", "s3:ListBucket"],
107 |             "Resource": [
108 |                 "arn:aws:s3:::$!BUCKET_NAME!$",
109 |                 "arn:aws:s3:::$!BUCKET_NAME!$/*"
110 |             ]
111 |         }
112 |     ]
113 | }
114 | ```
115 | Note the `$!BUCKET_NAME!$` strings - these will be replaced with the name of the relevant S3 bucket before the policy is applied.
116 | 
117 | Save that as `custom-policy.json` and apply it using the following command:
118 | 
119 |     % s3-credentials create my-s3-bucket \
120 |         --policy custom-policy.json
121 | 
122 | You can also pass `-` to read from standard input, or you can pass the literal JSON string directly to the `--policy` option:
123 | ```
124 | % s3-credentials create my-s3-bucket --policy '{
125 |     "Version": "2012-10-17",
126 |     "Statement": [
127 |         {
128 |             "Effect": "Allow",
129 |             "Action": ["s3:GetObject*", "s3:ListBucket"],
130 |             "Resource": [
131 |                 "arn:aws:s3:::$!BUCKET_NAME!$",
132 |                 "arn:aws:s3:::$!BUCKET_NAME!$/*"
133 |             ]
134 |         }
135 |     ]
136 | }'
137 | ```
138 | You can also specify one or more extra statement blocks that should be added to the generated policy, using `--statement JSON`. This example enables the AWS `textract:` APIs for the generated credentials, useful for use with the [s3-ocr](https://datasette.io/tools/s3-ocr) tool:
139 | ```
140 | % s3-credentials create my-s3-bucket --statement '{
141 |     "Effect": "Allow",
142 |     "Action": "textract:*",
143 |     "Resource": "*"
144 | }'
145 | ```
146 | 
--------------------------------------------------------------------------------
/docs/help.md:
--------------------------------------------------------------------------------
1 | # Command help
2 | 
3 | This page shows the `--help` output for all of the `s3-credentials` commands.
4 | 
5 | 
22 | ## s3-credentials --help
23 | 
24 | ```
25 | Usage: s3-credentials [OPTIONS] COMMAND [ARGS]...
26 | 
27 |   A tool for creating credentials for accessing S3 buckets
28 | 
29 |   Documentation: https://s3-credentials.readthedocs.io/
30 | 
31 | Options:
32 |   --version  Show the version and exit.
33 |   --help     Show this message and exit.
34 | 
35 | Commands:
36 |   create                    Create and return new AWS credentials for...
37 |   debug-bucket              Run a bunch of diagnostics to help debug a bucket
38 |   delete-objects            Delete one or more object from an S3 bucket
39 |   delete-user               Delete specified users, their access keys and...
40 |   get-bucket-policy         Get bucket policy for a bucket
41 |   get-cors-policy           Get CORS policy for a bucket
42 |   get-object                Download an object from an S3 bucket
43 |   get-objects               Download multiple objects from an S3 bucket
44 |   get-public-access-block   Get the public access settings for an S3 bucket
45 |   list-bucket               List contents of bucket
46 |   list-buckets              List buckets
47 |   list-roles                List roles
48 |   list-user-policies        List inline policies for specified users
49 |   list-users                List all users for this account
50 |   policy                    Output generated JSON policy for one or more...
51 | put-object Upload an object to an S3 bucket 52 | put-objects Upload multiple objects to an S3 bucket 53 | set-bucket-policy Set bucket policy for a bucket 54 | set-cors-policy Set CORS policy for a bucket 55 | set-public-access-block Configure public access settings for an S3 bucket. 56 | whoami Identify currently authenticated user 57 | ``` 58 | ## s3-credentials create --help 59 | 60 | ``` 61 | Usage: s3-credentials create [OPTIONS] BUCKETS... 62 | 63 | Create and return new AWS credentials for specified S3 buckets - optionally 64 | also creating the bucket if it does not yet exist. 65 | 66 | To create a new bucket and output read-write credentials: 67 | 68 | s3-credentials create my-new-bucket -c 69 | 70 | To create read-only credentials for an existing bucket: 71 | 72 | s3-credentials create my-existing-bucket --read-only 73 | 74 | To create write-only credentials that are only valid for 15 minutes: 75 | 76 | s3-credentials create my-existing-bucket --write-only -d 15m 77 | 78 | Options: 79 | -f, --format [ini|json] Output format for credentials 80 | -d, --duration DURATION How long should these credentials work for? 81 | Default is forever, use 3600 for 3600 seconds, 82 | 15m for 15 minutes, 1h for 1 hour 83 | --username TEXT Username to create or existing user to use 84 | -c, --create-bucket Create buckets if they do not already exist 85 | --prefix TEXT Restrict to keys starting with this prefix 86 | --public Make the created bucket public: anyone will be 87 | able to download files if they know their name 88 | --website Configure bucket to act as a website, using 89 | index.html and error.html 90 | --read-only Only allow reading from the bucket 91 | --write-only Only allow writing to the bucket 92 | --policy POLICY Path to a policy.json file, or literal JSON 93 | string - $!BUCKET_NAME!$ will be replaced with 94 | the name of the bucket 95 | --statement STATEMENT JSON statement to add to the policy 96 | --bucket-region TEXT Region in which to create buckets 97 | --silent Don't show performed steps 98 | --dry-run Show steps without executing them 99 | --user-permissions-boundary TEXT 100 | Custom permissions boundary to use for created 101 | users, or 'none' to create without. Defaults 102 | to limiting to S3 based on --read-only and 103 | --write-only options. 104 | --access-key TEXT AWS access key ID 105 | --secret-key TEXT AWS secret access key 106 | --session-token TEXT AWS session token 107 | --endpoint-url TEXT Custom endpoint URL 108 | -a, --auth FILENAME Path to JSON/INI file containing credentials 109 | --help Show this message and exit. 110 | ``` 111 | ## s3-credentials debug-bucket --help 112 | 113 | ``` 114 | Usage: s3-credentials debug-bucket [OPTIONS] BUCKET 115 | 116 | Run a bunch of diagnostics to help debug a bucket 117 | 118 | s3-credentials debug-bucket my-bucket 119 | 120 | Options: 121 | --access-key TEXT AWS access key ID 122 | --secret-key TEXT AWS secret access key 123 | --session-token TEXT AWS session token 124 | --endpoint-url TEXT Custom endpoint URL 125 | -a, --auth FILENAME Path to JSON/INI file containing credentials 126 | --help Show this message and exit. 127 | ``` 128 | ## s3-credentials delete-objects --help 129 | 130 | ``` 131 | Usage: s3-credentials delete-objects [OPTIONS] BUCKET [KEYS]... 
132 | 133 | Delete one or more object from an S3 bucket 134 | 135 | Pass one or more keys to delete them: 136 | 137 | s3-credentials delete-objects my-bucket one.txt two.txt 138 | 139 | To delete all files matching a prefix, pass --prefix: 140 | 141 | s3-credentials delete-objects my-bucket --prefix my-folder/ 142 | 143 | Options: 144 | --prefix TEXT Delete everything with this prefix 145 | -s, --silent Don't show informational output 146 | -d, --dry-run Show keys that would be deleted without deleting them 147 | --access-key TEXT AWS access key ID 148 | --secret-key TEXT AWS secret access key 149 | --session-token TEXT AWS session token 150 | --endpoint-url TEXT Custom endpoint URL 151 | -a, --auth FILENAME Path to JSON/INI file containing credentials 152 | --help Show this message and exit. 153 | ``` 154 | ## s3-credentials delete-user --help 155 | 156 | ``` 157 | Usage: s3-credentials delete-user [OPTIONS] USERNAMES... 158 | 159 | Delete specified users, their access keys and their inline policies 160 | 161 | s3-credentials delete-user username1 username2 162 | 163 | Options: 164 | --access-key TEXT AWS access key ID 165 | --secret-key TEXT AWS secret access key 166 | --session-token TEXT AWS session token 167 | --endpoint-url TEXT Custom endpoint URL 168 | -a, --auth FILENAME Path to JSON/INI file containing credentials 169 | --help Show this message and exit. 170 | ``` 171 | ## s3-credentials get-bucket-policy --help 172 | 173 | ``` 174 | Usage: s3-credentials get-bucket-policy [OPTIONS] BUCKET 175 | 176 | Get bucket policy for a bucket 177 | 178 | s3-credentials get-bucket-policy my-bucket 179 | 180 | Returns the bucket policy for this bucket, if set, as JSON 181 | 182 | Options: 183 | --access-key TEXT AWS access key ID 184 | --secret-key TEXT AWS secret access key 185 | --session-token TEXT AWS session token 186 | --endpoint-url TEXT Custom endpoint URL 187 | -a, --auth FILENAME Path to JSON/INI file containing credentials 188 | --help Show this message and exit. 189 | ``` 190 | ## s3-credentials get-cors-policy --help 191 | 192 | ``` 193 | Usage: s3-credentials get-cors-policy [OPTIONS] BUCKET 194 | 195 | Get CORS policy for a bucket 196 | 197 | s3-credentials get-cors-policy my-bucket 198 | 199 | Returns the CORS policy for this bucket, if set, as JSON 200 | 201 | Options: 202 | --access-key TEXT AWS access key ID 203 | --secret-key TEXT AWS secret access key 204 | --session-token TEXT AWS session token 205 | --endpoint-url TEXT Custom endpoint URL 206 | -a, --auth FILENAME Path to JSON/INI file containing credentials 207 | --help Show this message and exit. 208 | ``` 209 | ## s3-credentials get-object --help 210 | 211 | ``` 212 | Usage: s3-credentials get-object [OPTIONS] BUCKET KEY 213 | 214 | Download an object from an S3 bucket 215 | 216 | To see the contents of the bucket on standard output: 217 | 218 | s3-credentials get-object my-bucket hello.txt 219 | 220 | To save to a file: 221 | 222 | s3-credentials get-object my-bucket hello.txt -o hello.txt 223 | 224 | Options: 225 | -o, --output FILE Write to this file instead of stdout 226 | --access-key TEXT AWS access key ID 227 | --secret-key TEXT AWS secret access key 228 | --session-token TEXT AWS session token 229 | --endpoint-url TEXT Custom endpoint URL 230 | -a, --auth FILENAME Path to JSON/INI file containing credentials 231 | --help Show this message and exit. 232 | ``` 233 | ## s3-credentials get-objects --help 234 | 235 | ``` 236 | Usage: s3-credentials get-objects [OPTIONS] BUCKET [KEYS]... 
237 | 238 | Download multiple objects from an S3 bucket 239 | 240 | To download everything, run: 241 | 242 | s3-credentials get-objects my-bucket 243 | 244 | Files will be saved to a directory called my-bucket. Use -o dirname to save to 245 | a different directory. 246 | 247 | To download specific keys, list them: 248 | 249 | s3-credentials get-objects my-bucket one.txt path/two.txt 250 | 251 | To download files matching a glob-style pattern, use: 252 | 253 | s3-credentials get-objects my-bucket --pattern '*/*.js' 254 | 255 | Options: 256 | -o, --output DIRECTORY Write to this directory instead of one matching the 257 | bucket name 258 | -p, --pattern TEXT Glob patterns for files to download, e.g. '*/*.js' 259 | -s, --silent Don't show progress bar 260 | --access-key TEXT AWS access key ID 261 | --secret-key TEXT AWS secret access key 262 | --session-token TEXT AWS session token 263 | --endpoint-url TEXT Custom endpoint URL 264 | -a, --auth FILENAME Path to JSON/INI file containing credentials 265 | --help Show this message and exit. 266 | ``` 267 | ## s3-credentials get-public-access-block --help 268 | 269 | ``` 270 | Usage: s3-credentials get-public-access-block [OPTIONS] BUCKET 271 | 272 | Get the public access settings for an S3 bucket 273 | 274 | Example usage: 275 | 276 | s3-credentials get-public-access-block my-bucket 277 | 278 | Options: 279 | --access-key TEXT AWS access key ID 280 | --secret-key TEXT AWS secret access key 281 | --session-token TEXT AWS session token 282 | --endpoint-url TEXT Custom endpoint URL 283 | -a, --auth FILENAME Path to JSON/INI file containing credentials 284 | --help Show this message and exit. 285 | ``` 286 | ## s3-credentials list-bucket --help 287 | 288 | ``` 289 | Usage: s3-credentials list-bucket [OPTIONS] BUCKET 290 | 291 | List contents of bucket 292 | 293 | To list the contents of a bucket as JSON: 294 | 295 | s3-credentials list-bucket my-bucket 296 | 297 | Add --csv or --csv for CSV or TSV format: 298 | 299 | s3-credentials list-bucket my-bucket --csv 300 | 301 | Add --urls to get an extra URL field for each key: 302 | 303 | s3-credentials list-bucket my-bucket --urls 304 | 305 | Options: 306 | --prefix TEXT List keys starting with this prefix 307 | --urls Show URLs for each key 308 | --nl Output newline-delimited JSON 309 | --csv Output CSV 310 | --tsv Output TSV 311 | --access-key TEXT AWS access key ID 312 | --secret-key TEXT AWS secret access key 313 | --session-token TEXT AWS session token 314 | --endpoint-url TEXT Custom endpoint URL 315 | -a, --auth FILENAME Path to JSON/INI file containing credentials 316 | --help Show this message and exit. 317 | ``` 318 | ## s3-credentials list-buckets --help 319 | 320 | ``` 321 | Usage: s3-credentials list-buckets [OPTIONS] [BUCKETS]... 
322 | 323 | List buckets 324 | 325 | To list all buckets and their creation time as JSON: 326 | 327 | s3-credentials list-buckets 328 | 329 | Add --csv or --csv for CSV or TSV format: 330 | 331 | s3-credentials list-buckets --csv 332 | 333 | For extra details per bucket (much slower) add --details 334 | 335 | s3-credentials list-buckets --details 336 | 337 | Options: 338 | --details Include extra bucket details (slower) 339 | --nl Output newline-delimited JSON 340 | --csv Output CSV 341 | --tsv Output TSV 342 | --access-key TEXT AWS access key ID 343 | --secret-key TEXT AWS secret access key 344 | --session-token TEXT AWS session token 345 | --endpoint-url TEXT Custom endpoint URL 346 | -a, --auth FILENAME Path to JSON/INI file containing credentials 347 | --help Show this message and exit. 348 | ``` 349 | ## s3-credentials list-roles --help 350 | 351 | ``` 352 | Usage: s3-credentials list-roles [OPTIONS] [ROLE_NAMES]... 353 | 354 | List roles 355 | 356 | To list all roles for this AWS account: 357 | 358 | s3-credentials list-roles 359 | 360 | Add --csv or --csv for CSV or TSV format: 361 | 362 | s3-credentials list-roles --csv 363 | 364 | For extra details per role (much slower) add --details 365 | 366 | s3-credentials list-roles --details 367 | 368 | Options: 369 | --details Include attached policies (slower) 370 | --nl Output newline-delimited JSON 371 | --csv Output CSV 372 | --tsv Output TSV 373 | --access-key TEXT AWS access key ID 374 | --secret-key TEXT AWS secret access key 375 | --session-token TEXT AWS session token 376 | --endpoint-url TEXT Custom endpoint URL 377 | -a, --auth FILENAME Path to JSON/INI file containing credentials 378 | --help Show this message and exit. 379 | ``` 380 | ## s3-credentials list-user-policies --help 381 | 382 | ``` 383 | Usage: s3-credentials list-user-policies [OPTIONS] [USERNAMES]... 384 | 385 | List inline policies for specified users 386 | 387 | s3-credentials list-user-policies username 388 | 389 | Returns policies for all users if no usernames are provided. 390 | 391 | Options: 392 | --access-key TEXT AWS access key ID 393 | --secret-key TEXT AWS secret access key 394 | --session-token TEXT AWS session token 395 | --endpoint-url TEXT Custom endpoint URL 396 | -a, --auth FILENAME Path to JSON/INI file containing credentials 397 | --help Show this message and exit. 398 | ``` 399 | ## s3-credentials list-users --help 400 | 401 | ``` 402 | Usage: s3-credentials list-users [OPTIONS] 403 | 404 | List all users for this account 405 | 406 | s3-credentials list-users 407 | 408 | Add --csv or --csv for CSV or TSV format: 409 | 410 | s3-credentials list-users --csv 411 | 412 | Options: 413 | --nl Output newline-delimited JSON 414 | --csv Output CSV 415 | --tsv Output TSV 416 | --access-key TEXT AWS access key ID 417 | --secret-key TEXT AWS secret access key 418 | --session-token TEXT AWS session token 419 | --endpoint-url TEXT Custom endpoint URL 420 | -a, --auth FILENAME Path to JSON/INI file containing credentials 421 | --help Show this message and exit. 422 | ``` 423 | ## s3-credentials policy --help 424 | 425 | ``` 426 | Usage: s3-credentials policy [OPTIONS] BUCKETS... 
427 | 428 | Output generated JSON policy for one or more buckets 429 | 430 | Takes the same options as s3-credentials create 431 | 432 | To output a read-only JSON policy for a bucket: 433 | 434 | s3-credentials policy my-bucket --read-only 435 | 436 | Options: 437 | --read-only Only allow reading from the bucket 438 | --write-only Only allow writing to the bucket 439 | --prefix TEXT Restrict to keys starting with this prefix 440 | --statement STATEMENT JSON statement to add to the policy 441 | --public-bucket Bucket policy for allowing public access 442 | --help Show this message and exit. 443 | ``` 444 | ## s3-credentials put-object --help 445 | 446 | ``` 447 | Usage: s3-credentials put-object [OPTIONS] BUCKET KEY PATH 448 | 449 | Upload an object to an S3 bucket 450 | 451 | To upload a file to /my-key.txt in the my-bucket bucket: 452 | 453 | s3-credentials put-object my-bucket my-key.txt /path/to/file.txt 454 | 455 | Use - to upload content from standard input: 456 | 457 | echo "Hello" | s3-credentials put-object my-bucket hello.txt - 458 | 459 | Options: 460 | --content-type TEXT Content-Type to use (default is auto-detected based on 461 | file extension) 462 | -s, --silent Don't show progress bar 463 | --access-key TEXT AWS access key ID 464 | --secret-key TEXT AWS secret access key 465 | --session-token TEXT AWS session token 466 | --endpoint-url TEXT Custom endpoint URL 467 | -a, --auth FILENAME Path to JSON/INI file containing credentials 468 | --help Show this message and exit. 469 | ``` 470 | ## s3-credentials put-objects --help 471 | 472 | ``` 473 | Usage: s3-credentials put-objects [OPTIONS] BUCKET OBJECTS... 474 | 475 | Upload multiple objects to an S3 bucket 476 | 477 | Pass one or more files to upload them: 478 | 479 | s3-credentials put-objects my-bucket one.txt two.txt 480 | 481 | These will be saved to the root of the bucket. To save to a different location 482 | use the --prefix option: 483 | 484 | s3-credentials put-objects my-bucket one.txt two.txt --prefix my-folder 485 | 486 | This will upload them my-folder/one.txt and my-folder/two.txt. 487 | 488 | If you pass a directory it will be uploaded recursively: 489 | 490 | s3-credentials put-objects my-bucket my-folder 491 | 492 | This will create keys in my-folder/... in the S3 bucket. 493 | 494 | To upload all files in a folder to the root of the bucket instead use this: 495 | 496 | s3-credentials put-objects my-bucket my-folder/* 497 | 498 | Options: 499 | --prefix TEXT Prefix to add to the files within the bucket 500 | -s, --silent Don't show progress bar 501 | --dry-run Show steps without executing them 502 | --access-key TEXT AWS access key ID 503 | --secret-key TEXT AWS secret access key 504 | --session-token TEXT AWS session token 505 | --endpoint-url TEXT Custom endpoint URL 506 | -a, --auth FILENAME Path to JSON/INI file containing credentials 507 | --help Show this message and exit. 
508 | ``` 509 | ## s3-credentials set-bucket-policy --help 510 | 511 | ``` 512 | Usage: s3-credentials set-bucket-policy [OPTIONS] BUCKET 513 | 514 | Set bucket policy for a bucket 515 | 516 | s3-credentials set-bucket-policy my-bucket --policy-file policy.json 517 | 518 | Or to set a policy that allows GET requests from all: 519 | 520 | s3-credentials set-bucket-policy my-bucket --allow-all-get 521 | 522 | Options: 523 | --policy-file FILENAME 524 | --allow-all-get Allow GET requests from all 525 | --access-key TEXT AWS access key ID 526 | --secret-key TEXT AWS secret access key 527 | --session-token TEXT AWS session token 528 | --endpoint-url TEXT Custom endpoint URL 529 | -a, --auth FILENAME Path to JSON/INI file containing credentials 530 | --help Show this message and exit. 531 | ``` 532 | ## s3-credentials set-cors-policy --help 533 | 534 | ``` 535 | Usage: s3-credentials set-cors-policy [OPTIONS] BUCKET 536 | 537 | Set CORS policy for a bucket 538 | 539 | To allow GET requests from any origin: 540 | 541 | s3-credentials set-cors-policy my-bucket 542 | 543 | To allow GET and PUT from a specific origin and expose ETag headers: 544 | 545 | s3-credentials set-cors-policy my-bucket \ 546 | --allowed-method GET \ 547 | --allowed-method PUT \ 548 | --allowed-origin https://www.example.com/ \ 549 | --expose-header ETag 550 | 551 | Options: 552 | -m, --allowed-method TEXT Allowed method e.g. GET 553 | -h, --allowed-header TEXT Allowed header e.g. Authorization 554 | -o, --allowed-origin TEXT Allowed origin e.g. https://www.example.com/ 555 | -e, --expose-header TEXT Header to expose e.g. ETag 556 | --max-age-seconds INTEGER How long to cache preflight requests 557 | --access-key TEXT AWS access key ID 558 | --secret-key TEXT AWS secret access key 559 | --session-token TEXT AWS session token 560 | --endpoint-url TEXT Custom endpoint URL 561 | -a, --auth FILENAME Path to JSON/INI file containing credentials 562 | --help Show this message and exit. 563 | ``` 564 | ## s3-credentials set-public-access-block --help 565 | 566 | ``` 567 | Usage: s3-credentials set-public-access-block [OPTIONS] BUCKET 568 | 569 | Configure public access settings for an S3 bucket. 570 | 571 | Example: 572 | 573 | s3-credentials set-public-access-block my-bucket --block-public-acls false 574 | 575 | To allow full public access to the bucket, use the --allow-public-access flag: 576 | 577 | s3-credentials set-public-access-block my-bucket --allow-public-access 578 | 579 | Options: 580 | --block-public-acls BOOLEAN Block public ACLs for the bucket (true/false). 581 | --ignore-public-acls BOOLEAN Ignore public ACLs for the bucket 582 | (true/false). 583 | --block-public-policy BOOLEAN Block public bucket policies (true/false). 584 | --restrict-public-buckets BOOLEAN 585 | Restrict public buckets (true/false). 586 | --allow-public-access Set all public access settings to false 587 | (allows full public access). 588 | --access-key TEXT AWS access key ID 589 | --secret-key TEXT AWS secret access key 590 | --session-token TEXT AWS session token 591 | --endpoint-url TEXT Custom endpoint URL 592 | -a, --auth FILENAME Path to JSON/INI file containing credentials 593 | --help Show this message and exit. 
594 | ``` 595 | ## s3-credentials whoami --help 596 | 597 | ``` 598 | Usage: s3-credentials whoami [OPTIONS] 599 | 600 | Identify currently authenticated user 601 | 602 | Options: 603 | --access-key TEXT AWS access key ID 604 | --secret-key TEXT AWS secret access key 605 | --session-token TEXT AWS session token 606 | --endpoint-url TEXT Custom endpoint URL 607 | -a, --auth FILENAME Path to JSON/INI file containing credentials 608 | --help Show this message and exit. 609 | ``` 610 | 611 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # s3-credentials 2 | 3 | [![PyPI](https://img.shields.io/pypi/v/s3-credentials.svg)](https://pypi.org/project/s3-credentials/) 4 | [![Changelog](https://img.shields.io/github/v/release/simonw/s3-credentials?include_prereleases&label=changelog)](https://github.com/simonw/s3-credentials/releases) 5 | [![Tests](https://github.com/simonw/s3-credentials/workflows/Test/badge.svg)](https://github.com/simonw/s3-credentials/actions?query=workflow%3ATest) 6 | [![Documentation Status](https://readthedocs.org/projects/s3-credentials/badge/?version=latest)](https://s3-credentials.readthedocs.org/) 7 | [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/simonw/s3-credentials/blob/master/LICENSE) 8 | 9 | A tool for creating credentials for accessing S3 buckets 10 | 11 | For project background, see [s3-credentials: a tool for creating credentials for S3 buckets](https://simonwillison.net/2021/Nov/3/s3-credentials/) on my blog. 12 | 13 | Why would you need this? If you want to read and write to an S3 bucket from an automated script somewhere, you'll need an access key and secret key to authenticate your calls. This tool helps you create those with the most restrictive permissions possible. 14 | 15 | If your code is running in EC2 or Lambda, you can likely solve this [using roles instead](https://aws.amazon.com/premiumsupport/knowledge-center/lambda-execution-role-s3-bucket/). This tool is mainly useful when you are interacting with S3 from outside the boundaries of AWS itself. 16 | 17 | ## Installation 18 | 19 | Install this tool using `pip`: 20 | 21 | $ pip install s3-credentials 22 | 23 | ## Documentation 24 | 25 | ```{toctree} 26 | --- 27 | maxdepth: 3 28 | --- 29 | configuration 30 | create 31 | other-commands 32 | policy-documents 33 | help 34 | contributing 35 | ``` 36 | 37 | ## Tips 38 | 39 | You can see a log of changes made by this tool using AWS CloudTrail - the following link should provide an Event History interface showing relevant changes made to your AWS account such as `CreateAccessKey`, `CreateUser`, `PutUserPolicy` and more: 40 | 41 | 42 | 43 | You can view a list of your S3 buckets and confirm that they have the desired permissions and properties here: 44 | 45 | 46 | 47 | The management interface for an individual bucket is at `https://console.aws.amazon.com/s3/buckets/NAME-OF-BUCKET` 48 | -------------------------------------------------------------------------------- /docs/other-commands.md: -------------------------------------------------------------------------------- 1 | # Other commands 2 | 3 | ```{contents} 4 | --- 5 | local: 6 | class: this-will-duplicate-information-and-it-is-still-useful-here 7 | --- 8 | ``` 9 | 10 | ## policy 11 | 12 | You can use the `s3-credentials policy` command to generate the JSON policy document that would be used without applying it.
The command takes one or more required bucket names and a subset of the options available on the `create` command: 13 | 14 | - `--read-only` - generate a read-only policy 15 | - `--write-only` - generate a write-only policy 16 | - `--prefix` - policy should be restricted to keys in the bucket that start with this prefix 17 | - `--statement` - custom JSON statement block to add to the policy 18 | - `--public-bucket` - generate a bucket policy for a public bucket 19 | 20 | With none of these options, it defaults to a read-write policy. 21 | ```bash 22 | s3-credentials policy my-bucket --read-only 23 | ``` 24 | ```json 25 | { 26 | "Version": "2012-10-17", 27 | ... 28 | ``` 29 | 30 | ## whoami 31 | 32 | To see which user you are authenticated as: 33 | ```bash 34 | s3-credentials whoami 35 | ``` 36 | This will output JSON representing the currently authenticated user. 37 | 38 | Using this with the `--auth` option is useful for verifying created credentials: 39 | ```bash 40 | s3-credentials create static.niche-museums.com --read-only > auth.json 41 | ``` 42 | ```bash 43 | s3-credentials whoami --auth auth.json 44 | ``` 45 | ```json 46 | { 47 | "UserId": "AIDAWXFXAIOZPIZC6MHAG", 48 | "Account": "462092780466", 49 | "Arn": "arn:aws:iam::462092780466:user/s3.read-only.static.niche-museums.com" 50 | } 51 | ``` 52 | ## list-users 53 | 54 | To see a list of all users that exist for your AWS account: 55 | ```bash 56 | s3-credentials list-users 57 | ``` 58 | This will return a pretty-printed array of JSON objects by default. 59 | 60 | Add `--nl` to collapse these to single lines as valid newline-delimited JSON. 61 | 62 | Add `--csv` or `--tsv` to get back CSV or TSV data. 63 | 64 | ## list-buckets 65 | 66 | Shows a list of all buckets in your AWS account. 67 | 68 | ```bash 69 | s3-credentials list-buckets 70 | ``` 71 | ```json 72 | [ 73 | { 74 | "Name": "aws-cloudtrail-logs-462092780466-f2c900d3", 75 | "CreationDate": "2021-03-25 22:19:54+00:00" 76 | }, 77 | { 78 | "Name": "simonw-test-bucket-for-s3-credentials", 79 | "CreationDate": "2021-11-03 21:46:12+00:00" 80 | } 81 | ] 82 | ``` 83 | With no extra arguments, this will show all available buckets - you can also add one or more explicit bucket names to see just those buckets: 84 | 85 | ```bash 86 | s3-credentials list-buckets simonw-test-bucket-for-s3-credentials 87 | ``` 88 | ```json 89 | [ 90 | { 91 | "Name": "simonw-test-bucket-for-s3-credentials", 92 | "CreationDate": "2021-11-03 21:46:12+00:00" 93 | } 94 | ] 95 | ``` 96 | This accepts the same `--nl`, `--csv` and `--tsv` options as `list-users`. 97 | 98 | Add `--details` to include details of the bucket ACL, website configuration and public access block settings. This is useful for running a security audit of your buckets. 99 | 100 | Using `--details` adds several additional API calls for each bucket, so it is advisable to use it with one or more explicit bucket names.
101 | ```bash 102 | s3-credentials list-buckets simonw-test-public-website-bucket --details 103 | ``` 104 | ```json 105 | [ 106 | { 107 | "Name": "simonw-test-public-website-bucket", 108 | "CreationDate": "2021-11-08 22:53:30+00:00", 109 | "region": "us-east-1", 110 | "bucket_acl": { 111 | "Owner": { 112 | "DisplayName": "simon", 113 | "ID": "abcdeabcdeabcdeabcdeabcdeabcde0001" 114 | }, 115 | "Grants": [ 116 | { 117 | "Grantee": { 118 | "DisplayName": "simon", 119 | "ID": "abcdeabcdeabcdeabcdeabcdeabcde0001", 120 | "Type": "CanonicalUser" 121 | }, 122 | "Permission": "FULL_CONTROL" 123 | } 124 | ] 125 | }, 126 | "public_access_block": null, 127 | "bucket_website": { 128 | "IndexDocument": { 129 | "Suffix": "index.html" 130 | }, 131 | "ErrorDocument": { 132 | "Key": "error.html" 133 | }, 134 | "url": "http://simonw-test-public-website-bucket.s3-website.us-east-1.amazonaws.com/" 135 | } 136 | } 137 | ] 138 | ``` 139 | A bucket with `public_access_block` might look like this: 140 | ```json 141 | { 142 | "Name": "aws-cloudtrail-logs-462092780466-f2c900d3", 143 | "CreationDate": "2021-03-25 22:19:54+00:00", 144 | "bucket_acl": { 145 | "Owner": { 146 | "DisplayName": "simon", 147 | "ID": "abcdeabcdeabcdeabcdeabcdeabcde0001" 148 | }, 149 | "Grants": [ 150 | { 151 | "Grantee": { 152 | "DisplayName": "simon", 153 | "ID": "abcdeabcdeabcdeabcdeabcdeabcde0001", 154 | "Type": "CanonicalUser" 155 | }, 156 | "Permission": "FULL_CONTROL" 157 | } 158 | ] 159 | }, 160 | "public_access_block": { 161 | "BlockPublicAcls": true, 162 | "IgnorePublicAcls": true, 163 | "BlockPublicPolicy": true, 164 | "RestrictPublicBuckets": true 165 | }, 166 | "bucket_website": null 167 | } 168 | ``` 169 | 170 | ## list-bucket 171 | 172 | To list the contents of a bucket, use `list-bucket`: 173 | 174 | ```bash 175 | s3-credentials list-bucket static.niche-museums.com 176 | ``` 177 | ```json 178 | [ 179 | { 180 | "Key": "Griffith-Observatory.jpg", 181 | "LastModified": "2020-01-05 16:51:01+00:00", 182 | "ETag": "\"a4cff17d189e7eb0c4d3bf0257e56885\"", 183 | "Size": 3360040, 184 | "StorageClass": "STANDARD" 185 | }, 186 | { 187 | "Key": "IMG_0353.jpeg", 188 | "LastModified": "2019-10-25 02:50:49+00:00", 189 | "ETag": "\"d45bab0b65c0e4b03b2ac0359c7267e3\"", 190 | "Size": 2581023, 191 | "StorageClass": "STANDARD" 192 | } 193 | ] 194 | ``` 195 | You can use the `--prefix myprefix/` option to list only keys that start with a specific prefix. 196 | 197 | The command accepts the same `--nl`, `--csv` and `--tsv` options as `list-users`. 198 | 199 | Add `--urls` to include a `URL` field in the output providing the full URL to each object. 200 | 201 | ## list-user-policies 202 | 203 | To see a list of inline policies belonging to users: 204 | 205 | ```bash 206 | s3-credentials list-user-policies s3.read-write.static.niche-museums.com 207 | ``` 208 | ``` 209 | User: s3.read-write.static.niche-museums.com 210 | PolicyName: s3.read-write.static.niche-museums.com 211 | { 212 | "Version": "2012-10-17", 213 | "Statement": [ 214 | { 215 | "Effect": "Allow", 216 | "Action": [ 217 | "s3:ListBucket" 218 | ], 219 | "Resource": [ 220 | "arn:aws:s3:::static.niche-museums.com" 221 | ] 222 | }, 223 | { 224 | "Effect": "Allow", 225 | "Action": "s3:*Object", 226 | "Resource": [ 227 | "arn:aws:s3:::static.niche-museums.com/*" 228 | ] 229 | } 230 | ] 231 | } 232 | ``` 233 | You can pass any number of usernames here.
If you don't specify a username, the tool will loop through every user belonging to your account: 234 | ```bash 235 | s3-credentials list-user-policies 236 | ``` 237 | ## list-roles 238 | 239 | The `list-roles` command lists all of the roles available for the authenticated account. 240 | 241 | Add `--details` to fetch the inline and attached managed policies for each role as well - this is slower as it needs to make several additional API calls for each role. 242 | 243 | You can optionally add one or more role names to the command to display and fetch details about just those specific roles. 244 | 245 | Example usage: 246 | 247 | ```bash 248 | s3-credentials list-roles AWSServiceRoleForLightsail --details 249 | ``` 250 | ```json 251 | [ 252 | { 253 | "Path": "/aws-service-role/lightsail.amazonaws.com/", 254 | "RoleName": "AWSServiceRoleForLightsail", 255 | "RoleId": "AROAWXFXAIOZG5ACQ5NZ5", 256 | "Arn": "arn:aws:iam::462092780466:role/aws-service-role/lightsail.amazonaws.com/AWSServiceRoleForLightsail", 257 | "CreateDate": "2021-01-15 21:41:48+00:00", 258 | "AssumeRolePolicyDocument": { 259 | "Version": "2012-10-17", 260 | "Statement": [ 261 | { 262 | "Effect": "Allow", 263 | "Principal": { 264 | "Service": "lightsail.amazonaws.com" 265 | }, 266 | "Action": "sts:AssumeRole" 267 | } 268 | ] 269 | }, 270 | "MaxSessionDuration": 3600, 271 | "inline_policies": [ 272 | { 273 | "RoleName": "AWSServiceRoleForLightsail", 274 | "PolicyName": "LightsailExportAccess", 275 | "PolicyDocument": { 276 | "Version": "2012-10-17", 277 | "Statement": [ 278 | { 279 | "Effect": "Allow", 280 | "Action": [ 281 | "kms:Decrypt", 282 | "kms:DescribeKey", 283 | "kms:CreateGrant" 284 | ], 285 | "Resource": "arn:aws:kms:*:451833091580:key/*" 286 | }, 287 | { 288 | "Effect": "Allow", 289 | "Action": [ 290 | "cloudformation:DescribeStacks" 291 | ], 292 | "Resource": "arn:aws:cloudformation:*:*:stack/*/*" 293 | } 294 | ] 295 | } 296 | } 297 | ], 298 | "attached_policies": [ 299 | { 300 | "PolicyName": "LightsailExportAccess", 301 | "PolicyId": "ANPAJ4LZGPQLZWMVR4WMQ", 302 | "Arn": "arn:aws:iam::aws:policy/aws-service-role/LightsailExportAccess", 303 | "Path": "/aws-service-role/", 304 | "DefaultVersionId": "v2", 305 | "AttachmentCount": 1, 306 | "PermissionsBoundaryUsageCount": 0, 307 | "IsAttachable": true, 308 | "Description": "AWS Lightsail service linked role policy which grants permissions to export resources", 309 | "CreateDate": "2018-09-28 16:35:54+00:00", 310 | "UpdateDate": "2022-01-15 01:45:33+00:00", 311 | "Tags": [], 312 | "PolicyVersion": { 313 | "Document": { 314 | "Version": "2012-10-17", 315 | "Statement": [ 316 | { 317 | "Effect": "Allow", 318 | "Action": [ 319 | "iam:DeleteServiceLinkedRole", 320 | "iam:GetServiceLinkedRoleDeletionStatus" 321 | ], 322 | "Resource": "arn:aws:iam::*:role/aws-service-role/lightsail.amazonaws.com/AWSServiceRoleForLightsail*" 323 | }, 324 | { 325 | "Effect": "Allow", 326 | "Action": [ 327 | "ec2:CopySnapshot", 328 | "ec2:DescribeSnapshots", 329 | "ec2:CopyImage", 330 | "ec2:DescribeImages" 331 | ], 332 | "Resource": "*" 333 | }, 334 | { 335 | "Effect": "Allow", 336 | "Action": [ 337 | "s3:GetAccountPublicAccessBlock" 338 | ], 339 | "Resource": "*" 340 | } 341 | ] 342 | }, 343 | "VersionId": "v2", 344 | "IsDefaultVersion": true, 345 | "CreateDate": "2022-01-15 01:45:33+00:00" 346 | } 347 | } 348 | ] 349 | } 350 | ] 351 | ``` 352 | Add `--nl` to collapse these to single lines as valid newline-delimited JSON. 353 | 354 | Add `--csv` or `--tsv` to get back CSV or TSV data.
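The newline-delimited output combines well with other command-line tools - for example, this sketch (assuming `jq` is installed) prints just the name of each role:
```bash
s3-credentials list-roles --nl | jq -r '.RoleName'
```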
355 | 356 | ## delete-user 357 | 358 | In trying out this tool, it's possible you will create several different user accounts that you later decide to clean up. 359 | 360 | Deleting AWS users is a little fiddly: you first need to delete their access keys, then their inline policies and finally the user themselves. 361 | 362 | The `s3-credentials delete-user` command handles this for you: 363 | 364 | ```bash 365 | s3-credentials delete-user s3.read-write.simonw-test-bucket-10 366 | ``` 367 | ``` 368 | User: s3.read-write.simonw-test-bucket-10 369 | Deleted policy: s3.read-write.simonw-test-bucket-10 370 | Deleted access key: AKIAWXFXAIOZK3GPEIWR 371 | Deleted user 372 | ``` 373 | You can pass it multiple usernames to delete multiple users at a time. 374 | 375 | ## put-object 376 | 377 | You can upload a file to a key in an S3 bucket using `s3-credentials put-object`: 378 | ```bash 379 | s3-credentials put-object my-bucket my-key.txt /path/to/file.txt 380 | ``` 381 | Use `-` as the file name to upload from standard input: 382 | ```bash 383 | echo "Hello" | s3-credentials put-object my-bucket hello.txt - 384 | ``` 385 | This command shows a progress bar by default. Use `-s` or `--silent` to hide the progress bar. 386 | 387 | The `Content-Type` on the uploaded object will be automatically set based on the file extension. If you are using standard input, or you want to override the detected type, you can do so using the `--content-type` option: 388 | ```bash 389 | echo "
<h1>Hello World</h1>
" | \ 390 | s3-credentials put-object my-bucket hello.html - --content-type "text/html" 391 | ``` 392 | ## put-objects 393 | 394 | `s3-credentials put-objects` can be used to upload more than one file at once. 395 | 396 | Pass one or more filenames to upload them to the root of your bucket: 397 | ```bash 398 | s3-credentials put-objects my-bucket one.txt two.txt three.txt 399 | ``` 400 | Use `--prefix my-prefix` to upload them to the specified prefix: 401 | ```bash 402 | s3-credentials put-objects my-bucket one.txt --prefix my-prefix 403 | ``` 404 | This will upload the file to `my-prefix/one.txt`. 405 | 406 | Pass one or more directories to upload the contents of those directories. 407 | `.` uploads everything in your current directory: 408 | ```bash 409 | s3-credentials put-objects my-bucket . 410 | ``` 411 | Passing directory names will upload the directory and all of its contents: 412 | ```bash 413 | s3-credentials put-objects my-bucket my-directory 414 | ``` 415 | If `my-directory` had files `one.txt` and `two.txt` in it, the result would be: 416 | ``` 417 | my-directory/one.txt 418 | my-directory/two.txt 419 | ``` 420 | A progress bar will be shown by default. Use `-s` or `--silent` to hide it. 421 | 422 | Add `--dry-run` to get a preview of what would be uploaded without uploading anything: 423 | ```bash 424 | s3-credentials put-objects my-bucket . --dry-run 425 | ``` 426 | ``` 427 | out/IMG_1254.jpeg => s3://my-bucket/out/IMG_1254.jpeg 428 | out/alverstone-mead-2.jpg => s3://my-bucket/out/alverstone-mead-2.jpg 429 | out/alverstone-mead-1.jpg => s3://my-bucket/out/alverstone-mead-1.jpg 430 | ``` 431 | 432 | ## delete-objects 433 | 434 | `s3-credentials delete-objects` can be used to delete one or more keys from the bucket. 435 | 436 | Pass one or more keys to delete them: 437 | ```bash 438 | s3-credentials delete-objects my-bucket one.txt two.txt three.txt 439 | ``` 440 | Use `--prefix my-prefix` to delete all keys with the specified prefix: 441 | ```bash 442 | s3-credentials delete-objects my-bucket --prefix my-prefix 443 | ``` 444 | Pass `-d` or `--dry-run` to perform a dry-run of the deletion, which will list the keys that would be deleted without actually deleting them. 445 | ```bash 446 | s3-credentials delete-objects my-bucket --prefix my-prefix --dry-run 447 | ``` 448 | ## get-object 449 | 450 | To download a file from a bucket use `s3-credentials get-object`: 451 | ```bash 452 | s3-credentials get-object my-bucket hello.txt 453 | ``` 454 | This defaults to outputting the downloaded file to the terminal. You can instead direct it to save to a file on disk using the `-o` or `--output` option: 455 | ```bash 456 | s3-credentials get-object my-bucket hello.txt -o /path/to/hello.txt 457 | ``` 458 | ## get-objects 459 | 460 | `s3-credentials get-objects` can be used to download multiple files from a bucket at once. 461 | 462 | Without extra arguments, this downloads everything: 463 | ```bash 464 | s3-credentials get-objects my-bucket 465 | ``` 466 | Files will be written to the current directory by default, preserving their directory structure from the bucket. 
467 | 468 | To write to a different directory, use `--output` or `-o`: 469 | ```bash 470 | s3-credentials get-objects my-bucket -o /path/to/output 471 | ``` 472 | To download multiple specific files, add them as arguments to the command: 473 | ```bash 474 | s3-credentials get-objects my-bucket one.txt two.txt path/to/three.txt 475 | ``` 476 | You can pass one or more `--pattern` or `-p` options to download files matching a specific pattern: 477 | ```bash 478 | s3-credentials get-objects my-bucket -p "*.txt" -p "static/*.css" 479 | ``` 480 | Here the `*` wildcard will match any sequence of characters, including `/`. `?` will match a single character. 481 | 482 | A progress bar will be shown by default. Use `-s` or `--silent` to hide it. 483 | 484 | ## set-cors-policy and get-cors-policy 485 | 486 | You can set the [CORS policy](https://docs.aws.amazon.com/AmazonS3/latest/userguide/cors.html) for a bucket using the `set-cors-policy` command. S3 CORS policies are set at the bucket level - they cannot be set for individual items. 487 | 488 | First, create the bucket, making sure to use the `--public` option: 489 | ```bash 490 | s3-credentials create my-cors-bucket --public -c 491 | ``` 492 | You can set a default CORS policy - allowing `GET` requests from any origin - like this: 493 | ```bash 494 | s3-credentials set-cors-policy my-cors-bucket 495 | ``` 496 | You can use the `get-cors-policy` command to confirm the policy you have set: 497 | ```bash 498 | s3-credentials get-cors-policy my-cors-bucket 499 | ``` 500 | ```json 501 | [ 502 | { 503 | "ID": "set-by-s3-credentials", 504 | "AllowedMethods": [ 505 | "GET" 506 | ], 507 | "AllowedOrigins": [ 508 | "*" 509 | ] 510 | } 511 | ] 512 | ``` 513 | To customize the CORS policy, use the following options: 514 | 515 | - `-m/--allowed-method` - Allowed method e.g. `GET` 516 | - `-h/--allowed-header` - Allowed header e.g. `Authorization` 517 | - `-o/--allowed-origin` - Allowed origin e.g. `https://www.example.com/` 518 | - `-e/--expose-header` - Header to expose e.g. `ETag` 519 | - `--max-age-seconds` - How long to cache preflight requests 520 | 521 | Each of these can be passed multiple times with the exception of `--max-age-seconds`. 522 | 523 | The following example allows GET and PUT methods from code running on `https://www.example.com/`, allows the incoming `Authorization` header and exposes the `ETag` header.
It also sets the client to cache preflight requests for 60 seconds: 524 | ```bash 525 | s3-credentials set-cors-policy my-cors-bucket2 \ 526 | --allowed-method GET \ 527 | --allowed-method PUT \ 528 | --allowed-origin https://www.example.com/ \ 529 | --expose-header ETag \ 530 | --max-age-seconds 60 531 | ``` 532 | ## debug-bucket 533 | 534 | The `debug-bucket` command is useful for diagnosing issues with a bucket: 535 | ```bash 536 | s3-credentials debug-bucket my-bucket 537 | ``` 538 | Example output: 539 | ``` 540 | Bucket ACL: 541 | { 542 | "Owner": { 543 | "DisplayName": "username", 544 | "ID": "cc8ca3a037c6a7c1fa7580076bf7cd1949b3f2f58f01c9df9e53c51f6a249910" 545 | }, 546 | "Grants": [ 547 | { 548 | "Grantee": { 549 | "DisplayName": "username", 550 | "ID": "cc8ca3a037c6a7c1fa7580076bf7cd1949b3f2f58f01c9df9e53c51f6a249910", 551 | "Type": "CanonicalUser" 552 | }, 553 | "Permission": "FULL_CONTROL" 554 | } 555 | ] 556 | } 557 | Bucket policy status: 558 | { 559 | "PolicyStatus": { 560 | "IsPublic": true 561 | } 562 | } 563 | Bucket public access block: 564 | { 565 | "PublicAccessBlockConfiguration": { 566 | "BlockPublicAcls": false, 567 | "IgnorePublicAcls": false, 568 | "BlockPublicPolicy": false, 569 | "RestrictPublicBuckets": false 570 | } 571 | } 572 | ``` 573 | ## get-bucket-policy 574 | 575 | The `get-bucket-policy` command displays the current bucket policy for a bucket: 576 | ```bash 577 | s3-credentials get-bucket-policy my-bucket 578 | ``` 579 | Example output: 580 | 581 | ```json 582 | { 583 | "Version": "2012-10-17", 584 | "Statement": [ 585 | { 586 | "Sid": "AllowAllGetObject", 587 | "Effect": "Allow", 588 | "Principal": "*", 589 | "Action": "s3:GetObject", 590 | "Resource": "arn:aws:s3:::my-bucket/*" 591 | } 592 | ] 593 | } 594 | ``` 595 | 596 | ## set-bucket-policy 597 | 598 | The `set-bucket-policy` command can be used to set a bucket policy for a bucket: 599 | ```bash 600 | s3-credentials set-bucket-policy my-bucket --policy-file policy.json 601 | ``` 602 | Or, for the common case, set a policy that allows GET requests from anyone: 603 | ```bash 604 | s3-credentials set-bucket-policy my-bucket --allow-all-get 605 | ``` 606 | 607 | ## get-public-access-block 608 | 609 | The `get-public-access-block` command displays the current public access block configuration for a bucket: 610 | ```bash 611 | s3-credentials get-public-access-block my-bucket 612 | ``` 613 | Example output: 614 | 615 | ```json 616 | { 617 | "BlockPublicAcls": false, 618 | "IgnorePublicAcls": false, 619 | "BlockPublicPolicy": false, 620 | "RestrictPublicBuckets": false 621 | } 622 | ``` 623 | 624 | ## set-public-access-block 625 | 626 | The `set-public-access-block` command can be used to set the public access block configuration for a bucket: 627 | ```bash 628 | s3-credentials set-public-access-block my-bucket \ 629 | --block-public-acls true \ 630 | --ignore-public-acls true \ 631 | --block-public-policy true \ 632 | --restrict-public-buckets true 633 | ``` 634 | Each of the above options accepts `true` or `false`.
635 | 636 | You can use the `--allow-public-access` shortcut to set everything to `false` in one go: 637 | ```bash 638 | s3-credentials set-public-access-block my-bucket \ 639 | --allow-public-access 640 | ``` 641 | -------------------------------------------------------------------------------- /docs/policy-documents.md: -------------------------------------------------------------------------------- 1 | # Policy documents 2 | 3 | The IAM policies generated by this tool for a bucket called `my-s3-bucket` would look like this: 4 | 5 | ## read-write (default) 6 | 7 | 17 | ``` 18 | { 19 | "Version": "2012-10-17", 20 | "Statement": [ 21 | { 22 | "Effect": "Allow", 23 | "Action": [ 24 | "s3:ListBucket", 25 | "s3:GetBucketLocation" 26 | ], 27 | "Resource": [ 28 | "arn:aws:s3:::my-s3-bucket" 29 | ] 30 | }, 31 | { 32 | "Effect": "Allow", 33 | "Action": [ 34 | "s3:GetObject", 35 | "s3:GetObjectAcl", 36 | "s3:GetObjectLegalHold", 37 | "s3:GetObjectRetention", 38 | "s3:GetObjectTagging" 39 | ], 40 | "Resource": [ 41 | "arn:aws:s3:::my-s3-bucket/*" 42 | ] 43 | }, 44 | { 45 | "Effect": "Allow", 46 | "Action": [ 47 | "s3:PutObject", 48 | "s3:DeleteObject" 49 | ], 50 | "Resource": [ 51 | "arn:aws:s3:::my-s3-bucket/*" 52 | ] 53 | } 54 | ] 55 | } 56 | ``` 57 | 58 | 59 | ## `--read-only` 60 | 61 | 67 | ``` 68 | { 69 | "Version": "2012-10-17", 70 | "Statement": [ 71 | { 72 | "Effect": "Allow", 73 | "Action": [ 74 | "s3:ListBucket", 75 | "s3:GetBucketLocation" 76 | ], 77 | "Resource": [ 78 | "arn:aws:s3:::my-s3-bucket" 79 | ] 80 | }, 81 | { 82 | "Effect": "Allow", 83 | "Action": [ 84 | "s3:GetObject", 85 | "s3:GetObjectAcl", 86 | "s3:GetObjectLegalHold", 87 | "s3:GetObjectRetention", 88 | "s3:GetObjectTagging" 89 | ], 90 | "Resource": [ 91 | "arn:aws:s3:::my-s3-bucket/*" 92 | ] 93 | } 94 | ] 95 | } 96 | ``` 97 | 98 | 99 | ## `--write-only` 100 | 101 | 107 | ``` 108 | { 109 | "Version": "2012-10-17", 110 | "Statement": [ 111 | { 112 | "Effect": "Allow", 113 | "Action": [ 114 | "s3:PutObject" 115 | ], 116 | "Resource": [ 117 | "arn:aws:s3:::my-s3-bucket/*" 118 | ] 119 | } 120 | ] 121 | } 122 | ``` 123 | 124 | 125 | ## `--prefix my-prefix/` 126 | 127 | 133 | ``` 134 | { 135 | "Version": "2012-10-17", 136 | "Statement": [ 137 | { 138 | "Effect": "Allow", 139 | "Action": [ 140 | "s3:GetBucketLocation" 141 | ], 142 | "Resource": [ 143 | "arn:aws:s3:::my-s3-bucket" 144 | ] 145 | }, 146 | { 147 | "Effect": "Allow", 148 | "Action": [ 149 | "s3:ListBucket" 150 | ], 151 | "Resource": [ 152 | "arn:aws:s3:::my-s3-bucket" 153 | ], 154 | "Condition": { 155 | "StringLike": { 156 | "s3:prefix": [ 157 | "my-prefix/*" 158 | ] 159 | } 160 | } 161 | }, 162 | { 163 | "Effect": "Allow", 164 | "Action": [ 165 | "s3:GetObject", 166 | "s3:GetObjectAcl", 167 | "s3:GetObjectLegalHold", 168 | "s3:GetObjectRetention", 169 | "s3:GetObjectTagging" 170 | ], 171 | "Resource": [ 172 | "arn:aws:s3:::my-s3-bucket/my-prefix/*" 173 | ] 174 | }, 175 | { 176 | "Effect": "Allow", 177 | "Action": [ 178 | "s3:PutObject", 179 | "s3:DeleteObject" 180 | ], 181 | "Resource": [ 182 | "arn:aws:s3:::my-s3-bucket/my-prefix/*" 183 | ] 184 | } 185 | ] 186 | } 187 | ``` 188 | 189 | 190 | ## `--prefix my-prefix/ --read-only` 191 | 192 | 198 | ``` 199 | { 200 | "Version": "2012-10-17", 201 | "Statement": [ 202 | { 203 | "Effect": "Allow", 204 | "Action": [ 205 | "s3:GetBucketLocation" 206 | ], 207 | "Resource": [ 208 | "arn:aws:s3:::my-s3-bucket" 209 | ] 210 | }, 211 | { 212 | "Effect": "Allow", 213 | "Action": [ 214 | "s3:ListBucket" 215 | ], 216 | "Resource": [ 
217 | "arn:aws:s3:::my-s3-bucket" 218 | ], 219 | "Condition": { 220 | "StringLike": { 221 | "s3:prefix": [ 222 | "my-prefix/*" 223 | ] 224 | } 225 | } 226 | }, 227 | { 228 | "Effect": "Allow", 229 | "Action": [ 230 | "s3:GetObject", 231 | "s3:GetObjectAcl", 232 | "s3:GetObjectLegalHold", 233 | "s3:GetObjectRetention", 234 | "s3:GetObjectTagging" 235 | ], 236 | "Resource": [ 237 | "arn:aws:s3:::my-s3-bucket/my-prefix/*" 238 | ] 239 | } 240 | ] 241 | } 242 | ``` 243 | 244 | 245 | ## `--prefix my-prefix/ --write-only` 246 | 247 | 253 | ``` 254 | { 255 | "Version": "2012-10-17", 256 | "Statement": [ 257 | { 258 | "Effect": "Allow", 259 | "Action": [ 260 | "s3:PutObject" 261 | ], 262 | "Resource": [ 263 | "arn:aws:s3:::my-s3-bucket/my-prefix/*" 264 | ] 265 | } 266 | ] 267 | } 268 | ``` 269 | 270 | 271 | (public_bucket_policy)= 272 | 273 | ## public bucket policy 274 | 275 | Buckets created using the `--public` option will have the following bucket policy attached to them: 276 | 277 | 283 | ``` 284 | { 285 | "Version": "2012-10-17", 286 | "Statement": [ 287 | { 288 | "Sid": "AllowAllGetObject", 289 | "Effect": "Allow", 290 | "Principal": "*", 291 | "Action": [ 292 | "s3:GetObject" 293 | ], 294 | "Resource": [ 295 | "arn:aws:s3:::my-s3-bucket/*" 296 | ] 297 | } 298 | ] 299 | } 300 | ``` 301 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | furo 2 | sphinx-autobuild 3 | myst-parser 4 | cogapp 5 | -------------------------------------------------------------------------------- /s3_credentials/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/simonw/s3-credentials/eae5593cbe6f6be9ccd30398f1d2be9c7cb93561/s3_credentials/__init__.py -------------------------------------------------------------------------------- /s3_credentials/policies.py: -------------------------------------------------------------------------------- 1 | def read_write(bucket, prefix="*", extra_statements=None): 2 | statements = read_write_statements(bucket, prefix=prefix) 3 | if extra_statements: 4 | statements.extend(extra_statements) 5 | return wrap_policy(statements) 6 | 7 | 8 | def read_write_statements(bucket, prefix="*"): 9 | # https://github.com/simonw/s3-credentials/issues/24 10 | if not prefix.endswith("*"): 11 | prefix += "*" 12 | return read_only_statements(bucket, prefix) + [ 13 | { 14 | "Effect": "Allow", 15 | "Action": ["s3:PutObject", "s3:DeleteObject"], 16 | "Resource": ["arn:aws:s3:::{}/{}".format(bucket, prefix)], 17 | } 18 | ] 19 | 20 | 21 | def read_only(bucket, prefix="*", extra_statements=None): 22 | statements = read_only_statements(bucket, prefix=prefix) 23 | if extra_statements: 24 | statements.extend(extra_statements) 25 | return wrap_policy(statements) 26 | 27 | 28 | def read_only_statements(bucket, prefix="*"): 29 | # https://github.com/simonw/s3-credentials/issues/23 30 | statements = [] 31 | if not prefix.endswith("*"): 32 | prefix += "*" 33 | if prefix != "*": 34 | statements.append( 35 | { 36 | "Effect": "Allow", 37 | "Action": ["s3:GetBucketLocation"], 38 | "Resource": ["arn:aws:s3:::{}".format(bucket)], 39 | } 40 | ) 41 | statements.append( 42 | { 43 | "Effect": "Allow", 44 | "Action": ["s3:ListBucket"], 45 | "Resource": ["arn:aws:s3:::{}".format(bucket)], 46 | "Condition": { 47 | "StringLike": { 48 | # Note that prefix must end in / if user wants to limit to a folder 49 | 
"s3:prefix": [prefix] 50 | } 51 | }, 52 | } 53 | ) 54 | else: 55 | # We can combine s3:GetBucketLocation and s3:ListBucket into one 56 | statements.append( 57 | { 58 | "Effect": "Allow", 59 | "Action": ["s3:ListBucket", "s3:GetBucketLocation"], 60 | "Resource": ["arn:aws:s3:::{}".format(bucket)], 61 | } 62 | ) 63 | 64 | return statements + [ 65 | { 66 | "Effect": "Allow", 67 | "Action": [ 68 | "s3:GetObject", 69 | "s3:GetObjectAcl", 70 | "s3:GetObjectLegalHold", 71 | "s3:GetObjectRetention", 72 | "s3:GetObjectTagging", 73 | ], 74 | "Resource": ["arn:aws:s3:::{}/{}".format(bucket, prefix)], 75 | }, 76 | ] 77 | 78 | 79 | def write_only(bucket, prefix="*", extra_statements=None): 80 | statements = write_only_statements(bucket, prefix=prefix) 81 | if extra_statements: 82 | statements.extend(extra_statements) 83 | return wrap_policy(statements) 84 | 85 | 86 | def write_only_statements(bucket, prefix="*"): 87 | # https://github.com/simonw/s3-credentials/issues/25 88 | if not prefix.endswith("*"): 89 | prefix += "*" 90 | return [ 91 | { 92 | "Effect": "Allow", 93 | "Action": ["s3:PutObject"], 94 | "Resource": ["arn:aws:s3:::{}/{}".format(bucket, prefix)], 95 | } 96 | ] 97 | 98 | 99 | def wrap_policy(statements): 100 | return {"Version": "2012-10-17", "Statement": statements} 101 | 102 | 103 | def bucket_policy_allow_all_get(bucket): 104 | return { 105 | "Version": "2012-10-17", 106 | "Statement": [ 107 | { 108 | "Sid": "AllowAllGetObject", 109 | "Effect": "Allow", 110 | "Principal": "*", 111 | "Action": ["s3:GetObject"], 112 | "Resource": ["arn:aws:s3:::{}/*".format(bucket)], 113 | } 114 | ], 115 | } 116 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import os 3 | 4 | VERSION = "0.16.1" 5 | 6 | 7 | def get_long_description(): 8 | with open( 9 | os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"), 10 | encoding="utf8", 11 | ) as fp: 12 | return fp.read() 13 | 14 | 15 | setup( 16 | name="s3-credentials", 17 | description="A tool for creating credentials for accessing S3 buckets", 18 | long_description=get_long_description(), 19 | long_description_content_type="text/markdown", 20 | author="Simon Willison", 21 | url="https://github.com/simonw/s3-credentials", 22 | project_urls={ 23 | "Issues": "https://github.com/simonw/s3-credentials/issues", 24 | "CI": "https://github.com/simonw/s3-credentials/actions", 25 | "Changelog": "https://github.com/simonw/s3-credentials/releases", 26 | }, 27 | license="Apache License, Version 2.0", 28 | version=VERSION, 29 | packages=["s3_credentials"], 30 | entry_points=""" 31 | [console_scripts] 32 | s3-credentials=s3_credentials.cli:cli 33 | """, 34 | install_requires=["click", "boto3"], 35 | extras_require={"test": ["pytest", "pytest-mock", "cogapp", "moto>=5.0.4"]}, 36 | python_requires=">=3.7", 37 | ) 38 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import logging 3 | import os 4 | import pytest 5 | from moto import mock_aws 6 | 7 | 8 | def pytest_addoption(parser): 9 | parser.addoption( 10 | "--integration", 11 | action="store_true", 12 | default=False, 13 | help="run integration tests", 14 | ) 15 | parser.addoption( 16 | "--boto-logging", 17 | action="store_true", 18 | default=False, 19 | help="turn on boto3 logging", 20 | ) 21 | 
22 | 23 | def pytest_configure(config): 24 | config.addinivalue_line( 25 | "markers", 26 | "integration: mark test as integration test, only run with --integration", 27 | ) 28 | 29 | 30 | def pytest_collection_modifyitems(config, items): 31 | if config.getoption("--boto-logging"): 32 | boto3.set_stream_logger("botocore.endpoint", logging.DEBUG) 33 | if config.getoption("--integration"): 34 | # Also run integration tests 35 | return 36 | skip_slow = pytest.mark.skip(reason="use --integration option to run") 37 | for item in items: 38 | if "integration" in item.keywords: 39 | item.add_marker(skip_slow) 40 | 41 | 42 | @pytest.fixture(scope="function") 43 | def aws_credentials(): 44 | """Mocked AWS Credentials for moto.""" 45 | os.environ["AWS_ACCESS_KEY_ID"] = "testing" 46 | os.environ["AWS_SECRET_ACCESS_KEY"] = "testing" 47 | os.environ["AWS_SECURITY_TOKEN"] = "testing" 48 | os.environ["AWS_SESSION_TOKEN"] = "testing" 49 | os.environ["AWS_DEFAULT_REGION"] = "us-east-1" 50 | 51 | 52 | @pytest.fixture(scope="function") 53 | def moto_s3(aws_credentials): 54 | with mock_aws(): 55 | client = boto3.client("s3", region_name="us-east-1") 56 | client.create_bucket(Bucket="my-bucket") 57 | yield client 58 | 59 | 60 | @pytest.fixture(scope="function") 61 | def moto_s3_populated(moto_s3): 62 | for key in ("one.txt", "directory/two.txt", "directory/three.json"): 63 | moto_s3.put_object(Bucket="my-bucket", Key=key, Body=key.encode("utf-8")) 64 | yield moto_s3 65 | -------------------------------------------------------------------------------- /tests/test_dry_run.py: -------------------------------------------------------------------------------- 1 | from click.testing import CliRunner 2 | from s3_credentials.cli import cli 3 | import pytest 4 | import re 5 | import textwrap 6 | 7 | 8 | def assert_match_with_wildcards(pattern, input): 9 | # Pattern language is simple: '*' becomes '.*?' 
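# For example, "Would create bucket: '*'" compiles to the regex
# ^Would\ create\ bucket:\ '.*?'$ and is matched with re.DOTALL,
# so a single '*' can match across multiple lines of output.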
10 | bits = pattern.split("*") 11 | regex = "^{}$".format(".*?".join(re.escape(bit) for bit in bits)) 12 | print(regex) 13 | match = re.compile(regex.strip(), re.DOTALL).match(input.strip()) 14 | if match is None: 15 | # Build a useful message 16 | message = "Pattern:\n{}\n\nDoes not match input:\n\n{}".format(pattern, input) 17 | bad_bits = [bit for bit in bits if bit not in input] 18 | if bad_bits: 19 | message += "\nThese parts were not found in the input:\n\n" 20 | for bit in bad_bits: 21 | message += textwrap.indent("{}\n\n".format(bit), " ") 22 | assert False, message 23 | 24 | 25 | @pytest.mark.parametrize( 26 | "options,expected", 27 | ( 28 | ( 29 | [], 30 | ( 31 | """Would create bucket: 'my-bucket' 32 | Would create user: 's3.read-write.my-bucket' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess' 33 | Would attach policy called 's3.read-write.my-bucket' to user 's3.read-write.my-bucket', details:* 34 | Would call create access key for user 's3.read-write.my-bucket'""" 35 | ), 36 | ), 37 | ( 38 | ["--username", "frank"], 39 | ( 40 | """Would create bucket: 'my-bucket' 41 | Would create user: 'frank' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess' 42 | Would attach policy called 's3.read-write.my-bucket' to user 'frank', details:* 43 | Would call create access key for user 'frank'""" 44 | ), 45 | ), 46 | ( 47 | ["--duration", "20m"], 48 | ( 49 | """Would create bucket: 'my-bucket' 50 | Would ensure role: 's3-credentials.AmazonS3FullAccess' 51 | Would assume role using following policy for 1200 seconds:*""" 52 | ), 53 | ), 54 | ( 55 | ["--public"], 56 | ( 57 | """Would create bucket: 'my-bucket' 58 | ... then add this public access block configuration: 59 | {"BlockPublicAcls": false, "IgnorePublicAcls": false, "BlockPublicPolicy": false, "RestrictPublicBuckets": false} 60 | ... 
then attach the following bucket policy to it:* 61 | Would create user: 's3.read-write.my-bucket' with permissions boundary: 'arn:aws:iam::aws:policy/AmazonS3FullAccess' 62 | Would attach policy called 's3.read-write.my-bucket' to user 's3.read-write.my-bucket', details:* 63 | Would call create access key for user 's3.read-write.my-bucket'""" 64 | ), 65 | ), 66 | ( 67 | [ 68 | "--statement", 69 | '{"Effect": "Allow", "Action": "textract:*", "Resource": "*"}', 70 | ], 71 | ( 72 | """Would create bucket: 'my-bucket' 73 | Would create user: 's3.custom.my-bucket' 74 | *"Action": "textract:*""" 75 | ), 76 | ), 77 | ), 78 | ) 79 | def test_dry_run(options, expected): 80 | runner = CliRunner() 81 | result = runner.invoke(cli, ["create", "my-bucket", "--dry-run"] + options) 82 | assert result.exit_code == 0, result.output 83 | assert_match_with_wildcards(expected, result.output) 84 | -------------------------------------------------------------------------------- /tests/test_integration.py: -------------------------------------------------------------------------------- 1 | # These integration tests only run with "pytest --integration" - 2 | # they execute live calls against AWS using environment variables 3 | # and clean up after themselves 4 | from click.testing import CliRunner 5 | from s3_credentials.cli import bucket_exists, cli 6 | import botocore 7 | import boto3 8 | import datetime 9 | import json 10 | import pytest 11 | import secrets 12 | import time 13 | import urllib 14 | 15 | # Mark all tests in this module with "integration": 16 | pytestmark = pytest.mark.integration 17 | 18 | 19 | @pytest.fixture(autouse=True) 20 | def cleanup(): 21 | cleanup_any_resources() 22 | yield 23 | cleanup_any_resources() 24 | 25 | 26 | def test_create_bucket_with_read_write(tmpdir): 27 | bucket_name = "s3-credentials-tests.read-write.{}".format(secrets.token_hex(4)) 28 | # Bucket should not exist 29 | s3 = boto3.client("s3") 30 | assert not bucket_exists(s3, bucket_name) 31 | credentials = get_output("create", bucket_name, "-c") 32 | credentials_decoded = json.loads(credentials) 33 | credentials_s3 = boto3.session.Session( 34 | aws_access_key_id=credentials_decoded["AccessKeyId"], 35 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 36 | ).client("s3") 37 | # Bucket should exist - found I needed to sleep(10) before put-object would work 38 | time.sleep(10) 39 | assert bucket_exists(s3, bucket_name) 40 | # Use the credentials to write a file to that bucket 41 | test_write = tmpdir / "test-write.txt" 42 | test_write.write_text("hello", "utf-8") 43 | get_output("put-object", bucket_name, "test-write.txt", str(test_write)) 44 | credentials_s3.put_object( 45 | Body="hello".encode("utf-8"), Bucket=bucket_name, Key="test-write.txt" 46 | ) 47 | # Use default s3 client to check that the write succeeded 48 | get_object_response = s3.get_object(Bucket=bucket_name, Key="test-write.txt") 49 | assert get_object_response["Body"].read() == b"hello" 50 | # Check we can read the file using the credentials too 51 | output = get_output("get-object", bucket_name, "test-write.txt") 52 | assert output == "hello" 53 | 54 | 55 | def test_create_bucket_read_only_duration_15(): 56 | bucket_name = "s3-credentials-tests.read-only.{}".format(secrets.token_hex(4)) 57 | s3 = boto3.client("s3") 58 | assert not bucket_exists(s3, bucket_name) 59 | credentials_decoded = json.loads( 60 | get_output("create", bucket_name, "-c", "--duration", "15m", "--read-only") 61 | ) 62 | assert set(credentials_decoded.keys()) == { 63 | 
"AccessKeyId", 64 | "SecretAccessKey", 65 | "SessionToken", 66 | "Expiration", 67 | } 68 | # Expiration should be ~15 minutes in the future 69 | delta = ( 70 | datetime.datetime.fromisoformat(credentials_decoded["Expiration"]) 71 | - datetime.datetime.now(datetime.timezone.utc) 72 | ).total_seconds() 73 | # Should be around about 900 seconds 74 | assert 800 < delta < 1000 75 | # Wait for everything to exist 76 | time.sleep(10) 77 | # Create client with these credentials 78 | credentials_s3 = boto3.session.Session( 79 | aws_access_key_id=credentials_decoded["AccessKeyId"], 80 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 81 | aws_session_token=credentials_decoded["SessionToken"], 82 | ).client("s3") 83 | # Client should NOT be allowed to write objects 84 | with pytest.raises(botocore.exceptions.ClientError): 85 | credentials_s3.put_object( 86 | Body="hello".encode("utf-8"), Bucket=bucket_name, Key="hello.txt" 87 | ) 88 | # Write an object using root credentials 89 | s3.put_object( 90 | Body="hello read-only".encode("utf-8"), 91 | Bucket=bucket_name, 92 | Key="hello-read-only.txt", 93 | ) 94 | # Client should be able to read this 95 | assert ( 96 | read_file(credentials_s3, bucket_name, "hello-read-only.txt") 97 | == "hello read-only" 98 | ) 99 | 100 | 101 | def test_read_write_bucket_prefix_temporary_credentials(): 102 | bucket_name = "s3-credentials-tests.read-write-prefix.{}".format( 103 | secrets.token_hex(4) 104 | ) 105 | s3 = boto3.client("s3") 106 | assert not bucket_exists(s3, bucket_name) 107 | credentials_decoded = json.loads( 108 | get_output( 109 | "create", bucket_name, "-c", "--duration", "15m", "--prefix", "my/prefix/" 110 | ) 111 | ) 112 | # Wait for everything to exist 113 | time.sleep(10) 114 | # Create client with these credentials 115 | credentials_s3 = boto3.session.Session( 116 | aws_access_key_id=credentials_decoded["AccessKeyId"], 117 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 118 | aws_session_token=credentials_decoded["SessionToken"], 119 | ).client("s3") 120 | # Write file with root credentials that I should not be able to see 121 | s3.put_object( 122 | Body="hello".encode("utf-8"), 123 | Bucket=bucket_name, 124 | Key="should-not-be-visible.txt", 125 | ) 126 | # I should be able to write to and read from /my/prefix/file.txt 127 | credentials_s3.put_object( 128 | Body="hello".encode("utf-8"), 129 | Bucket=bucket_name, 130 | Key="my/prefix/file.txt", 131 | ) 132 | assert read_file(credentials_s3, bucket_name, "my/prefix/file.txt") == "hello" 133 | # Should NOT be able to read should-not-be-visible.txt 134 | with pytest.raises(botocore.exceptions.ClientError): 135 | read_file(credentials_s3, bucket_name, "should-not-be-visible.txt") 136 | 137 | 138 | def test_read_write_bucket_prefix_permanent_credentials(): 139 | bucket_name = "s3-credentials-tests.rw-prefix-perm.{}".format(secrets.token_hex(4)) 140 | s3 = boto3.client("s3") 141 | assert not bucket_exists(s3, bucket_name) 142 | credentials_decoded = json.loads( 143 | get_output("create", bucket_name, "-c", "--prefix", "my/prefix-2/") 144 | ) 145 | # Wait for everything to exist 146 | time.sleep(10) 147 | # Create client with these credentials 148 | credentials_s3 = boto3.session.Session( 149 | aws_access_key_id=credentials_decoded["AccessKeyId"], 150 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 151 | ).client("s3") 152 | # Write file with root credentials that I should not be able to see 153 | s3.put_object( 154 | Body="hello".encode("utf-8"), 155 | 
Bucket=bucket_name, 156 | Key="should-not-be-visible.txt", 157 | ) 158 | # I should be able to write to and read from my/prefix-2/file.txt 159 | credentials_s3.put_object( 160 | Body="hello".encode("utf-8"), 161 | Bucket=bucket_name, 162 | Key="my/prefix-2/file.txt", 163 | ) 164 | assert read_file(credentials_s3, bucket_name, "my/prefix-2/file.txt") == "hello" 165 | # Should NOT be able to read should-not-be-visible.txt 166 | with pytest.raises(botocore.exceptions.ClientError): 167 | read_file(credentials_s3, bucket_name, "should-not-be-visible.txt") 168 | 169 | 170 | def test_list_bucket_including_with_prefix(): 171 | bucket_name = "s3-credentials-tests.lbucket.{}".format(secrets.token_hex(4)) 172 | s3 = boto3.client("s3") 173 | assert not bucket_exists(s3, bucket_name) 174 | credentials_decoded = json.loads(get_output("create", bucket_name, "-c")) 175 | time.sleep(10) 176 | credentials_s3 = boto3.session.Session( 177 | aws_access_key_id=credentials_decoded["AccessKeyId"], 178 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 179 | ).client("s3") 180 | credentials_s3.put_object( 181 | Body="one".encode("utf-8"), 182 | Bucket=bucket_name, 183 | Key="one/file.txt", 184 | ) 185 | credentials_s3.put_object( 186 | Body="two".encode("utf-8"), 187 | Bucket=bucket_name, 188 | Key="two/file.txt", 189 | ) 190 | # Try list-bucket against everything 191 | everything = json.loads( 192 | get_output( 193 | "list-bucket", 194 | bucket_name, 195 | "--access-key", 196 | credentials_decoded["AccessKeyId"], 197 | "--secret-key", 198 | credentials_decoded["SecretAccessKey"], 199 | ) 200 | ) 201 | assert [e["Key"] for e in everything] == ["one/file.txt", "two/file.txt"] 202 | # Now use --prefix 203 | prefix_output = json.loads( 204 | get_output( 205 | "list-bucket", 206 | bucket_name, 207 | "--prefix", 208 | "one/", 209 | "--access-key", 210 | credentials_decoded["AccessKeyId"], 211 | "--secret-key", 212 | credentials_decoded["SecretAccessKey"], 213 | ) 214 | ) 215 | assert len(prefix_output) == 1 216 | assert prefix_output[0]["Key"] == "one/file.txt" 217 | 218 | 219 | def test_prefix_read_only(): 220 | bucket_name = "s3-credentials-tests.pre-ro.{}".format(secrets.token_hex(4)) 221 | s3 = boto3.client("s3") 222 | assert not bucket_exists(s3, bucket_name) 223 | credentials_decoded = json.loads( 224 | get_output("create", bucket_name, "-c", "--read-only", "--prefix", "prefix/") 225 | ) 226 | time.sleep(10) 227 | credentials_s3 = boto3.session.Session( 228 | aws_access_key_id=credentials_decoded["AccessKeyId"], 229 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 230 | ).client("s3") 231 | # Should not be able to write objects 232 | with pytest.raises(botocore.exceptions.ClientError): 233 | credentials_s3.put_object( 234 | Body="allowed".encode("utf-8"), 235 | Bucket=bucket_name, 236 | Key="prefix/allowed.txt", 237 | ) 238 | # So we use root permissions to write these: 239 | s3 = boto3.client("s3") 240 | s3.put_object( 241 | Body="denied".encode("utf-8"), 242 | Bucket=bucket_name, 243 | Key="denied.txt", 244 | ) 245 | s3.put_object( 246 | Body="allowed".encode("utf-8"), 247 | Bucket=bucket_name, 248 | Key="prefix/allowed.txt", 249 | ) 250 | # list-bucket against everything should error 251 | with pytest.raises(GetOutputError): 252 | get_output( 253 | "list-bucket", 254 | bucket_name, 255 | "--access-key", 256 | credentials_decoded["AccessKeyId"], 257 | "--secret-key", 258 | credentials_decoded["SecretAccessKey"], 259 | ) 260 | 261 | # list-bucket against --prefix prefix/ should work
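# (the --prefix --read-only policy only grants s3:ListBucket with a
# StringLike condition on s3:prefix, which is why the unprefixed call
# above is denied - see docs/policy-documents.md)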
262 | items = json.loads( 263 | get_output( 264 | "list-bucket", 265 | bucket_name, 266 | "--prefix", 267 | "prefix/", 268 | "--access-key", 269 | credentials_decoded["AccessKeyId"], 270 | "--secret-key", 271 | credentials_decoded["SecretAccessKey"], 272 | ) 273 | ) 274 | assert [e["Key"] for e in items] == ["prefix/allowed.txt"] 275 | # Should NOT be able to read "denied.txt" 276 | with pytest.raises(botocore.exceptions.ClientError): 277 | read_file(credentials_s3, bucket_name, "denied.txt") 278 | # Should be able to read prefix/allowed.txt 279 | assert read_file(credentials_s3, bucket_name, "prefix/allowed.txt") == "allowed" 280 | 281 | 282 | def test_prefix_write_only(): 283 | bucket_name = "s3-credentials-tests.pre-wo.{}".format(secrets.token_hex(4)) 284 | s3 = boto3.client("s3") 285 | assert not bucket_exists(s3, bucket_name) 286 | credentials_decoded = json.loads( 287 | get_output("create", bucket_name, "-c", "--write-only", "--prefix", "prefix/") 288 | ) 289 | time.sleep(10) 290 | credentials_s3 = boto3.session.Session( 291 | aws_access_key_id=credentials_decoded["AccessKeyId"], 292 | aws_secret_access_key=credentials_decoded["SecretAccessKey"], 293 | ).client("s3") 294 | # Should not be able to write objects to root 295 | with pytest.raises(botocore.exceptions.ClientError): 296 | credentials_s3.put_object( 297 | Body="denied".encode("utf-8"), 298 | Bucket=bucket_name, 299 | Key="denied.txt", 300 | ) 301 | # Should be able to write them to prefix/ 302 | credentials_s3.put_object( 303 | Body="allowed".encode("utf-8"), 304 | Bucket=bucket_name, 305 | Key="prefix/allowed2.txt", 306 | ) 307 | # Use root permissions to verify the write 308 | s3 = boto3.client("s3") 309 | assert read_file(s3, bucket_name, "prefix/allowed2.txt") == "allowed" 310 | # Should not be able to run list-bucket, even against the prefix 311 | for options in ([], ["--prefix", "prefix/"]): 312 | with pytest.raises(GetOutputError): 313 | args = [ 314 | "list-bucket", 315 | bucket_name, 316 | "--access-key", 317 | credentials_decoded["AccessKeyId"], 318 | "--secret-key", 319 | credentials_decoded["SecretAccessKey"], 320 | ] + options 321 | get_output(*args) 322 | # Should not be able to get-object 323 | for key in ("denied.txt", "prefix/allowed2.txt"): 324 | with pytest.raises(botocore.exceptions.ClientError): 325 | read_file(credentials_s3, bucket_name, key) 326 | 327 | 328 | class GetOutputError(Exception): 329 | pass 330 | 331 | 332 | def get_output(*args, input=None): 333 | runner = CliRunner(mix_stderr=False) 334 | with runner.isolated_filesystem(): 335 | result = runner.invoke(cli, args, catch_exceptions=False, input=input) 336 | if result.exit_code != 0: 337 | raise GetOutputError(result.stderr) 338 | return result.stdout 339 | 340 | 341 | def read_file(s3, bucket, path): 342 | response = s3.get_object(Bucket=bucket, Key=path) 343 | return response["Body"].read().decode("utf-8") 344 | 345 | 346 | def cleanup_any_resources(): 347 | # Delete any users beginning s3-credentials-tests. 348 | users = json.loads(get_output("list-users")) 349 | users_to_delete = [ 350 | user["UserName"] 351 | for user in users 352 | if ".s3-credentials-tests." in user["UserName"] 353 | ] 354 | if users_to_delete: 355 | print("Deleting users: ", users_to_delete) 356 | get_output("delete-user", *users_to_delete) 357 | s3 = boto3.client("s3") 358 | # Delete any buckets beginning s3-credentials-tests.
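# (S3 refuses to delete a bucket that still contains objects, so each
# bucket is emptied before delete_bucket is called)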
359 | buckets = json.loads(get_output("list-buckets")) 360 | buckets_to_delete = [ 361 | bucket["Name"] 362 | for bucket in buckets 363 | if bucket["Name"].startswith("s3-credentials-tests.") 364 | ] 365 | for bucket in buckets_to_delete: 366 | print("Deleting bucket: {}".format(bucket)) 367 | # Delete all objects in the bucket 368 | boto3.resource("s3").Bucket(bucket).objects.all().delete() 369 | # Delete the bucket 370 | s3.delete_bucket(Bucket=bucket) 371 | 372 | 373 | def test_public_bucket(): 374 | bucket_name = "s3-credentials-tests.public-bucket.{}".format(secrets.token_hex(4)) 375 | s3 = boto3.client("s3") 376 | assert not bucket_exists(s3, bucket_name) 377 | credentials_decoded = json.loads( 378 | get_output("create", bucket_name, "-c", "--duration", "15m", "--public") 379 | ) 380 | assert set(credentials_decoded.keys()) == { 381 | "AccessKeyId", 382 | "SecretAccessKey", 383 | "SessionToken", 384 | "Expiration", 385 | } 386 | # Wait for everything to exist 387 | time.sleep(5) 388 | # Use those credentials to upload a file 389 | content = "
<h1>Hello world</h1>
" 390 | get_output( 391 | "put-object", 392 | bucket_name, 393 | "hello.html", 394 | "-", 395 | "--content-type", 396 | "text/html", 397 | "--access-key", 398 | credentials_decoded["AccessKeyId"], 399 | "--secret-key", 400 | credentials_decoded["SecretAccessKey"], 401 | "--session-token", 402 | credentials_decoded["SessionToken"], 403 | input=content, 404 | ) 405 | # It should be publicly accessible 406 | url = "https://s3.amazonaws.com/{}/hello.html".format(bucket_name) 407 | print(url) 408 | response = urllib.request.urlopen(url) 409 | actual_content = response.read().decode("utf-8") 410 | assert response.status == 200 411 | assert response.headers["content-type"] == "text/html" 412 | assert actual_content == content 413 | -------------------------------------------------------------------------------- /tests/test_s3_credentials.py: -------------------------------------------------------------------------------- 1 | import botocore 2 | from click.testing import CliRunner 3 | import s3_credentials 4 | from s3_credentials.cli import cli 5 | import json 6 | import os 7 | import pathlib 8 | import pytest 9 | from unittest.mock import call, Mock 10 | from botocore.stub import Stubber 11 | 12 | 13 | @pytest.fixture 14 | def stub_iam(mocker): 15 | client = botocore.session.get_session().create_client("iam") 16 | stubber = Stubber(client) 17 | stubber.activate() 18 | mocker.patch("s3_credentials.cli.make_client", return_value=client) 19 | return stubber 20 | 21 | 22 | @pytest.fixture 23 | def stub_s3(mocker): 24 | client = botocore.session.get_session().create_client("s3") 25 | stubber = Stubber(client) 26 | stubber.activate() 27 | mocker.patch("s3_credentials.cli.make_client", return_value=client) 28 | return stubber 29 | 30 | 31 | @pytest.fixture 32 | def stub_sts(mocker): 33 | client = botocore.session.get_session().create_client("sts") 34 | stubber = Stubber(client) 35 | stubber.activate() 36 | mocker.patch("s3_credentials.cli.make_client", return_value=client) 37 | return stubber 38 | 39 | 40 | def test_whoami(mocker, stub_sts): 41 | stub_sts.add_response( 42 | "get_caller_identity", 43 | { 44 | "UserId": "AEONAUTHOUNTOHU", 45 | "Account": "123456", 46 | "Arn": "arn:aws:iam::123456:user/user-name", 47 | "ResponseMetadata": {}, 48 | }, 49 | ) 50 | 51 | runner = CliRunner() 52 | with runner.isolated_filesystem(): 53 | result = runner.invoke(cli, ["whoami"]) 54 | assert result.exit_code == 0 55 | assert json.loads(result.output) == { 56 | "UserId": "AEONAUTHOUNTOHU", 57 | "Account": "123456", 58 | "Arn": "arn:aws:iam::123456:user/user-name", 59 | } 60 | 61 | 62 | @pytest.mark.parametrize( 63 | "option,expected", 64 | ( 65 | ( 66 | "", 67 | "[\n" 68 | " {\n" 69 | ' "Path": "/",\n' 70 | ' "UserName": "NameA",\n' 71 | ' "UserId": "AID000000000000000001",\n' 72 | ' "Arn": "arn:aws:iam::000000000000:user/NameB",\n' 73 | ' "CreateDate": "2020-01-01 00:00:00+00:00"\n' 74 | " },\n" 75 | " {\n" 76 | ' "Path": "/",\n' 77 | ' "UserName": "NameA",\n' 78 | ' "UserId": "AID000000000000000000",\n' 79 | ' "Arn": "arn:aws:iam::000000000000:user/NameB",\n' 80 | ' "CreateDate": "2020-01-01 00:00:00+00:00"\n' 81 | " }\n" 82 | "]\n", 83 | ), 84 | ( 85 | "--nl", 86 | '{"Path": "/", "UserName": "NameA", "UserId": "AID000000000000000001", "Arn": "arn:aws:iam::000000000000:user/NameB", "CreateDate": "2020-01-01 00:00:00+00:00"}\n' 87 | '{"Path": "/", "UserName": "NameA", "UserId": "AID000000000000000000", "Arn": "arn:aws:iam::000000000000:user/NameB", "CreateDate": "2020-01-01 00:00:00+00:00"}\n', 88 | ), 89 | ( 90 | 
"--csv", 91 | ( 92 | "UserName,UserId,Arn,Path,CreateDate,PasswordLastUsed,PermissionsBoundary,Tags\n" 93 | "NameA,AID000000000000000001,arn:aws:iam::000000000000:user/NameB,/,2020-01-01 00:00:00+00:00,,,\n" 94 | "NameA,AID000000000000000000,arn:aws:iam::000000000000:user/NameB,/,2020-01-01 00:00:00+00:00,,,\n" 95 | ), 96 | ), 97 | ( 98 | "--tsv", 99 | ( 100 | "UserName\tUserId\tArn\tPath\tCreateDate\tPasswordLastUsed\tPermissionsBoundary\tTags\n" 101 | "NameA\tAID000000000000000001\tarn:aws:iam::000000000000:user/NameB\t/\t2020-01-01 00:00:00+00:00\t\t\t\n" 102 | "NameA\tAID000000000000000000\tarn:aws:iam::000000000000:user/NameB\t/\t2020-01-01 00:00:00+00:00\t\t\t\n" 103 | ), 104 | ), 105 | ), 106 | ) 107 | def test_list_users(option, expected, stub_iam): 108 | stub_iam.add_response( 109 | "list_users", 110 | { 111 | "Users": [ 112 | { 113 | "Path": "/", 114 | "UserName": "NameA", 115 | "UserId": "AID000000000000000001", 116 | "Arn": "arn:aws:iam::000000000000:user/NameB", 117 | "CreateDate": "2020-01-01 00:00:00+00:00", 118 | }, 119 | { 120 | "Path": "/", 121 | "UserName": "NameA", 122 | "UserId": "AID000000000000000000", 123 | "Arn": "arn:aws:iam::000000000000:user/NameB", 124 | "CreateDate": "2020-01-01 00:00:00+00:00", 125 | }, 126 | ] 127 | }, 128 | ) 129 | 130 | runner = CliRunner() 131 | with runner.isolated_filesystem(): 132 | result = runner.invoke(cli, ["list-users"] + ([option] if option else [])) 133 | assert result.exit_code == 0 134 | assert result.output == expected 135 | 136 | 137 | @pytest.mark.parametrize( 138 | "options,expected", 139 | ( 140 | ( 141 | [], 142 | ( 143 | "[\n" 144 | " {\n" 145 | ' "Name": "bucket-one",\n' 146 | ' "CreationDate": "2020-01-01 00:00:00+00:00"\n' 147 | " },\n" 148 | " {\n" 149 | ' "Name": "bucket-two",\n' 150 | ' "CreationDate": "2020-02-01 00:00:00+00:00"\n' 151 | " }\n" 152 | "]\n" 153 | ), 154 | ), 155 | ( 156 | ["--nl"], 157 | '{"Name": "bucket-one", "CreationDate": "2020-01-01 00:00:00+00:00"}\n' 158 | '{"Name": "bucket-two", "CreationDate": "2020-02-01 00:00:00+00:00"}\n', 159 | ), 160 | ( 161 | ["--nl", "bucket-one"], 162 | '{"Name": "bucket-one", "CreationDate": "2020-01-01 00:00:00+00:00"}\n', 163 | ), 164 | ), 165 | ) 166 | def test_list_buckets(stub_s3, options, expected): 167 | stub_s3.add_response( 168 | "list_buckets", 169 | { 170 | "Buckets": [ 171 | { 172 | "Name": "bucket-one", 173 | "CreationDate": "2020-01-01 00:00:00+00:00", 174 | }, 175 | { 176 | "Name": "bucket-two", 177 | "CreationDate": "2020-02-01 00:00:00+00:00", 178 | }, 179 | ] 180 | }, 181 | ) 182 | runner = CliRunner() 183 | with runner.isolated_filesystem(): 184 | result = runner.invoke(cli, ["list-buckets"] + options) 185 | assert result.exit_code == 0 186 | assert result.output == expected 187 | 188 | 189 | def test_list_buckets_details(stub_s3): 190 | stub_s3.add_response( 191 | "list_buckets", 192 | { 193 | "Buckets": [ 194 | { 195 | "Name": "bucket-one", 196 | "CreationDate": "2020-01-01 00:00:00+00:00", 197 | } 198 | ] 199 | }, 200 | ) 201 | stub_s3.add_response( 202 | "get_bucket_acl", 203 | { 204 | "Owner": { 205 | "DisplayName": "swillison", 206 | "ID": "36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441", 207 | }, 208 | "Grants": [ 209 | { 210 | "Grantee": { 211 | "DisplayName": "swillison", 212 | "ID": "36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441", 213 | "Type": "CanonicalUser", 214 | }, 215 | "Permission": "FULL_CONTROL", 216 | } 217 | ], 218 | "ResponseMetadata": {}, 219 | }, 220 | ) 221 | stub_s3.add_response( 
222 | "get_bucket_location", 223 | { 224 | "LocationConstraint": "us-west-2", 225 | }, 226 | ) 227 | stub_s3.add_response( 228 | "get_public_access_block", 229 | { 230 | "PublicAccessBlockConfiguration": { 231 | "BlockPublicAcls": True, 232 | "IgnorePublicAcls": True, 233 | "BlockPublicPolicy": True, 234 | "RestrictPublicBuckets": True, 235 | }, 236 | }, 237 | ) 238 | stub_s3.add_response( 239 | "get_bucket_website", 240 | { 241 | "IndexDocument": {"Suffix": "index.html"}, 242 | "ErrorDocument": {"Key": "error.html"}, 243 | }, 244 | ) 245 | runner = CliRunner() 246 | with runner.isolated_filesystem(): 247 | result = runner.invoke(cli, ["list-buckets", "--details"]) 248 | assert result.exit_code == 0 249 | assert result.output == ( 250 | "[\n" 251 | " {\n" 252 | ' "Name": "bucket-one",\n' 253 | ' "CreationDate": "2020-01-01 00:00:00+00:00",\n' 254 | ' "region": "us-west-2",\n' 255 | ' "bucket_acl": {\n' 256 | ' "Owner": {\n' 257 | ' "DisplayName": "swillison",\n' 258 | ' "ID": "36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441"\n' 259 | " },\n" 260 | ' "Grants": [\n' 261 | " {\n" 262 | ' "Grantee": {\n' 263 | ' "DisplayName": "swillison",\n' 264 | ' "ID": "36b2eeee501c5952a8ac119f9e5212277a4c01eccfa8d6a9d670bba1e2d5f441",\n' 265 | ' "Type": "CanonicalUser"\n' 266 | " },\n" 267 | ' "Permission": "FULL_CONTROL"\n' 268 | " }\n" 269 | " ]\n" 270 | " },\n" 271 | ' "public_access_block": {\n' 272 | ' "BlockPublicAcls": true,\n' 273 | ' "IgnorePublicAcls": true,\n' 274 | ' "BlockPublicPolicy": true,\n' 275 | ' "RestrictPublicBuckets": true\n' 276 | " },\n" 277 | ' "bucket_website": {\n' 278 | ' "IndexDocument": {\n' 279 | ' "Suffix": "index.html"\n' 280 | " },\n" 281 | ' "ErrorDocument": {\n' 282 | ' "Key": "error.html"\n' 283 | " },\n" 284 | ' "url": "http://bucket-one.s3-website.us-west-2.amazonaws.com/"\n' 285 | " }\n" 286 | " }\n" 287 | "]\n" 288 | ) 289 | 290 | 291 | CUSTOM_POLICY = '{"custom": "policy", "bucket": "$!BUCKET_NAME!$"}' 292 | READ_WRITE_POLICY = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:ListBucket", "s3:GetBucketLocation"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Effect": "Allow", "Action": ["s3:GetObject", "s3:GetObjectAcl", "s3:GetObjectLegalHold", "s3:GetObjectRetention", "s3:GetObjectTagging"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}, {"Effect": "Allow", "Action": ["s3:PutObject", "s3:DeleteObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}' 293 | READ_ONLY_POLICY = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:ListBucket", "s3:GetBucketLocation"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Effect": "Allow", "Action": ["s3:GetObject", "s3:GetObjectAcl", "s3:GetObjectLegalHold", "s3:GetObjectRetention", "s3:GetObjectTagging"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}' 294 | WRITE_ONLY_POLICY = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:PutObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}' 295 | PREFIX_POLICY = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:GetBucketLocation"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Effect": "Allow", "Action": ["s3:ListBucket"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"], "Condition": {"StringLike": {"s3:prefix": ["my-prefix/*"]}}}, {"Effect": "Allow", "Action": ["s3:GetObject", "s3:GetObjectAcl", "s3:GetObjectLegalHold", "s3:GetObjectRetention", "s3:GetObjectTagging"], "Resource": 
["arn:aws:s3:::pytest-bucket-simonw-1/my-prefix/*"]}, {"Effect": "Allow", "Action": ["s3:PutObject", "s3:DeleteObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/my-prefix/*"]}]}' 296 | EXTRA_STATEMENTS_POLICY = '{"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": ["s3:ListBucket", "s3:GetBucketLocation"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1"]}, {"Effect": "Allow", "Action": ["s3:GetObject", "s3:GetObjectAcl", "s3:GetObjectLegalHold", "s3:GetObjectRetention", "s3:GetObjectTagging"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}, {"Effect": "Allow", "Action": ["s3:PutObject", "s3:DeleteObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}, {"Effect": "Allow", "Action": "textract:*", "Resource": "*"}]}' 297 | 298 | # Used by both test_create and test_create_duration 299 | CREATE_TESTS = ( 300 | # options,use_policy_stdin,expected_policy,expected_name_fragment 301 | ([], False, READ_WRITE_POLICY, "read-write"), 302 | (["--read-only"], False, READ_ONLY_POLICY, "read-only"), 303 | (["--write-only"], False, WRITE_ONLY_POLICY, "write-only"), 304 | (["--prefix", "my-prefix/"], False, PREFIX_POLICY, "read-write"), 305 | (["--policy", "POLICYFILEPATH"], False, CUSTOM_POLICY, "custom"), 306 | (["--policy", "-"], True, CUSTOM_POLICY, "custom"), 307 | (["--policy", CUSTOM_POLICY], False, CUSTOM_POLICY, "custom"), 308 | ( 309 | ["--statement", '{"Effect": "Allow", "Action": "textract:*", "Resource": "*"}'], 310 | False, 311 | EXTRA_STATEMENTS_POLICY, 312 | "custom", 313 | ), 314 | ) 315 | 316 | 317 | @pytest.mark.parametrize( 318 | "options,use_policy_stdin,expected_policy,expected_name_fragment", 319 | CREATE_TESTS, 320 | ) 321 | def test_create( 322 | mocker, tmpdir, options, use_policy_stdin, expected_policy, expected_name_fragment 323 | ): 324 | boto3 = mocker.patch("boto3.client") 325 | boto3.return_value = Mock() 326 | boto3.return_value.create_access_key.return_value = { 327 | "AccessKey": {"AccessKeyId": "access", "SecretAccessKey": "secret"} 328 | } 329 | runner = CliRunner() 330 | with runner.isolated_filesystem(): 331 | filepath = str(tmpdir / "policy.json") 332 | open(filepath, "w").write(CUSTOM_POLICY) 333 | fixed_options = [ 334 | filepath if option == "POLICYFILEPATH" else option for option in options 335 | ] 336 | args = ["create", "pytest-bucket-simonw-1", "-c"] + fixed_options 337 | kwargs = {} 338 | if use_policy_stdin: 339 | kwargs["input"] = CUSTOM_POLICY 340 | result = runner.invoke(cli, args, **kwargs, catch_exceptions=False) 341 | assert result.exit_code == 0 342 | assert result.output == ( 343 | "Attached policy s3.NAME_FRAGMENT.pytest-bucket-simonw-1 to user s3.NAME_FRAGMENT.pytest-bucket-simonw-1\n" 344 | "Created access key for user: s3.NAME_FRAGMENT.pytest-bucket-simonw-1\n" 345 | '{\n "AccessKeyId": "access",\n "SecretAccessKey": "secret"\n}\n' 346 | ).replace("NAME_FRAGMENT", expected_name_fragment) 347 | assert [str(c) for c in boto3.mock_calls] == [ 348 | "call('s3')", 349 | "call('iam')", 350 | "call('sts')", 351 | "call().head_bucket(Bucket='pytest-bucket-simonw-1')", 352 | "call().get_user(UserName='s3.{}.pytest-bucket-simonw-1')".format( 353 | expected_name_fragment 354 | ), 355 | "call().put_user_policy(PolicyDocument='{}', PolicyName='s3.{}.pytest-bucket-simonw-1', UserName='s3.{}.pytest-bucket-simonw-1')".format( 356 | expected_policy.replace("$!BUCKET_NAME!$", "pytest-bucket-simonw-1"), 357 | expected_name_fragment, 358 | expected_name_fragment, 359 | ), 360 | 
"call().create_access_key(UserName='s3.{}.pytest-bucket-simonw-1')".format( 361 | expected_name_fragment 362 | ), 363 | ] 364 | 365 | 366 | @pytest.mark.parametrize( 367 | "statement,expected_error", 368 | ( 369 | ("", "Invalid JSON string"), 370 | ("{}", "missing required keys: Action, Effect, Resource"), 371 | ('{"Action": 1}', "missing required keys: Effect, Resource"), 372 | ('{"Action": 1, "Effect": 2}', "missing required keys: Resource"), 373 | ), 374 | ) 375 | def test_create_statement_error(statement, expected_error): 376 | runner = CliRunner() 377 | result = runner.invoke(cli, ["create", "--statement", statement]) 378 | assert result.exit_code == 2 379 | assert expected_error in result.output 380 | 381 | 382 | @pytest.fixture 383 | def mocked_for_duration(mocker): 384 | boto3 = mocker.patch("boto3.client") 385 | boto3.return_value = Mock() 386 | boto3.return_value.create_access_key.return_value = { 387 | "AccessKey": {"AccessKeyId": "access", "SecretAccessKey": "secret"} 388 | } 389 | boto3.return_value.get_caller_identity.return_value = {"Account": "1234"} 390 | boto3.return_value.get_role.return_value = {"Role": {"Arn": "arn:::role"}} 391 | boto3.return_value.assume_role.return_value = { 392 | "Credentials": { 393 | "AccessKeyId": "access", 394 | "SecretAccessKey": "secret", 395 | "SessionToken": "session", 396 | } 397 | } 398 | return boto3 399 | 400 | 401 | @pytest.mark.parametrize( 402 | "options,use_policy_stdin,expected_policy,expected_name_fragment", 403 | CREATE_TESTS, 404 | ) 405 | def test_create_duration( 406 | mocked_for_duration, 407 | tmpdir, 408 | options, 409 | use_policy_stdin, 410 | expected_policy, 411 | expected_name_fragment, 412 | ): 413 | runner = CliRunner() 414 | with runner.isolated_filesystem(): 415 | filepath = str(tmpdir / "policy.json") 416 | open(filepath, "w").write(CUSTOM_POLICY) 417 | fixed_options = [ 418 | filepath if option == "POLICYFILEPATH" else option for option in options 419 | ] 420 | args = [ 421 | "create", 422 | "pytest-bucket-simonw-1", 423 | "-c", 424 | "--duration", 425 | "15m", 426 | ] + fixed_options 427 | kwargs = {} 428 | if use_policy_stdin: 429 | kwargs["input"] = CUSTOM_POLICY 430 | result = runner.invoke(cli, args, **kwargs, catch_exceptions=False) 431 | assert result.exit_code == 0 432 | assert result.output == ( 433 | "Assume role against arn:::role for 900s\n" 434 | "{\n" 435 | ' "AccessKeyId": "access",\n' 436 | ' "SecretAccessKey": "secret",\n' 437 | ' "SessionToken": "session"\n' 438 | "}\n" 439 | ) 440 | assert mocked_for_duration.mock_calls == [ 441 | call("s3"), 442 | call("iam"), 443 | call("sts"), 444 | call().head_bucket(Bucket="pytest-bucket-simonw-1"), 445 | call().get_caller_identity(), 446 | call().get_role(RoleName="s3-credentials.AmazonS3FullAccess"), 447 | call().assume_role( 448 | RoleArn="arn:::role", 449 | RoleSessionName="s3.{fragment}.pytest-bucket-simonw-1".format( 450 | fragment=expected_name_fragment 451 | ), 452 | Policy="{policy}".format( 453 | policy=expected_policy.replace( 454 | "$!BUCKET_NAME!$", "pytest-bucket-simonw-1" 455 | ), 456 | ), 457 | DurationSeconds=900, 458 | ), 459 | ] 460 | 461 | 462 | def test_create_public(mocker): 463 | boto3 = mocker.patch("boto3.client") 464 | boto3.return_value = Mock() 465 | boto3.return_value.create_access_key.return_value = { 466 | "AccessKey": {"AccessKeyId": "access", "SecretAccessKey": "secret"} 467 | } 468 | # Fake that the bucket does not exist 469 | boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError( 470 | 
error_response={}, operation_name="" 471 | ) 472 | runner = CliRunner() 473 | with runner.isolated_filesystem(): 474 | args = ["create", "pytest-bucket-simonw-1", "-c", "--public"] 475 | result = runner.invoke(cli, args, catch_exceptions=False) 476 | assert result.exit_code == 0 477 | assert result.output == ( 478 | "Created bucket: pytest-bucket-simonw-1\n" 479 | "Set public access block configuration\n" 480 | "Attached bucket policy allowing public access\n" 481 | "Attached policy s3.read-write.pytest-bucket-simonw-1 to user s3.read-write.pytest-bucket-simonw-1\n" 482 | "Created access key for user: s3.read-write.pytest-bucket-simonw-1\n" 483 | "{\n" 484 | ' "AccessKeyId": "access",\n' 485 | ' "SecretAccessKey": "secret"\n' 486 | "}\n" 487 | ) 488 | assert [str(c) for c in boto3.mock_calls] == [ 489 | "call('s3')", 490 | "call('iam')", 491 | "call('sts')", 492 | "call().head_bucket(Bucket='pytest-bucket-simonw-1')", 493 | "call().create_bucket(Bucket='pytest-bucket-simonw-1')", 494 | "call().put_public_access_block(Bucket='pytest-bucket-simonw-1', PublicAccessBlockConfiguration={'BlockPublicAcls': False, 'IgnorePublicAcls': False, 'BlockPublicPolicy': False, 'RestrictPublicBuckets': False})", 495 | 'call().put_bucket_policy(Bucket=\'pytest-bucket-simonw-1\', Policy=\'{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAllGetObject", "Effect": "Allow", "Principal": "*", "Action": ["s3:GetObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}\')', 496 | "call().get_user(UserName='s3.read-write.pytest-bucket-simonw-1')", 497 | "call().put_user_policy(PolicyDocument='{}', PolicyName='s3.read-write.pytest-bucket-simonw-1', UserName='s3.read-write.pytest-bucket-simonw-1')".format( 498 | READ_WRITE_POLICY.replace("$!BUCKET_NAME!$", "pytest-bucket-simonw-1"), 499 | ), 500 | "call().create_access_key(UserName='s3.read-write.pytest-bucket-simonw-1')", 501 | ] 502 | 503 | 504 | def test_create_website(mocker): 505 | boto3 = mocker.patch("boto3.client") 506 | boto3.return_value = Mock() 507 | boto3.return_value.create_access_key.return_value = { 508 | "AccessKey": {"AccessKeyId": "access", "SecretAccessKey": "secret"} 509 | } 510 | # Fake that the bucket does not exist 511 | boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError( 512 | error_response={}, operation_name="" 513 | ) 514 | runner = CliRunner() 515 | with runner.isolated_filesystem(): 516 | args = ["create", "pytest-bucket-simonw-1", "-c", "--website"] 517 | result = runner.invoke(cli, args, catch_exceptions=False) 518 | assert result.exit_code == 0 519 | assert result.output == ( 520 | "Created bucket: pytest-bucket-simonw-1\n" 521 | "Set public access block configuration\n" 522 | "Attached bucket policy allowing public access\n" 523 | "Configured website: IndexDocument=index.html, ErrorDocument=error.html\n" 524 | "Attached policy s3.read-write.pytest-bucket-simonw-1 to user s3.read-write.pytest-bucket-simonw-1\n" 525 | "Created access key for user: s3.read-write.pytest-bucket-simonw-1\n" 526 | "{\n" 527 | ' "AccessKeyId": "access",\n' 528 | ' "SecretAccessKey": "secret"\n' 529 | "}\n" 530 | ) 531 | assert [str(c) for c in boto3.mock_calls] == [ 532 | "call('s3')", 533 | "call('iam')", 534 | "call('sts')", 535 | "call().head_bucket(Bucket='pytest-bucket-simonw-1')", 536 | "call().create_bucket(Bucket='pytest-bucket-simonw-1')", 537 | "call().put_public_access_block(Bucket='pytest-bucket-simonw-1', PublicAccessBlockConfiguration={'BlockPublicAcls': False, 'IgnorePublicAcls': False, 
'BlockPublicPolicy': False, 'RestrictPublicBuckets': False})", 538 | 'call().put_bucket_policy(Bucket=\'pytest-bucket-simonw-1\', Policy=\'{"Version": "2012-10-17", "Statement": [{"Sid": "AllowAllGetObject", "Effect": "Allow", "Principal": "*", "Action": ["s3:GetObject"], "Resource": ["arn:aws:s3:::pytest-bucket-simonw-1/*"]}]}\')', 539 | "call().put_bucket_website(Bucket='pytest-bucket-simonw-1', WebsiteConfiguration={'ErrorDocument': {'Key': 'error.html'}, 'IndexDocument': {'Suffix': 'index.html'}})", 540 | "call().get_user(UserName='s3.read-write.pytest-bucket-simonw-1')", 541 | "call().put_user_policy(PolicyDocument='{}', PolicyName='s3.read-write.pytest-bucket-simonw-1', UserName='s3.read-write.pytest-bucket-simonw-1')".format( 542 | READ_WRITE_POLICY.replace("$!BUCKET_NAME!$", "pytest-bucket-simonw-1"), 543 | ), 544 | "call().create_access_key(UserName='s3.read-write.pytest-bucket-simonw-1')", 545 | ] 546 | 547 | 548 | def test_create_format_ini(mocker): 549 | boto3 = mocker.patch("boto3.client") 550 | boto3.return_value = Mock() 551 | boto3.return_value.create_access_key.return_value = { 552 | "AccessKey": { 553 | "AccessKeyId": "access", 554 | "SecretAccessKey": "secret", 555 | "SessionToken": "session", 556 | } 557 | } 558 | runner = CliRunner(mix_stderr=False) 559 | result = runner.invoke( 560 | cli, 561 | ["create", "test-bucket", "-c", "-f", "ini"], 562 | ) 563 | assert result.exit_code == 0 564 | assert ( 565 | result.stdout 566 | == "[default]\naws_access_key_id=access\naws_secret_access_key=secret\n" 567 | ) 568 | 569 | 570 | def test_create_format_duration_ini(mocked_for_duration): 571 | runner = CliRunner(mix_stderr=False) 572 | result = runner.invoke( 573 | cli, 574 | ["create", "test-bucket", "-c", "--duration", "15m", "-f", "ini"], 575 | catch_exceptions=False, 576 | ) 577 | assert result.exit_code == 0 578 | assert result.stdout == ( 579 | "[default]\n" 580 | "aws_access_key_id=access\n" 581 | "aws_secret_access_key=secret\n" 582 | "aws_session_token=session\n" 583 | ) 584 | 585 | 586 | def test_list_user_policies(mocker): 587 | boto3 = mocker.patch("boto3.client") 588 | boto3.return_value = Mock() 589 | boto3.return_value.get_user_policy.return_value = { 590 | "PolicyDocument": {"policy": "here"} 591 | } 592 | 593 | def get_paginator(type): 594 | m = Mock() 595 | if type == "list_users": 596 | m.paginate.return_value = [ 597 | {"Users": [{"UserName": "one"}, {"UserName": "two"}]} 598 | ] 599 | elif type == "list_user_policies": 600 | m.paginate.return_value = [{"PolicyNames": ["policy-one", "policy-two"]}] 601 | return m 602 | 603 | boto3().get_paginator.side_effect = get_paginator 604 | runner = CliRunner() 605 | with runner.isolated_filesystem(): 606 | result = runner.invoke(cli, ["list-user-policies"], catch_exceptions=False) 607 | assert result.exit_code == 0 608 | assert result.output == ( 609 | "User: one\n" 610 | "PolicyName: policy-one\n" 611 | "{\n" 612 | ' "policy": "here"\n' 613 | "}\n" 614 | "PolicyName: policy-two\n" 615 | "{\n" 616 | ' "policy": "here"\n' 617 | "}\n" 618 | "User: two\n" 619 | "PolicyName: policy-one\n" 620 | "{\n" 621 | ' "policy": "here"\n' 622 | "}\n" 623 | "PolicyName: policy-two\n" 624 | "{\n" 625 | ' "policy": "here"\n' 626 | "}\n" 627 | ) 628 | assert boto3.mock_calls == [ 629 | call(), 630 | call("iam"), 631 | call().get_paginator("list_users"), 632 | call().get_paginator("list_user_policies"), 633 | call().get_user_policy(UserName="one", PolicyName="policy-one"), 634 | call().get_user_policy(UserName="one", 
PolicyName="policy-two"), 635 | call().get_paginator("list_user_policies"), 636 | call().get_user_policy(UserName="two", PolicyName="policy-one"), 637 | call().get_user_policy(UserName="two", PolicyName="policy-two"), 638 | ] 639 | 640 | 641 | def test_delete_user(mocker): 642 | boto3 = mocker.patch("boto3.client") 643 | boto3.return_value = Mock() 644 | boto3.return_value.get_user_policy.return_value = { 645 | "PolicyDocument": {"policy": "here"} 646 | } 647 | 648 | def get_paginator(type): 649 | m = Mock() 650 | if type == "list_access_keys": 651 | m.paginate.return_value = [ 652 | {"AccessKeyMetadata": [{"AccessKeyId": "one"}, {"AccessKeyId": "two"}]} 653 | ] 654 | elif type == "list_user_policies": 655 | m.paginate.return_value = [{"PolicyNames": ["policy-one"]}] 656 | return m 657 | 658 | boto3().get_paginator.side_effect = get_paginator 659 | runner = CliRunner() 660 | with runner.isolated_filesystem(): 661 | result = runner.invoke(cli, ["delete-user", "user-123"], catch_exceptions=False) 662 | assert result.exit_code == 0 663 | assert result.output == ( 664 | "User: user-123\n" 665 | " Deleted policy: policy-one\n" 666 | " Deleted access key: one\n" 667 | " Deleted access key: two\n" 668 | " Deleted user\n" 669 | ) 670 | assert boto3.mock_calls == [ 671 | call(), 672 | call("iam"), 673 | call().get_paginator("list_user_policies"), 674 | call().delete_user_policy(UserName="user-123", PolicyName="policy-one"), 675 | call().get_paginator("list_access_keys"), 676 | call().delete_access_key(UserName="user-123", AccessKeyId="one"), 677 | call().delete_access_key(UserName="user-123", AccessKeyId="two"), 678 | call().delete_user(UserName="user-123"), 679 | ] 680 | 681 | 682 | def test_get_cors_policy(mocker): 683 | boto3 = mocker.patch("boto3.client") 684 | boto3.return_value = Mock() 685 | boto3.return_value.get_bucket_cors.return_value = { 686 | "CORSRules": [ 687 | { 688 | "ID": "set-by-s3-credentials", 689 | "AllowedMethods": ["GET"], 690 | "AllowedOrigins": ["*"], 691 | } 692 | ] 693 | } 694 | runner = CliRunner() 695 | with runner.isolated_filesystem(): 696 | result = runner.invoke( 697 | cli, ["get-cors-policy", "my-bucket"], catch_exceptions=False 698 | ) 699 | assert result.exit_code == 0 700 | assert result.output == ( 701 | "[" 702 | "\n {" 703 | '\n "ID": "set-by-s3-credentials",' 704 | '\n "AllowedMethods": [' 705 | '\n "GET"' 706 | "\n ]," 707 | '\n "AllowedOrigins": [' 708 | '\n "*"' 709 | "\n ]" 710 | "\n }" 711 | "\n]\n" 712 | ) 713 | 714 | assert boto3.mock_calls == [ 715 | call("s3"), 716 | call().get_bucket_cors(Bucket="my-bucket"), 717 | ] 718 | 719 | 720 | @pytest.mark.parametrize( 721 | "options,expected_json", 722 | ( 723 | ( 724 | [], 725 | { 726 | "ID": "set-by-s3-credentials", 727 | "AllowedOrigins": ["*"], 728 | "AllowedHeaders": (), 729 | "AllowedMethods": ["GET"], 730 | "ExposeHeaders": (), 731 | }, 732 | ), 733 | ( 734 | [ 735 | "--allowed-method", 736 | "GET", 737 | "--allowed-method", 738 | "PUT", 739 | "--allowed-origin", 740 | "https://www.example.com/", 741 | "--expose-header", 742 | "ETag", 743 | ], 744 | { 745 | "ID": "set-by-s3-credentials", 746 | "AllowedOrigins": ("https://www.example.com/",), 747 | "AllowedHeaders": (), 748 | "AllowedMethods": ("GET", "PUT"), 749 | "ExposeHeaders": ("ETag",), 750 | }, 751 | ), 752 | ( 753 | ["--max-age-seconds", 60], 754 | { 755 | "ID": "set-by-s3-credentials", 756 | "AllowedOrigins": ["*"], 757 | "AllowedHeaders": (), 758 | "AllowedMethods": ["GET"], 759 | "ExposeHeaders": (), 760 | "MaxAgeSeconds": 60, 761 | }, 
762 | ), 763 | ), 764 | ) 765 | def test_set_cors_policy(mocker, options, expected_json): 766 | boto3 = mocker.patch("boto3.client") 767 | boto3.return_value = Mock() 768 | boto3.return_value.put_bucket_cors.return_value = {} 769 | runner = CliRunner() 770 | with runner.isolated_filesystem(): 771 | result = runner.invoke( 772 | cli, ["set-cors-policy", "my-bucket"] + options, catch_exceptions=False 773 | ) 774 | assert result.exit_code == 0 775 | assert result.output == "" 776 | assert boto3.mock_calls == [ 777 | call("s3"), 778 | call().head_bucket(Bucket="my-bucket"), 779 | call().put_bucket_cors( 780 | Bucket="my-bucket", CORSConfiguration={"CORSRules": [expected_json]} 781 | ), 782 | ] 783 | 784 | 785 | @pytest.mark.parametrize( 786 | "strategy,expected_error", 787 | ( 788 | ("stdin", "Input contained invalid JSON"), 789 | ("filepath", "File contained invalid JSON"), 790 | ("string", "Invalid JSON string"), 791 | ), 792 | ) 793 | @pytest.mark.parametrize("use_valid_string", (True, False)) 794 | def test_verify_create_policy_option( 795 | tmpdir, mocker, strategy, expected_error, use_valid_string 796 | ): 797 | # Force a "bucket does not exist" error so the command stops right after policy verification 798 | boto3 = mocker.patch("boto3.client") 799 | boto3.return_value.head_bucket.side_effect = botocore.exceptions.ClientError( 800 | error_response={}, operation_name="" 801 | ) 802 | if use_valid_string: 803 | content = '{"policy": "..."}' 804 | else: 805 | content = "{Invalid JSON" 806 | # Only used when strategy == "filepath" 807 | filepath = str(tmpdir / "policy.json") 808 | open(filepath, "w").write(content) 809 | 810 | runner = CliRunner() 811 | args = ["create", "my-bucket", "--policy"] 812 | kwargs = {} 813 | if strategy == "stdin": 814 | args.append("-") 815 | kwargs["input"] = content 816 | elif strategy == "filepath": 817 | args.append(filepath) 818 | elif strategy == "string": 819 | args.append(content) 820 | 821 | result = runner.invoke(cli, args, **kwargs) 822 | if use_valid_string: 823 | assert result.exit_code == 1 824 | assert ( 825 | result.output 826 | == "Error: Bucket does not exist: my-bucket - try --create-bucket to create it\n" 827 | ) 828 | else: 829 | assert result.exit_code 830 | assert ( 831 | "Error: Invalid value for '--policy': {}".format(expected_error) 832 | in result.output 833 | ) 834 | 835 | 836 | @pytest.mark.parametrize( 837 | "content", 838 | ( 839 | '{"AccessKeyId": "access", "SecretAccessKey": "secret"}', 840 | "[default]\naws_access_key_id=access\naws_secret_access_key=secret", 841 | ), 842 | ) 843 | @pytest.mark.parametrize("use_stdin", (True, False)) 844 | def test_auth_option(tmpdir, mocker, content, use_stdin): 845 | boto3 = mocker.patch("boto3.client") 846 | boto3.return_value = Mock() 847 | boto3().get_paginator().paginate.return_value = [{"Users": []}] 848 | 849 | filepath = None 850 | if use_stdin: 851 | input = content 852 | arg = "-" 853 | else: 854 | input = None 855 | filepath = str(tmpdir / "input") 856 | open(filepath, "w").write(content) 857 | arg = filepath 858 | 859 | runner = CliRunner() 860 | with runner.isolated_filesystem(): 861 | result = runner.invoke( 862 | cli, ["list-users", "-a", arg], catch_exceptions=False, input=input 863 | ) 864 | assert result.exit_code == 0 865 | 866 | assert boto3.mock_calls == [ 867 | call(), 868 | call().get_paginator(), 869 | call("iam", aws_access_key_id="access", aws_secret_access_key="secret"), 870 | call().get_paginator("list_users"), 871 | call().get_paginator().paginate(), 872 | ] 873 | 874 | 875 | 
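# A hedged usage sketch, not asserted anywhere above: the parametrized cases
# suggest -a/--auth accepts credentials as either JSON or INI, read from a
# file path or from stdin when "-" is passed. Assuming the package's
# `s3-credentials` console script is installed, the equivalent shell
# invocations would look like:
#
#     s3-credentials list-users -a credentials.json
#     cat credentials.ini | s3-credentials list-users -a -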
@pytest.mark.parametrize( 876 | "extra_option", ["--access-key", "--secret-key", "--session-token"] 877 | ) 878 | def test_auth_option_errors(extra_option): 879 | runner = CliRunner() 880 | result = runner.invoke( 881 | cli, 882 | ["list-users", "-a", "-", extra_option, "blah"], 883 | catch_exceptions=False, 884 | input="", 885 | ) 886 | assert result.exit_code == 1 887 | assert ( 888 | result.output 889 | == "Error: --auth cannot be used with --access-key, --secret-key or --session-token\n" 890 | ) 891 | 892 | 893 | @pytest.mark.parametrize( 894 | "options,expected", 895 | ( 896 | ([], READ_WRITE_POLICY), 897 | (["--read-only"], READ_ONLY_POLICY), 898 | (["--write-only"], WRITE_ONLY_POLICY), 899 | (["--prefix", "my-prefix/"], PREFIX_POLICY), 900 | ( 901 | [ 902 | "--statement", 903 | '{"Effect": "Allow", "Action": "textract:*", "Resource": "*"}', 904 | ], 905 | EXTRA_STATEMENTS_POLICY, 906 | ), 907 | ), 908 | ) 909 | def test_policy(options, expected): 910 | runner = CliRunner() 911 | result = runner.invoke( 912 | cli, 913 | ["policy", "pytest-bucket-simonw-1"] + options, 914 | catch_exceptions=False, 915 | ) 916 | assert json.loads(result.output) == json.loads(expected) 917 | 918 | 919 | @pytest.mark.parametrize( 920 | "options,expected", 921 | ( 922 | ( 923 | [], 924 | ( 925 | "[\n" 926 | " {\n" 927 | ' "Key": "yolo-causeway-1.jpg",\n' 928 | ' "LastModified": "2019-12-26 17:00:22+00:00",\n' 929 | ' "ETag": "\\"87abea888b22089cabe93a0e17cf34a4\\"",\n' 930 | ' "Size": 5923104,\n' 931 | ' "StorageClass": "STANDARD"\n' 932 | " },\n" 933 | " {\n" 934 | ' "Key": "yolo-causeway-2.jpg",\n' 935 | ' "LastModified": "2019-12-26 17:00:22+00:00",\n' 936 | ' "ETag": "\\"87abea888b22089cabe93a0e17cf34a4\\"",\n' 937 | ' "Size": 5923104,\n' 938 | ' "StorageClass": "STANDARD"\n' 939 | " }\n" 940 | "]\n" 941 | ), 942 | ), 943 | ( 944 | ["--nl"], 945 | ( 946 | '{"Key": "yolo-causeway-1.jpg", "LastModified": "2019-12-26 17:00:22+00:00", "ETag": "\\"87abea888b22089cabe93a0e17cf34a4\\"", "Size": 5923104, "StorageClass": "STANDARD"}\n' 947 | '{"Key": "yolo-causeway-2.jpg", "LastModified": "2019-12-26 17:00:22+00:00", "ETag": "\\"87abea888b22089cabe93a0e17cf34a4\\"", "Size": 5923104, "StorageClass": "STANDARD"}\n' 948 | ), 949 | ), 950 | ( 951 | ["--tsv"], 952 | ( 953 | "Key\tLastModified\tETag\tSize\tStorageClass\tOwner\n" 954 | 'yolo-causeway-1.jpg\t2019-12-26 17:00:22+00:00\t"""87abea888b22089cabe93a0e17cf34a4"""\t5923104\tSTANDARD\t\n' 955 | 'yolo-causeway-2.jpg\t2019-12-26 17:00:22+00:00\t"""87abea888b22089cabe93a0e17cf34a4"""\t5923104\tSTANDARD\t\n' 956 | ), 957 | ), 958 | ( 959 | ["--csv"], 960 | ( 961 | "Key,LastModified,ETag,Size,StorageClass,Owner\n" 962 | 'yolo-causeway-1.jpg,2019-12-26 17:00:22+00:00,"""87abea888b22089cabe93a0e17cf34a4""",5923104,STANDARD,\n' 963 | 'yolo-causeway-2.jpg,2019-12-26 17:00:22+00:00,"""87abea888b22089cabe93a0e17cf34a4""",5923104,STANDARD,\n' 964 | ), 965 | ), 966 | ), 967 | ) 968 | def test_list_bucket(stub_s3, options, expected): 969 | stub_s3.add_response( 970 | "list_objects_v2", 971 | { 972 | "Contents": [ 973 | { 974 | "Key": "yolo-causeway-1.jpg", 975 | "LastModified": "2019-12-26 17:00:22+00:00", 976 | "ETag": '"87abea888b22089cabe93a0e17cf34a4"', 977 | "Size": 5923104, 978 | "StorageClass": "STANDARD", 979 | }, 980 | { 981 | "Key": "yolo-causeway-2.jpg", 982 | "LastModified": "2019-12-26 17:00:22+00:00", 983 | "ETag": '"87abea888b22089cabe93a0e17cf34a4"', 984 | "Size": 5923104, 985 | "StorageClass": "STANDARD", 986 | }, 987 | ] 988 | }, 989 | ) 990 | runner = 
CliRunner() 991 | with runner.isolated_filesystem(): 992 | result = runner.invoke(cli, ["list-bucket", "test-bucket"] + options) 993 | assert result.exit_code == 0 994 | assert result.output == expected 995 | 996 | 997 | def test_list_bucket_empty(stub_s3): 998 | stub_s3.add_response("list_objects_v2", {}) 999 | runner = CliRunner() 1000 | with runner.isolated_filesystem(): 1001 | result = runner.invoke(cli, ["list-bucket", "test-bucket"]) 1002 | assert result.exit_code == 0 1003 | assert result.output == "[]\n" 1004 | 1005 | 1006 | @pytest.fixture 1007 | def stub_iam_for_list_roles(stub_iam): 1008 | stub_iam.add_response( 1009 | "list_roles", 1010 | { 1011 | "Roles": [ 1012 | { 1013 | "RoleName": "role-one", 1014 | "Path": "/", 1015 | "Arn": "arn:aws:iam::462092780466:role/role-one", 1016 | "RoleId": "36b2eeee501c5952a8ac119f9e521", 1017 | "CreateDate": "2020-01-01 00:00:00+00:00", 1018 | } 1019 | ] 1020 | }, 1021 | ) 1022 | stub_iam.add_response( 1023 | "list_role_policies", 1024 | {"PolicyNames": ["policy-one"]}, 1025 | ) 1026 | stub_iam.add_response( 1027 | "get_role_policy", 1028 | { 1029 | "RoleName": "role-one", 1030 | "PolicyName": "policy-one", 1031 | "PolicyDocument": '{"foo": "bar}', 1032 | }, 1033 | ) 1034 | stub_iam.add_response( 1035 | "list_attached_role_policies", 1036 | {"AttachedPolicies": [{"PolicyArn": "arn:123:must-be-at-least-twenty-chars"}]}, 1037 | ) 1038 | stub_iam.add_response( 1039 | "get_policy", 1040 | {"Policy": {"DefaultVersionId": "v1"}}, 1041 | ) 1042 | stub_iam.add_response( 1043 | "get_policy_version", 1044 | {"PolicyVersion": {"CreateDate": "2020-01-01 00:00:00+00:00"}}, 1045 | ) 1046 | 1047 | 1048 | @pytest.mark.parametrize("details", (False, True)) 1049 | def test_list_roles_details(stub_iam_for_list_roles, details): 1050 | runner = CliRunner() 1051 | with runner.isolated_filesystem(): 1052 | result = runner.invoke(cli, ["list-roles"] + (["--details"] if details else [])) 1053 | assert result.exit_code == 0 1054 | expected = { 1055 | "RoleName": "role-one", 1056 | "Path": "/", 1057 | "Arn": "arn:aws:iam::462092780466:role/role-one", 1058 | "RoleId": "36b2eeee501c5952a8ac119f9e521", 1059 | "CreateDate": "2020-01-01 00:00:00+00:00", 1060 | "inline_policies": [ 1061 | { 1062 | "RoleName": "role-one", 1063 | "PolicyName": "policy-one", 1064 | "PolicyDocument": '{"foo": "bar}', 1065 | } 1066 | ], 1067 | "attached_policies": [ 1068 | { 1069 | "DefaultVersionId": "v1", 1070 | "PolicyVersion": {"CreateDate": "2020-01-01 00:00:00+00:00"}, 1071 | } 1072 | ], 1073 | } 1074 | if not details: 1075 | expected.pop("inline_policies") 1076 | expected.pop("attached_policies") 1077 | assert json.loads(result.output) == [expected] 1078 | 1079 | 1080 | def test_list_roles_csv(stub_iam_for_list_roles): 1081 | runner = CliRunner() 1082 | with runner.isolated_filesystem(): 1083 | result = runner.invoke(cli, ["list-roles", "--csv", "--details"]) 1084 | assert result.exit_code == 0 1085 | assert result.output == ( 1086 | "Path,RoleName,RoleId,Arn,CreateDate,AssumeRolePolicyDocument,Description,MaxSessionDuration,PermissionsBoundary,Tags,RoleLastUsed,inline_policies,attached_policies\n" 1087 | '/,role-one,36b2eeee501c5952a8ac119f9e521,arn:aws:iam::462092780466:role/role-one,2020-01-01 00:00:00+00:00,,,,,,,"[\n' 1088 | " {\n" 1089 | ' ""RoleName"": ""role-one"",\n' 1090 | ' ""PolicyName"": ""policy-one"",\n' 1091 | ' ""PolicyDocument"": ""{\\""foo\\"": \\""bar}""\n' 1092 | " }\n" 1093 | ']","[\n' 1094 | " {\n" 1095 | ' ""DefaultVersionId"": ""v1"",\n' 1096 | ' ""PolicyVersion"": 
{\n' 1097 | ' ""CreateDate"": ""2020-01-01 00:00:00+00:00""\n' 1098 | " }\n" 1099 | " }\n" 1100 | ']"\n' 1101 | ) 1102 | 1103 | 1104 | @pytest.mark.parametrize( 1105 | "files,patterns,expected,error", 1106 | ( 1107 | # Without arguments, return everything 1108 | (None, None, {"one.txt", "directory/two.txt", "directory/three.json"}, None), 1109 | # Positional arguments return just those files 1110 | (["one.txt"], None, {"one.txt"}, None), 1111 | (["directory/two.txt"], None, {"directory/two.txt"}, None), 1112 | (["one.txt"], None, {"one.txt"}, None), 1113 | ( 1114 | ["directory/two.txt", "directory/three.json"], 1115 | None, 1116 | {"directory/two.txt", "directory/three.json"}, 1117 | None, 1118 | ), 1119 | # An invalid positional argument downloads the valid files and shows an error 1120 | ( 1121 | ["directory/two.txt", "directory/bad.json"], 1122 | None, 1123 | {"directory/two.txt"}, 1124 | "Not found: directory/bad.json", 1125 | ), 1126 | # --pattern returns files matching the pattern 1127 | (None, ["*e.txt"], {"one.txt"}, None), 1128 | (None, ["*e.txt", "invalid-pattern"], {"one.txt"}, None), 1129 | (None, ["directory/*"], {"directory/two.txt", "directory/three.json"}, None), 1130 | # Positional arguments and patterns can be combined 1131 | (["one.txt"], ["directory/*.json"], {"one.txt", "directory/three.json"}, None), 1132 | ), 1133 | ) 1134 | @pytest.mark.parametrize("output", (None, "out")) 1135 | def test_get_objects(moto_s3_populated, output, files, patterns, expected, error): 1136 | runner = CliRunner() 1137 | with runner.isolated_filesystem(): 1138 | args = ["get-objects", "my-bucket"] + (files or []) 1139 | if patterns: 1140 | for pattern in patterns: 1141 | args.extend(["--pattern", pattern]) 1142 | if output: 1143 | args.extend(["--output", output]) 1144 | result = runner.invoke(cli, args, catch_exceptions=False) 1145 | if error: 1146 | assert result.exit_code != 0 1147 | else: 1148 | assert result.exit_code == 0 1149 | # Build a set of all files in the output directory using glob 1150 | output_dir = pathlib.Path(output or ".") 1151 | all_files = { 1152 | str(p.relative_to(output_dir)) 1153 | for p in output_dir.glob("**/*") 1154 | if p.is_file() 1155 | } 1156 | assert all_files == expected 1157 | if error: 1158 | assert error in result.output 1159 | 1160 | 1161 | @pytest.mark.parametrize( 1162 | "args,expected,expected_output", 1163 | ( 1164 | (["."], {"one.txt", "directory/two.txt", "directory/three.json"}, None), 1165 | (["one.txt"], {"one.txt"}, None), 1166 | (["directory"], {"directory/two.txt", "directory/three.json"}, None), 1167 | ( 1168 | ["directory", "--prefix", "o"], 1169 | {"o/directory/two.txt", "o/directory/three.json"}, 1170 | None, 1171 | ), 1172 | # --dry-run tests 1173 | ( 1174 | ["directory", "--prefix", "o", "--dry-run"], 1175 | None, 1176 | ( 1177 | "directory/two.txt => s3://my-bucket/o/directory/two.txt\n" 1178 | "directory/three.json => s3://my-bucket/o/directory/three.json\n" 1179 | ), 1180 | ), 1181 | ( 1182 | [".", "--prefix", "p"], 1183 | {"p/one.txt", "p/directory/two.txt", "p/directory/three.json"}, 1184 | None, 1185 | ), 1186 | ), 1187 | ) 1188 | def test_put_objects(moto_s3, args, expected, expected_output): 1189 | runner = CliRunner(mix_stderr=False) 1190 | with runner.isolated_filesystem(): 1191 | # Create files 1192 | pathlib.Path("one.txt").write_text("one") 1193 | pathlib.Path("directory").mkdir() 1194 | pathlib.Path("directory/two.txt").write_text("two") 1195 | pathlib.Path("directory/three.json").write_text('{"three": 3}') 1196 | result = runner.invoke( 1197 | cli, ["put-objects", 
"my-bucket"] + args, catch_exceptions=False 1198 | ) 1199 | assert result.exit_code == 0, result.output 1200 | assert set(result.output.split("\n")) == set( 1201 | (expected_output or "").split("\n") 1202 | ) 1203 | # Check files were uploaded 1204 | keys = { 1205 | obj["Key"] 1206 | for obj in moto_s3.list_objects(Bucket="my-bucket").get("Contents") or [] 1207 | } 1208 | assert keys == (expected or set()) 1209 | 1210 | 1211 | @pytest.mark.parametrize( 1212 | "args,expected,expected_error", 1213 | ( 1214 | ([], None, "Error: Specify one or more keys or use --prefix"), 1215 | ( 1216 | ["one.txt", "--prefix", "directory/"], 1217 | None, 1218 | "Cannot pass both keys and --prefix", 1219 | ), 1220 | (["one.txt"], ["directory/two.txt", "directory/three.json"], None), 1221 | (["one.txt", "directory/two.txt"], ["directory/three.json"], None), 1222 | (["--prefix", "directory/"], ["one.txt"], None), 1223 | ), 1224 | ) 1225 | def test_delete_objects(moto_s3_populated, args, expected, expected_error): 1226 | runner = CliRunner(mix_stderr=False) 1227 | with runner.isolated_filesystem(): 1228 | result = runner.invoke( 1229 | cli, ["delete-objects", "my-bucket"] + args, catch_exceptions=False 1230 | ) 1231 | if expected_error: 1232 | assert result.exit_code != 0 1233 | assert expected_error in result.stderr 1234 | else: 1235 | assert result.exit_code == 0, result.output 1236 | # Check expected files are left in bucket 1237 | keys = { 1238 | obj["Key"] 1239 | for obj in moto_s3_populated.list_objects(Bucket="my-bucket").get( 1240 | "Contents" 1241 | ) 1242 | or [] 1243 | } 1244 | assert keys == set(expected) 1245 | 1246 | 1247 | @pytest.mark.parametrize("arg", ("-d", "--dry-run")) 1248 | def test_delete_objects_dry_run(moto_s3_populated, arg): 1249 | runner = CliRunner(mix_stderr=False) 1250 | 1251 | def get_keys(): 1252 | return { 1253 | obj["Key"] 1254 | for obj in moto_s3_populated.list_objects(Bucket="my-bucket").get( 1255 | "Contents" 1256 | ) 1257 | or [] 1258 | } 1259 | 1260 | with runner.isolated_filesystem(): 1261 | before_keys = get_keys() 1262 | result = runner.invoke( 1263 | cli, ["delete-objects", "my-bucket", "--prefix", "directory/", arg] 1264 | ) 1265 | assert result.exit_code == 0 1266 | assert result.output == ( 1267 | "The following keys would be deleted:\n" 1268 | "directory/three.json\n" 1269 | "directory/two.txt\n" 1270 | ) 1271 | after_keys = get_keys() 1272 | assert before_keys == after_keys 1273 | --------------------------------------------------------------------------------