├── aioboto3
│   ├── s3
│   │   └── __init__.py
│   ├── dynamodb
│   │   ├── __init__.py
│   │   └── table.py
│   ├── experimental
│   │   ├── __init__.py
│   │   └── async_chalice.py
│   ├── resources
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── response.py
│   │   ├── action.py
│   │   ├── collection.py
│   │   └── factory.py
│   ├── __init__.py
│   └── session.py
├── docs
│   ├── readme.rst
│   ├── changelog.rst
│   ├── contributing.rst
│   ├── .gitignore
│   ├── index.rst
│   ├── installation.rst
│   ├── chalice.rst
│   ├── cse.rst
│   ├── Makefile
│   ├── make.bat
│   ├── conf.py
│   └── usage.rst
├── Makefile
├── MANIFEST.in
├── shell.nix
├── .github
│   ├── ISSUE_TEMPLATE.md
│   ├── dependabot.yml
│   └── workflows
│       └── CI.yml
├── .editorconfig
├── tests
│   ├── chalice_app
│   │   └── __init__.py
│   ├── mock_server.py
│   ├── test_basic.py
│   ├── test_experimental_chalice.py
│   ├── conftest.py
│   ├── test_patches.py
│   ├── test_dynamo.py
│   ├── test_s3_cse.py
│   └── test_s3.py
├── .readthedocs.yml
├── resources
│   ├── S3-CSE
│   │   ├── settings.gradle
│   │   ├── README.md
│   │   ├── build.gradle
│   │   └── src
│   │       └── main
│   │           └── java
│   │               └── demo
│   │                   └── UploadObjectKMSKey.java
│   └── make_pr.py
├── .gitignore
├── pyproject.toml
├── CONTRIBUTING.rst
├── README.rst
├── LICENSE
└── CHANGELOG.rst
/aioboto3/s3/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aioboto3/dynamodb/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aioboto3/experimental/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /aioboto3/resources/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/readme.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../README.rst 2 | -------------------------------------------------------------------------------- /docs/changelog.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../CHANGELOG.rst 2 | -------------------------------------------------------------------------------- /docs/contributing.rst: -------------------------------------------------------------------------------- 1 | ..
include:: ../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /docs/.gitignore: -------------------------------------------------------------------------------- 1 | /aioboto3.rst 2 | /aioboto3.*.rst 3 | /modules.rst 4 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | lint: 3 | uv run python -m flake8 aioboto3 tests 4 | 5 | test: 6 | uv run pytest 7 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include CONTRIBUTING.rst 2 | include CHANGELOG.rst 3 | include LICENSE 4 | include README.rst 5 | 6 | recursive-include tests * 7 | recursive-exclude * __pycache__ 8 | recursive-exclude * *.py[co] 9 | 10 | recursive-include docs *.rst conf.py Makefile make.bat *.jpg *.png *.gif 11 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | { pkgs ? import <nixpkgs> {} }: 2 | pkgs.mkShell { 3 | # nativeBuildInputs is usually what you want -- tools you need to run 4 | nativeBuildInputs = with pkgs; [ 5 | gnumake 6 | git 7 | 8 | python312 9 | # poetry 10 | uv 11 | ]; 12 | hardeningDisable = [ "all" ]; 13 | } 14 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | * Async AWS SDK for Python version: 2 | * Python version: 3 | * Operating System: 4 | 5 | ### Description 6 | 7 | Describe what you were trying to get done. 8 | Tell us what happened, what went wrong, and what you expected to happen. 9 | 10 | ### What I Did 11 | 12 | ``` 13 | Paste the command(s) you ran and the output. 14 | If there was a crash, please include the traceback here. 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to Async AWS SDK for Python's documentation! 2 | ==================================================== 3 | 4 | Contents: 5 | 6 | ..
toctree:: 7 | :maxdepth: 2 8 | 9 | readme 10 | installation 11 | usage 12 | cse 13 | chalice 14 | contributing 15 | changelog 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | 3 | root = true 4 | 5 | [*] 6 | indent_style = space 7 | indent_size = 4 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | charset = utf-8 11 | end_of_line = lf 12 | 13 | [.github/workflows/*.yml] 14 | indent_size = 2 15 | 16 | [*.bat] 17 | indent_style = tab 18 | end_of_line = crlf 19 | 20 | [LICENSE] 21 | insert_final_newline = false 22 | 23 | [Makefile] 24 | indent_style = tab 25 | -------------------------------------------------------------------------------- /tests/chalice_app/__init__.py: -------------------------------------------------------------------------------- 1 | from aioboto3.experimental.async_chalice import AsyncChalice 2 | 3 | app = AsyncChalice(app_name='testclient') 4 | 5 | 6 | @app.route('/hello/{name}') 7 | async def hello(name): 8 | return {'hello': name} 9 | 10 | 11 | @app.route('/list_buckets') 12 | async def get_list_buckets(): 13 | async with app.aioboto3.client("s3") as s3: 14 | resp = await s3.list_buckets() 15 | 16 | return {"buckets": [bucket['Name'] for bucket in resp['Buckets']]} 17 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | build: 4 | os: "ubuntu-24.04" 5 | tools: 6 | python: "3.13" 7 | jobs: 8 | pre_create_environment: 9 | - asdf plugin add uv 10 | - asdf install uv latest 11 | - asdf global uv latest 12 | create_environment: 13 | - uv venv "${READTHEDOCS_VIRTUALENV_PATH}" 14 | install: 15 | - UV_PROJECT_ENVIRONMENT="${READTHEDOCS_VIRTUALENV_PATH}" uv sync --frozen --group docs 16 | 17 | sphinx: 18 | configuration: docs/conf.py 19 | -------------------------------------------------------------------------------- /aioboto3/resources/base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import warnings 3 | 4 | from boto3.resources.base import ServiceResource 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | class AIOBoto3ServiceResource(ServiceResource): 10 | async def __aenter__(self): 11 | return self 12 | 13 | async def __aexit__(self, exc_type, exc_val, exc_tb): 14 | await self.meta.client.__aexit__(exc_type, exc_val, exc_tb) 15 | 16 | def close(self): 17 | warnings.warn("This should not be called anymore", DeprecationWarning) 18 | return self.meta.client.close() 19 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Keep GitHub Actions up to date with GitHub's Dependabot... 
2 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot 3 | # https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#package-ecosystem 4 | version: 2 5 | updates: 6 | - package-ecosystem: github-actions 7 | directory: / 8 | groups: 9 | github-actions: 10 | patterns: 11 | - "*" # Group all Actions updates into a single larger pull request 12 | schedule: 13 | interval: weekly 14 | -------------------------------------------------------------------------------- /resources/S3-CSE/settings.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * This settings file was generated by the Gradle 'init' task. 3 | * 4 | * The settings file is used to specify which projects to include in your build. 5 | * In a single project build this file can be empty or even removed. 6 | * 7 | * Detailed information about configuring a multi-project build in Gradle can be found 8 | * in the user guide at https://docs.gradle.org/4.3/userguide/multi_project_builds.html 9 | */ 10 | 11 | /* 12 | // To declare projects as part of a multi-project build use the 'include' method 13 | include 'shared' 14 | include 'api' 15 | include 'services:webservice' 16 | */ 17 | 18 | rootProject.name = 's3cse' 19 | -------------------------------------------------------------------------------- /aioboto3/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """Top-level package for Async AWS SDK for Python.""" 4 | import logging 5 | from aioboto3.session import Session 6 | 7 | __author__ = """Terri Cain""" 8 | __email__ = 'terri@dolphincorp.co.uk' 9 | 10 | 11 | try: 12 | from aioboto3._version import __version__ 13 | except ImportError:  # aioboto3/_version.py is generated by setuptools-scm and may not exist in a raw checkout 14 | __version__ = "0.0.0" 15 | 16 | 17 | # Set up logging to ``/dev/null`` like a library is supposed to.
18 | # http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library 19 | class NullHandler(logging.Handler): 20 | def emit(self, record): 21 | pass 22 | 23 | 24 | logging.getLogger('boto3').addHandler(NullHandler()) 25 | -------------------------------------------------------------------------------- /tests/mock_server.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from moto.server import ThreadedMotoServer 4 | 5 | _proxy_bypass = { 6 | "http": None, 7 | "https": None, 8 | } 9 | 10 | 11 | # TODO: in theory this can run once for all tests, not once per service 12 | def start_service(host, port) -> ThreadedMotoServer: 13 | server = ThreadedMotoServer(ip_address=host, port=port, verbose=False) 14 | server.start() 15 | return server 16 | 17 | 18 | def stop_process(server: ThreadedMotoServer): 19 | server.stop() 20 | 21 | 22 | @pytest.fixture(scope="session") 23 | def moto_server(): 24 | host = "localhost" 25 | port = 5001 26 | url = "http://{host}:{port}".format(host=host, port=port) 27 | process = start_service(host, port) 28 | yield url 29 | stop_process(process) 30 | -------------------------------------------------------------------------------- /tests/test_basic.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """Tests for `aioboto3` package.""" 5 | 6 | import pytest 7 | from aiobotocore.client import AioBaseClient 8 | 9 | import aioboto3 10 | 11 | 12 | @pytest.mark.asyncio 13 | async def test_getting_client(): 14 | """Simple getting of client.""" 15 | session = aioboto3.Session() 16 | 17 | async with session.client('ssm', region_name='eu-central-1') as client: 18 | assert isinstance(client, AioBaseClient) 19 | 20 | 21 | @pytest.mark.asyncio 22 | async def test_getting_resource_cm(): 23 | """Simple getting of resource.""" 24 | session = aioboto3.Session() 25 | 26 | async with session.resource('dynamodb', region_name='eu-central-1') as resource: 27 | assert isinstance(resource.meta.client, AioBaseClient) 28 | -------------------------------------------------------------------------------- /resources/S3-CSE/README.md: -------------------------------------------------------------------------------- 1 | # JAVA S3 Client-Side Encryption Example 2 | 3 | It's horrible Java 4 | 5 | You'll also need bouncycastle set up, then you can run `gradle fatJar` 6 | 7 | ## Options 8 | 9 | This will output the options of the jar 10 | 11 | ``` 12 | java -jar build/libs/s3cse-1.0.jar -h 13 | ``` 14 | 15 | ## KMS Encryption 16 | 17 | ``` 18 | java -jar build/libs/s3cse-1.0.jar --crypto-type kms --bucket-name bucket1 --key-name test-cse-kms \ 19 | --region eu-west-1 --kms-key-id alias/someKey --authenticated-crypto 20 | ``` 21 | 22 | 23 | ## Symmetric Encryption 24 | 25 | ``` 26 | java -jar build/libs/s3cse-1.0.jar --crypto-type symmetric --bucket-name bucket1 --key-name test-cse-symmetric \ 27 | --region eu-west-1 --key-dir ./keys 28 | ``` 29 | 30 | ## Asymmetric Encryption 31 | 32 | ``` 33 | java -jar build/libs/s3cse-1.0.jar --crypto-type asymmetric --bucket-name bucket1 --key-name test-cse-asymmetric \ 34 | --region eu-west-1 --key-dir ./keys 35 | ``` 36 | -------------------------------------------------------------------------------- /tests/test_experimental_chalice.py: -------------------------------------------------------------------------------- 1 | import aioboto3 2 | import boto3 3 | import pytest 4 | from chalice_app import app 5 |
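# chalice.test.Client (imported below) drives the Chalice app in-process, so these tests exercise the async routes without deploying anything to Lambda or API Gateway.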
6 | from chalice.test import Client 7 | 8 | 9 | def test_chalice_async_http(moto_patch, region, bucket_name): 10 | session = aioboto3.Session() 11 | 12 | app.aioboto3 = session 13 | 14 | with Client(app) as client: 15 | response = client.http.get('/hello/myname') 16 | assert response.status_code == 200 17 | assert response.json_body['hello'] == 'myname' 18 | 19 | 20 | def test_chalice_async_http_s3_client(moto_patch, region, bucket_name): 21 | session = aioboto3.Session() 22 | 23 | app.aioboto3 = session 24 | 25 | s3 = boto3.client('s3', region_name=region) 26 | s3.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 27 | resp = s3.list_buckets() 28 | bucket_response = [bucket['Name'] for bucket in resp['Buckets']] 29 | 30 | with Client(app) as client: 31 | response = client.http.get('/list_buckets') 32 | assert response.status_code == 200 33 | assert response.json_body['buckets'] == bucket_response 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | resources/S3-CSE/.gradle/ 2 | resources/S3-CSE/keys 3 | resources/S3-CSE/.idea 4 | resources/S3-CSE/out 5 | 6 | aioboto3/_version.py 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | env/ 19 | build/ 20 | develop-eggs/ 21 | dist/ 22 | downloads/ 23 | eggs/ 24 | .eggs/ 25 | lib/ 26 | lib64/ 27 | parts/ 28 | sdist/ 29 | var/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | 34 | # PyInstaller 35 | # Usually these files are written by a python script from a template 36 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 37 | *.manifest 38 | *.spec 39 | 40 | # Installer logs 41 | pip-log.txt 42 | pip-delete-this-directory.txt 43 | 44 | # Unit test / coverage reports 45 | htmlcov/ 46 | .tox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *,cover 53 | .hypothesis/ 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | 62 | # Sphinx documentation 63 | docs/_build/ 64 | 65 | # PyBuilder 66 | target/ 67 | 68 | # pyenv python configuration file 69 | .python-version 70 | .pytest_cache/ 71 | 72 | # idea 73 | .idea/ 74 | -------------------------------------------------------------------------------- /docs/installation.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Installation 5 | ============ 6 | 7 | 8 | Stable release 9 | -------------- 10 | 11 | To install Async AWS SDK for Python, run this command in your terminal: 12 | 13 | .. code-block:: console 14 | 15 | $ pip install aioboto3 16 | 17 | This is the preferred method to install Async AWS SDK for Python, as it will always install the most recent stable release. 18 | 19 | If you don't have `pip`_ installed, this `Python installation guide`_ can guide 20 | you through the process. 21 | 22 | .. _pip: https://pip.pypa.io 23 | .. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/ 24 | 25 | 26 | From sources 27 | ------------ 28 | 29 | The sources for Async AWS SDK for Python can be downloaded from the `Github repo`_. 30 | 31 | You can either clone the public repository: 32 | 33 | .. 
code-block:: console 33 | 34 | $ git clone https://github.com/terrycain/aioboto3.git 35 | 36 | Or download the `tarball`_: 37 | 38 | .. code-block:: console 39 | 40 | $ curl -OL https://github.com/terrycain/aioboto3/tarball/master 41 | 42 | Once you have a copy of the source, you can install it with: 43 | 44 | .. code-block:: console 45 | 46 | $ pip install . 47 | 48 | 49 | .. _Github repo: https://github.com/terrycain/aioboto3 50 | .. _tarball: https://github.com/terrycain/aioboto3/tarball/master 51 | -------------------------------------------------------------------------------- /docs/chalice.rst: -------------------------------------------------------------------------------- 1 | ====================================== 2 | AWS Chalice Integration (EXPERIMENTAL) 3 | ====================================== 4 | 5 | How it works 6 | ------------ 7 | 8 | Using ``aioboto3.experimental.async_chalice.AsyncChalice`` as the main app entrypoint for a chalice app adds some shims in so 9 | that you can use ``async def`` functions with HTTP routes normally. Additionally, ``app.aioboto3`` holds an aioboto3 ``Session`` 10 | object which can be used to get S3 clients etc. Passing a session in to ``AsyncChalice`` overrides the default empty session. 11 | 12 | Chalice has some interesting quirks, most notably that the event loop can disappear between invocations, so storing references 13 | to anything which could hold on to the current event loop is not recommended. Because of this, caching aioboto3 clients and resources is not 14 | a good idea, and since this code is designed to be run in a Lambda, said caching buys you little anyway. 15 | 16 | The Chalice integration is very experimental; until someone runs it for a while and has faith in it, I would not recommend using it for 17 | anything critical. 18 | 19 | Example 20 | ------- 21 | 22 | .. code-block:: python 23 | 24 | from aioboto3.experimental.async_chalice import AsyncChalice 25 | 26 | app = AsyncChalice(app_name='testclient') 27 | 28 | 29 | @app.route('/hello/{name}') 30 | async def hello(name): 31 | return {'hello': name} 32 | 33 | 34 | @app.route('/list_buckets') 35 | async def get_list_buckets(): 36 | async with app.aioboto3.client("s3") as s3: 37 | resp = await s3.list_buckets() 38 | 39 | return {"buckets": [bucket['Name'] for bucket in resp['Buckets']]} 40 | -------------------------------------------------------------------------------- /aioboto3/experimental/async_chalice.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | from typing import Optional 3 | 4 | from chalice import Chalice 5 | from chalice.app import RestAPIEventHandler 6 | 7 | from aioboto3 import Session 8 | 9 | 10 | class AsyncRestAPIEventHandler(RestAPIEventHandler): 11 | def _get_view_function_response(self, view_function, function_args): 12 | # Wrap the view_function so that we can return either the normal response 13 | # or, if it's a coroutine, run it in an event loop first. 14 | # Saves duplicating the whole function.
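# Chalice may tear down the event loop between invocations (see docs/chalice.rst), so a fresh loop is created per request below rather than caching one.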
15 | def _fake_view_function(**kwargs): 16 | response = view_function(**kwargs) 17 | if asyncio.iscoroutine(response): 18 | # Always run in a new loop as chalice would close an existing one anyway 19 | new_loop = asyncio.new_event_loop() 20 | response = new_loop.run_until_complete(response) 21 | new_loop.close() 22 | 23 | return response 24 | return super(AsyncRestAPIEventHandler, self)._get_view_function_response(_fake_view_function, function_args) 25 | 26 | 27 | class AsyncChalice(Chalice): 28 | def __init__(self, *args, aioboto3_session: Optional[Session] = None, **kwargs): 29 | super(AsyncChalice, self).__init__(*args, **kwargs) 30 | 31 | self.aioboto3 = aioboto3_session or Session() 32 | 33 | def __call__(self, event, context): 34 | self.lambda_context = context 35 | handler = AsyncRestAPIEventHandler( 36 | self.routes, self.api, self.log, self.debug, 37 | middleware_handlers=self._get_middleware_handlers('http') 38 | ) 39 | self.current_request = handler.create_request_object(event, context) 40 | return handler(event, context) 41 | 42 | -------------------------------------------------------------------------------- /resources/S3-CSE/build.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | * This build file was generated by the Gradle 'init' task. 3 | * 4 | * This generated file contains a sample Java Library project to get you started. 5 | * For more details take a look at the Java Libraries chapter in the Gradle 6 | * user guide available at https://docs.gradle.org/4.3/userguide/java_library_plugin.html 7 | */ 8 | 9 | version = '1.0' 10 | 11 | // Apply the java-library plugin to add support for Java Library 12 | apply plugin: 'java-library' 13 | 14 | // In this section you declare where to find the dependencies of your project 15 | repositories { 16 | // Use jcenter for resolving your dependencies. 17 | // You can declare any Maven/Ivy/file repository here. 18 | jcenter() 19 | } 20 | 21 | dependencies { 22 | // This dependency is exported to consumers, that is to say found on their compile classpath. 23 | api 'org.apache.commons:commons-math3:3.6.1' 24 | 25 | // This dependency is used internally, and not exposed to consumers on their own compile classpath. 26 | implementation 'com.google.guava:guava:23.0' 27 | 28 | compile group: 'commons-cli', name: 'commons-cli', version: '1.4' 29 | compile 'com.amazonaws:aws-java-sdk-bom:1.11.503' 30 | compile 'com.amazonaws:aws-java-sdk-s3:1.11.503' 31 | } 32 | 33 | sourceSets { 34 | main.java.srcDir "src/main/java" 35 | } 36 | 37 | task run(type:JavaExec) { 38 | main = 'demo.UploadObjectKMSKey' 39 | classpath = sourceSets.main.runtimeClasspath 40 | 41 | } 42 | 43 | task fatJar(type: Jar) { 44 | manifest { 45 | attributes 'Implementation-Title': 'S3 CSE Example', 'Implementation-Version': version, 'Main-Class': 'demo.UploadObjectKMSKey' 46 | } 47 | baseName = project.name 48 | from { configurations.compile.collect { it.isDirectory() ? 
it : zipTree(it) } } 49 | with jar 50 | } -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "aioboto3" 3 | description = "Async boto3 wrapper" 4 | authors = [ 5 | {name = "Terri Cain", email = "terri@dolphincorp.co.uk"}, 6 | ] 7 | license = {text = "Apache-2.0"} 8 | requires-python = ">=3.9" 9 | dependencies = [ 10 | "aiobotocore[boto3]==2.25.1", 11 | "aiofiles>=23.2.1", 12 | ] 13 | 14 | readme = "README.rst" 15 | keywords = [ 16 | "aioboto3", 17 | "boto3", 18 | "aws", 19 | ] 20 | classifiers = [ 21 | "Development Status :: 5 - Production/Stable", 22 | "Intended Audience :: Developers", 23 | "License :: OSI Approved :: Apache Software License", 24 | "Natural Language :: English", 25 | "Programming Language :: Python :: 3", 26 | "Programming Language :: Python :: 3.9", 27 | "Programming Language :: Python :: 3.10", 28 | "Programming Language :: Python :: 3.11", 29 | "Programming Language :: Python :: 3.12", 30 | "Programming Language :: Python :: 3.13", 31 | "Programming Language :: Python :: 3.14", 32 | ] 33 | dynamic = ["version"] 34 | 35 | [project.urls] 36 | homepage = "https://github.com/terricain/aioboto3" 37 | repository = "https://github.com/terricain/aioboto3" 38 | documentation = "https://readthedocs.org/projects/aioboto3/" 39 | 40 | [project.optional-dependencies] 41 | s3cse = [ 42 | "cryptography>=44.0.1", 43 | ] 44 | chalice = [ 45 | "chalice>=1.24.0", 46 | ] 47 | 48 | [dependency-groups] 49 | dev = [ 50 | "pytest", 51 | "pytest-cov", 52 | "flake8", 53 | "dill", 54 | "pygithub", 55 | "requests", 56 | "aiofiles", 57 | "moto[server]", 58 | "chalice<2.0.0,>=1.24.0", 59 | "pytest-asyncio<1.0.0,>=0.19.0", 60 | ] 61 | docs = [ 62 | "sphinx", 63 | "sphinx-rtd-theme<3.0.2,>=3.0.0", 64 | "sphinx-autodoc-typehints", 65 | ] 66 | 67 | [tool.uv] 68 | default-groups = [ 69 | "docs", 70 | "dev", 71 | ] 72 | required-version = ">=0.6.1" 73 | cache-keys = [{ file = "pyproject.toml" }, { git = { commit = true, tags = true } }] 74 | 75 | [build-system] 76 | requires = ["setuptools>=68.2.0", "setuptools-scm>=8"] 77 | build-backend = "setuptools.build_meta" 78 | 79 | [tool.setuptools.dynamic] 80 | readme = { file = ["README.rst", "CHANGELOG.rst"] } 81 | 82 | [tool.setuptools.packages.find] 83 | include = ["aioboto3*"] 84 | 85 | [tool.setuptools_scm] 86 | version_file = "aioboto3/_version.py" 87 | 88 | [tool.pytest.ini_options] 89 | testpaths = ["tests"] 90 | asyncio_default_fixture_loop_scope = "session" 91 | -------------------------------------------------------------------------------- /CONTRIBUTING.rst: -------------------------------------------------------------------------------- 1 | .. highlight:: shell 2 | 3 | ============ 4 | Contributing 5 | ============ 6 | 7 | Contributions are welcome, and they are greatly appreciated! Every 8 | little bit helps, and credit will always be given. 9 | 10 | You can contribute in many ways: 11 | 12 | Types of Contributions 13 | ---------------------- 14 | 15 | Report Bugs 16 | ~~~~~~~~~~~ 17 | 18 | Report bugs at https://github.com/terrycain/aioboto3/issues. 19 | 20 | If you are reporting a bug, please include: 21 | 22 | * Your operating system name and version. 23 | * Any details about your local setup that might be helpful in troubleshooting. 24 | * Detailed steps to reproduce the bug. 25 | 26 | Fix Bugs 27 | ~~~~~~~~ 28 | 29 | Look through the GitHub issues for bugs. 
Anything tagged with "bug" 30 | and "help wanted" is open to whoever wants to implement it. 31 | 32 | Implement Features 33 | ~~~~~~~~~~~~~~~~~~ 34 | 35 | Look through the GitHub issues for features. Anything tagged with "enhancement" 36 | and "help wanted" is open to whoever wants to implement it. 37 | 38 | Write Documentation 39 | ~~~~~~~~~~~~~~~~~~~ 40 | 41 | Async AWS SDK for Python could always use more documentation, whether as part of the 42 | official Async AWS SDK for Python docs, in docstrings, or even on the web in blog posts, 43 | articles, and such. 44 | 45 | Submit Feedback 46 | ~~~~~~~~~~~~~~~ 47 | 48 | The best way to send feedback is to file an issue at https://github.com/terrycain/aioboto3/issues. 49 | 50 | If you are proposing a feature: 51 | 52 | * Explain in detail how it would work. 53 | * Keep the scope as narrow as possible, to make it easier to implement. 54 | * Remember that this is a volunteer-driven project, and that contributions 55 | are welcome :) 56 | 57 | Get Started! 58 | ------------ 59 | 60 | Ready to contribute? Here's how to set up `aioboto3` for local development. 61 | 62 | 1. Fork the `aioboto3` repo on GitHub. 63 | 2. Clone your fork locally:: 64 | 65 | git clone git@github.com:your_name_here/aioboto3.git 66 | 67 | 3. Install your local copy into a virtualenv:: 68 | 69 | cd aioboto3/ 70 | uv sync 71 | 72 | 4. Create a branch for local development:: 73 | 74 | git checkout -b name-of-your-bugfix-or-feature 75 | 76 | Now you can make your changes locally. 77 | 78 | 5. When you're done making changes, check that your changes pass flake8 and the tests (CI will also run them against all supported Python versions):: 79 | 80 | make lint 81 | make test 82 | 83 | 6. Commit your changes and push your branch to GitHub:: 84 | 85 | git add . 86 | git commit -m "Your detailed description of your changes." 87 | git push origin name-of-your-bugfix-or-feature 88 | 89 | 7. Submit a pull request through the GitHub website. 90 | 91 | Pull Request Guidelines 92 | ----------------------- 93 | 94 | Before you submit a pull request, check that it meets these guidelines: 95 | 96 | 1. The pull request should include tests. 97 | 2. If the pull request adds functionality, the docs should be updated. Put 98 | your new functionality into a function with a docstring, and add the 99 | feature to the list in README.rst. 100 | 3. The pull request should work for Python 3.9 through 3.13. 101 | 4. Check https://github.com/terrycain/aioboto3/actions/workflows/CI.yml 102 | and make sure that the tests pass.
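To try one specific interpreter locally before CI does, ``uv`` can pin it for a single run (assuming that Python version is available to uv)::

    uv run --python 3.9 pytest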
103 | 104 | Tips 105 | ---- 106 | 107 | To run a subset of the tests you can specify a specific test module or function, e.g.:: 108 | 109 | uv run pytest tests/test_basic.py 110 | uv run pytest tests/test_basic.py::test_getting_client 111 | -------------------------------------------------------------------------------- /.github/workflows/CI.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - '*' 7 | tags: 8 | - '*' 9 | pull_request: 10 | branches: 11 | - 'main' 12 | 13 | jobs: 14 | build: 15 | name: Build distribution 📦 16 | runs-on: ubuntu-24.04 17 | timeout-minutes: 5 18 | steps: 19 | - uses: actions/checkout@v5 20 | - name: Install uv 21 | uses: astral-sh/setup-uv@v7 22 | - name: Build distribution 📦 23 | run: uv build 24 | - name: Check distribution 📦 25 | run: uvx twine check --strict dist/* 26 | - name: Upload distribution 📦 27 | uses: actions/upload-artifact@v5 28 | with: 29 | name: python-package-distributions 30 | path: dist/ 31 | 32 | test: 33 | name: Test Python ${{ matrix.python-version }} on ${{ matrix.os }} 34 | runs-on: ${{ matrix.os }} 35 | continue-on-error: ${{ matrix.experimental }} 36 | strategy: 37 | matrix: 38 | python-version: 39 | - "3.9" 40 | - "3.10" 41 | - "3.11" 42 | - "3.12" 43 | - "3.13" 44 | # - "3.14" # Disabled until moto[server] updates its dependencies so that pydantic rust parts stop erroring. 45 | # pydantic-core` (v2.33.2) <- `pydantic` (v2.11.7) <- `aws-sam-translator` (v1.99.0) <- `cfn-lint` (v1.38.3) <- `moto[server]` (v5.1.10) 46 | os: 47 | - ubuntu-24.04 48 | - ubuntu-24.04-arm 49 | include: 50 | - experimental: false 51 | - experimental: true 52 | os: ubuntu-24.04-arm # deal with flaky runners 53 | - upload-coverage: false 54 | - upload-coverage: true 55 | python-version: 3.11 56 | os: ubuntu-24.04 57 | fail-fast: false 58 | env: 59 | UV_FROZEN: 1 60 | timeout-minutes: 10 61 | steps: 62 | - name: Checkout 63 | uses: actions/checkout@v5 64 | with: 65 | submodules: true 66 | - name: Install uv 67 | uses: astral-sh/setup-uv@v7 68 | with: 69 | python-version: ${{ matrix.python-version }} 70 | - name: Run unittests 71 | env: 72 | COLOR: 'yes' 73 | run: | 74 | uv run pytest 75 | 76 | check: # This job does nothing and is only used for the branch protection 77 | if: always() 78 | needs: 79 | - build 80 | - test 81 | runs-on: ubuntu-24.04 82 | timeout-minutes: 5 83 | 84 | steps: 85 | - name: Decide whether the needed jobs succeeded or failed 86 | uses: re-actors/alls-green@release/v1 87 | with: 88 | jobs: ${{ toJSON(needs) }} 89 | 90 | publish: 91 | name: Publish Python 🐍 distribution 📦 to PyPI 92 | if: github.ref_type == 'tag' 93 | needs: 94 | - check 95 | runs-on: ubuntu-24.04 96 | environment: 97 | name: pypi 98 | url: https://pypi.org/project/aioboto3/${{ github.ref_name }} 99 | permissions: 100 | id-token: write 101 | timeout-minutes: 5 102 | steps: 103 | - name: Download distribution 📦 104 | uses: actions/download-artifact@v6 105 | with: 106 | name: python-package-distributions 107 | path: dist/ 108 | - name: Check if distribution 📦 names match git tag 109 | run: | 110 | VERSION=${GITHUB_REF_NAME#v} # This removes the 'v' from the tag 111 | test -f "dist/aioboto3-${VERSION}.tar.gz" 112 | test -f "dist/aioboto3-${VERSION}-py3-none-any.whl" 113 | - name: Publish distribution 📦 to PyPI 114 | uses: pypa/gh-action-pypi-publish@release/v1 115 | -------------------------------------------------------------------------------- /resources/make_pr.py: 
-------------------------------------------------------------------------------- 1 | import importlib.machinery 2 | import importlib.util 3 | import setuptools 4 | import pkg_resources 5 | import requests 6 | import sys 7 | import os 8 | from github import Github 9 | 10 | QUIT_EARLY_EXIT_CODE = 38 11 | 12 | 13 | def extract_values_from_setuptools(): 14 | # Won't work if setup.py doesn't use setuptools 15 | loader = importlib.machinery.SourceFileLoader('tmp', 'setup.py') 16 | spec = importlib.util.spec_from_loader(loader.name, loader) 17 | mod = importlib.util.module_from_spec(spec) 18 | 19 | setup_results = {} 20 | 21 | def fakesetup(**kwargs): 22 | setup_results.update(kwargs) 23 | 24 | setuptools.setup = fakesetup 25 | loader.exec_module(mod) 26 | return setup_results 27 | 28 | 29 | # Get current required version for aiobotocore 30 | print('Getting aiobotocore dependency version') 31 | setup_kwargs = extract_values_from_setuptools() 32 | install_requires = [dep for dep in pkg_resources.parse_requirements(setup_kwargs['install_requires']) if dep.name == 'aiobotocore'] 33 | aiobotocore_dep = install_requires[0] 34 | print('Found: {0}'.format(aiobotocore_dep)) 35 | 36 | # Get latest aiobotocore version 37 | print('Getting aiobotocore current version') 38 | resp = requests.get('https://pypi.org/pypi/aiobotocore/json').json() 39 | current_aiobotocore_version = resp['info']['version'] 40 | #current_aiobotocore_version = '2.0.0' 41 | print('Current aiobotocore version: {0}'.format(current_aiobotocore_version)) 42 | 43 | if current_aiobotocore_version in aiobotocore_dep: 44 | print('We\'re good, skip') 45 | print('::set-output name=do_pr::false') 46 | sys.exit(0) 47 | 48 | # By this point we're going to open a pr 49 | # Check that a PR isn't already open for this 50 | prefix = '[prbot][depupdate] Aiobotocore' 51 | new_title = prefix + current_aiobotocore_version 52 | 53 | # go through prs, also make a list of existing prs that are resolved by this 54 | g = Github(os.environ['GITHUB_TOKEN']) 55 | repo = g.get_repo('terrycain/aioboto3') 56 | pulls = repo.get_pulls(state='open') 57 | found_pr = False 58 | fixes = [] 59 | for pr in pulls: 60 | if pr.title == new_title: 61 | print('Found existing PR, quitting') 62 | found_pr = True 63 | elif pr.title.startswith(prefix): 64 | fixes.append(pr.number) 65 | 66 | if found_pr: 67 | print('::set-output name=do_pr::false') 68 | sys.exit(0) 69 | 70 | print('::set-output name=pr_title::{0}'.format(new_title)) 71 | body = """Aiobotocore dependency update.
Version {0}""" 72 | if fixes: 73 | body += '\n\n' 74 | for number in fixes: 75 | body += 'Resolves #{0}\n'.format(number) 76 | body = body.format(current_aiobotocore_version).replace('%', '%25').replace('\n', '%0A').replace('\r', '%0D') 77 | print('::set-output name=pr_body::{0}'.format(body)) 78 | 79 | # update setup.py 80 | print('Updating setup.py') 81 | search = str(aiobotocore_dep) 82 | replace = search.replace(str(aiobotocore_dep.specifier), '') + '==' + current_aiobotocore_version # Does aiobotocore[boto3] + == + version 83 | with open('setup.py', 'r') as fp: 84 | new_setup_py = fp.read().replace(search, replace) 85 | with open('setup.py', 'w') as fp: 86 | fp.write(new_setup_py) 87 | 88 | # update pipfile 89 | print('Updating Pipfile') 90 | search = str(aiobotocore_dep.specifier) 91 | replace = '==' + current_aiobotocore_version 92 | with open('Pipfile', 'r') as fp: 93 | new_pipfile = '' 94 | for line in fp: 95 | if line.startswith('aiobotocore'): 96 | line = line.replace(search, replace) 97 | new_pipfile += line 98 | with open('Pipfile', 'w') as fp: 99 | fp.write(new_pipfile) 100 | 101 | print('::set-output name=do_pr::true') 102 | -------------------------------------------------------------------------------- /aioboto3/resources/response.py: -------------------------------------------------------------------------------- 1 | from boto3.resources.response import RawHandler, ResourceHandler, build_identifiers, build_empty_response, all_not_none, jmespath 2 | 3 | 4 | class AIOResourceHandler(ResourceHandler): 5 | async def __call__(self, parent, params, response): 6 | """ 7 | :type parent: ServiceResource 8 | :param parent: The resource instance to which this action is attached. 9 | :type params: dict 10 | :param params: Request parameters sent to the service. 11 | :type response: dict 12 | :param response: Low-level operation response. 13 | """ 14 | resource_name = self.resource_model.type 15 | json_definition = self.service_context.resource_json_definitions.get( 16 | resource_name 17 | ) 18 | 19 | # Load the new resource class that will result from this action. 20 | resource_cls = await self.factory.load_from_definition( 21 | resource_name=resource_name, 22 | single_resource_json_definition=json_definition, 23 | service_context=self.service_context 24 | ) 25 | raw_response = response 26 | search_response = None 27 | 28 | # Anytime a path is defined, it means the response contains the 29 | # resource's attributes, so resource_data gets set here. It 30 | # eventually ends up in resource.meta.data, which is where 31 | # the attribute properties look for data. 32 | if self.search_path: 33 | search_response = jmespath.search(self.search_path, raw_response) 34 | 35 | # First, we parse all the identifiers, then create the individual 36 | # response resources using them. Any identifiers that are lists 37 | # will have one item consumed from the front of the list for each 38 | # resource that is instantiated. Items which are not a list will 39 | # be set as the same value on each new resource instance. 40 | identifiers = dict( 41 | build_identifiers( 42 | self.resource_model.identifiers, parent, params, raw_response 43 | ) 44 | ) 45 | 46 | # If any of the identifiers is a list, then the response is plural 47 | plural = [v for v in identifiers.values() if isinstance(v, list)] 48 | 49 | if plural: 50 | response = [] 51 | 52 | # The number of items in an identifier that is a list will 53 | # determine how many resource instances to create. 
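# (Only the resource-class loading above needed awaiting; the per-item construction below mirrors boto3's synchronous ResourceHandler.)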
54 | for i in range(len(plural[0])): 55 | # Response item data is *only* available if a search path 56 | # was given. This prevents accidentally loading unrelated 57 | # data that may be in the response. 58 | response_item = None 59 | if search_response: 60 | response_item = search_response[i] 61 | response.append( 62 | self.handle_response_item( 63 | resource_cls, parent, identifiers, response_item 64 | ) 65 | ) 66 | elif all_not_none(identifiers.values()): 67 | # All identifiers must always exist, otherwise the resource 68 | # cannot be instantiated. 69 | response = self.handle_response_item( 70 | resource_cls, parent, identifiers, search_response 71 | ) 72 | else: 73 | # The response should be empty, but that may mean an 74 | # empty dict, list, or None based on whether we make 75 | # a remote service call and what shape it is expected 76 | # to return. 77 | response = None 78 | if self.operation_name is not None: 79 | # A remote service call was made, so try and determine 80 | # its shape. 81 | response = build_empty_response( 82 | self.search_path, 83 | self.operation_name, 84 | self.service_context.service_model 85 | ) 86 | 87 | return response 88 | 89 | 90 | class AIORawHandler(RawHandler): 91 | async def __call__(self, parent, params, response): 92 | return super(AIORawHandler, self).__call__(parent, params, response) 93 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import pytest 3 | import random 4 | import string 5 | import uuid 6 | from unittest import mock 7 | from typing import Dict, Type, TypeVar 8 | 9 | import pytest_asyncio 10 | 11 | from aiobotocore.config import AioConfig 12 | from aioboto3.session import Session 13 | 14 | 15 | @pytest.fixture(scope="session", params=[True, False], 16 | ids=['debug[true]', 'debug[false]']) 17 | def debug(request): 18 | return request.param 19 | 20 | 21 | def moto_config() -> Dict[str, str]: 22 | return { 23 | 'aws_secret_access_key': 'xxx', 24 | 'aws_access_key_id': 'xxx' 25 | } 26 | 27 | 28 | @pytest.fixture 29 | def region() -> str: 30 | return 'eu-central-1' 31 | 32 | 33 | @pytest.fixture 34 | def signature_version() -> str: 35 | return 'v4' 36 | 37 | 38 | @pytest.fixture 39 | def config(signature_version: str) -> AioConfig: 40 | return AioConfig( 41 | signature_version=signature_version, 42 | read_timeout=5, 43 | connect_timeout=5 44 | ) 45 | 46 | 47 | @pytest.fixture 48 | def random_table_name() -> str: 49 | return 'test_' + ''.join([random.choice(string.hexdigits) for _ in range(0, 8)]) 50 | 51 | 52 | @pytest.fixture 53 | def bucket_name() -> str: 54 | return 'test-bucket-' + str(uuid.uuid4()) 55 | 56 | 57 | @pytest.fixture 58 | def kms_key_alias() -> str: 59 | return 'alias/test-' + uuid.uuid4().hex 60 | 61 | 62 | @pytest.fixture 63 | def s3_key_name() -> str: 64 | return uuid.uuid4().hex 65 | 66 | 67 | @pytest_asyncio.fixture 68 | async def dynamodb_resource(request, region: str, config: AioConfig, moto_server: str) -> "ServiceResource": 69 | session = Session(region_name=region, **moto_config()) 70 | 71 | async with session.resource('dynamodb', region_name=region, endpoint_url=moto_server, config=config) as resource: 72 | yield resource 73 | 74 | 75 | @pytest_asyncio.fixture 76 | async def s3_client(request, region: str, config: AioConfig, moto_server: str, bucket_name: str) -> "S3": 77 | session = Session(region_name=region, **moto_config()) 78 | 79 | async with 
session.client('s3', region_name=region, endpoint_url=moto_server, config=config) as client: 80 | yield client 81 | 82 | 83 | @pytest_asyncio.fixture 84 | async def s3_resource(request, region: str, config: AioConfig, moto_server: str, bucket_name: str) -> "ServiceResource": 85 | session = Session(region_name=region, **moto_config()) 86 | 87 | async with session.resource('s3', region_name=region, endpoint_url=moto_server, config=config) as resource: 88 | yield resource 89 | 90 | 91 | T = TypeVar('T') 92 | 93 | 94 | def create_fake_session(base_class: Type[T], url_overrides: Dict[str, str]) -> Type[T]: 95 | class FakeSession(base_class): 96 | def __init__(self, *args, **kwargs): 97 | super(FakeSession, self).__init__(*args, **kwargs) 98 | 99 | self.__url_overrides = url_overrides 100 | self.__access_key = 'ABCDEFGABCDEFGABCDEF' 101 | self.__secret_key = 'YTYHRSshtrsTRHSrsTHRSTrthSRThsrTHsr' 102 | 103 | def client(self, *args, **kwargs): 104 | 105 | if 'endpoint_url' not in kwargs and args[0] in self.__url_overrides: 106 | kwargs['endpoint_url'] = self.__url_overrides[args[0]] 107 | 108 | kwargs['aws_access_key_id'] = self.__access_key 109 | kwargs['aws_secret_access_key'] = self.__secret_key 110 | 111 | return super(FakeSession, self).client(*args, **kwargs) 112 | 113 | def resource(self, *args, **kwargs): 114 | 115 | if 'endpoint_url' not in kwargs and args[0] in self.__url_overrides: 116 | kwargs['endpoint_url'] = self.__url_overrides[args[0]] 117 | 118 | kwargs['aws_access_key_id'] = self.__access_key 119 | kwargs['aws_secret_access_key'] = self.__secret_key 120 | 121 | return super(FakeSession, self).resource(*args, **kwargs) 122 | return FakeSession 123 | 124 | 125 | @pytest.fixture(scope='function') 126 | def moto_patch(request, region, config, moto_server): 127 | FakeAioboto3Session = create_fake_session(Session, { 128 | 's3': moto_server, 129 | 'kms': moto_server 130 | }) 131 | FakeBoto3Session = create_fake_session(boto3.Session, { 132 | 's3': moto_server, 133 | }) 134 | 135 | sessions = [ 136 | mock.patch('aioboto3.Session', FakeAioboto3Session), 137 | mock.patch('aioboto3.session.Session', FakeAioboto3Session), 138 | mock.patch('boto3.Session', FakeBoto3Session), 139 | mock.patch('boto3.session.Session', FakeBoto3Session) 140 | ] 141 | for session in sessions: 142 | session.start() 143 | 144 | yield 145 | 146 | for session in sessions: 147 | session.stop() 148 | 149 | 150 | pytest_plugins = ['mock_server'] 151 | -------------------------------------------------------------------------------- /aioboto3/resources/action.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from boto3.resources.action import ServiceAction, WaiterAction 4 | from boto3.resources.params import create_request_parameters 5 | from boto3.resources.action import xform_name 6 | 7 | from aioboto3.resources.response import AIOResourceHandler, AIORawHandler 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | 12 | class AIOServiceAction(ServiceAction): 13 | def __init__(self, action_model, factory=None, service_context=None): 14 | self._action_model = action_model 15 | 16 | # In the simplest case we just return the response, but if a 17 | # resource is defined, then we must create these before returning.
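# NOTE: both handler variants implement an async __call__ (see aioboto3/resources/response.py), which is why __call__ below awaits self._response_handler.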
18 | resource_response_model = action_model.resource 19 | if resource_response_model: 20 | self._response_handler = AIOResourceHandler( 21 | search_path=resource_response_model.path, 22 | factory=factory, 23 | resource_model=resource_response_model, 24 | service_context=service_context, 25 | operation_name=action_model.request.operation 26 | ) 27 | else: 28 | self._response_handler = AIORawHandler(action_model.path) 29 | 30 | async def __call__(self, parent, *args, **kwargs): 31 | operation_name = xform_name(self._action_model.request.operation) 32 | 33 | # First, build predefined params and then update with the 34 | # user-supplied kwargs, which allows overriding the pre-built 35 | # params if needed. 36 | params = create_request_parameters(parent, self._action_model.request) 37 | params.update(kwargs) 38 | 39 | logger.debug('Calling %s:%s with %r', parent.meta.service_name, 40 | operation_name, params) 41 | 42 | response = await getattr(parent.meta.client, operation_name)(*args, **params) 43 | 44 | logger.debug('Response: %r', response) 45 | 46 | return await self._response_handler(parent, params, response) 47 | 48 | 49 | class AioBatchAction(ServiceAction): 50 | async def __call__(self, parent, *args, **kwargs): 51 | service_name = None 52 | client = None 53 | responses = [] 54 | operation_name = xform_name(self._action_model.request.operation) 55 | 56 | # Unlike the simple action above, a batch action must operate 57 | # on batches (or pages) of items. So we get each page, construct 58 | # the necessary parameters and call the batch operation. 59 | async for page in parent.pages(): 60 | params = {} 61 | for index, resource in enumerate(page): 62 | # There is no public interface to get a service name 63 | # or low-level client from a collection, so we get 64 | # these from the first resource in the collection. 65 | if service_name is None: 66 | service_name = resource.meta.service_name 67 | if client is None: 68 | client = resource.meta.client 69 | 70 | create_request_parameters( 71 | resource, self._action_model.request, 72 | params=params, index=index) 73 | 74 | if not params: 75 | # There are no items, no need to make a call. 76 | break 77 | 78 | params.update(kwargs) 79 | 80 | logger.debug('Calling %s:%s with %r', 81 | service_name, operation_name, params) 82 | 83 | response = await (getattr(client, operation_name)(*args, **params)) 84 | 85 | logger.debug('Response: %r', response) 86 | 87 | responses.append( 88 | self._response_handler(parent, params, response)) 89 | 90 | return responses 91 | 92 | 93 | class AIOWaiterAction(WaiterAction): 94 | async def __call__(self, parent, *args, **kwargs): 95 | """ 96 | Perform the wait operation after building operation 97 | parameters. 98 | 99 | :type parent: :py:class:`~boto3.resources.base.ServiceResource` 100 | :param parent: The resource instance to which this action is attached. 101 | """ 102 | client_waiter_name = xform_name(self._waiter_model.waiter_name) 103 | 104 | # First, build predefined params and then update with the 105 | # user-supplied kwargs, which allows overriding the pre-built 106 | # params if needed. 
107 | params = create_request_parameters(parent, self._waiter_model) 108 | params.update(kwargs) 109 | 110 | logger.debug('Calling %s:%s with %r', 111 | parent.meta.service_name, 112 | self._waiter_resource_name, params) 113 | 114 | client = parent.meta.client 115 | waiter = client.get_waiter(client_waiter_name) 116 | response = await waiter.wait(**params) 117 | 118 | logger.debug('Response: %r', response) 119 | -------------------------------------------------------------------------------- /docs/cse.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | AWS S3 Client-side Encryption 3 | ============================= 4 | 5 | How it works (KMS Managed Keys) 6 | ------------------------------- 7 | 8 | Overall the entire procedure isn't incredibly complex, just not very well documented (unless my google skills are failing me). 9 | And I may be wrong, but the Java SDK can decrypt files made with this library, and this library can decrypt the Java-made files. 10 | 11 | Decryption 12 | ++++++++++ 13 | 14 | Firstly, get the object from S3; it'll have various crypto goodies in the object's metadata. 15 | 16 | - metadata: ``x-amz-unencrypted-content-length`` - Resultant length of the plaintext 17 | - metadata: ``x-amz-key-v2`` - this is the base64'd KMS-encrypted AES key. 18 | - metadata: ``x-amz-matdesc`` - JSON KMS encryption context, records which KMS key encrypted the AES key 19 | - metadata: ``x-amz-iv`` - AES IV 20 | - metadata: ``x-amz-cek-alg`` - Which AES algorithm was used, AES/CBC/PKCS5Padding or AES/GCM/NoPadding 21 | - metadata: ``x-amz-tag-len`` - If using AES-GCM then this is a fixed value of 128, otherwise it is not present 22 | - metadata: ``x-amz-wrap-alg`` - Always KMS when using KMS managed master keys 23 | 24 | Send ``x-amz-key-v2`` and ``x-amz-matdesc`` to KMS; that will return the decrypted AES key. 25 | 26 | Decrypt the file with either CBC or GCM based on ``x-amz-cek-alg``. 27 | 28 | If CBC was used, you'll also need to remove the PKCS5Padding. This snippet would do that: ``a = a[:-a[-1]]``. It removes N bytes off the end of 29 | the bytestring; each padding byte holds the value N (less than 256), i.e. the number of bytes to remove (look up PKCS5Padding). 30 | 31 | If GCM was used, a tag appended to the ciphertext is also verified during decryption for some added protection. 32 | 33 | Encryption 34 | ++++++++++ 35 | 36 | Simply enough, you do the majority of the above... backwards. 37 | 38 | Call the ``generate_data_key`` KMS API (with the encryption context) to get both an encrypted AES key and a plaintext AES key. 39 | Generate IVs. Encrypt your data. Assemble all the required metadata (use the KMS-provided encrypted AES key for ``x-amz-key-v2``), then push to S3. 40 | 41 | 42 | How it works (Symmetric Keys) 43 | ----------------------------- 44 | 45 | The method is pretty similar. The encryption key is stored in ``x-amz-key``; it's encrypted with AES/ECB/PKCS5Padding :/ 46 | Then the object's data is always encrypted with AES/CBC/PKCS5Padding, which means no range downloads. 47 | 48 | 49 | How it works (Asymmetric Keys) 50 | ------------------------------ 51 | 52 | Once again it's pretty similar, but this time the encryption key is encrypted/decrypted with RSA/ECB/PKCS1Padding. 53 | 54 | 55 | CryptoContext Class 56 | ------------------- 57 | 58 | This class performs two main functions.
It converts the object's encrypted-key metadata into a decryption key, and it generates an 59 | encryption key along with the corresponding encrypted encryption key, base64 encoded. 60 | 61 | For example, when decrypting a file using KMS-managed client-side encryption, it passes the encrypted key to ``KMS.decrypt`` along 62 | with the "material description" (the ``x-amz-matdesc`` metadata header), and KMS returns the original AES key. 63 | 64 | Similarly, when encrypting a file it calls KMS to generate a data key; that returns an AES key, appropriate material description 65 | metadata and a base64'd encrypted form of the AES key. 66 | 67 | +---------------------------------------------------+-------------------------------------------+ 68 | | CryptoContext Class | Description | 69 | +===================================================+===========================================+ 70 | | :class:`aioboto3.s3.cse.KMSCryptoContext` | Performs CSE using KMS managed keys | 71 | +---------------------------------------------------+-------------------------------------------+ 72 | | :class:`aioboto3.s3.cse.AsymmetricCryptoContext` | Performs CSE using public / private keys | 73 | +---------------------------------------------------+-------------------------------------------+ 74 | | :class:`aioboto3.s3.cse.SymmetricCryptoContext` | Performs CSE using a single symmetric key | 75 | +---------------------------------------------------+-------------------------------------------+ 76 | 77 | 78 | Example 79 | ------- 80 | 81 | .. code-block:: python 82 | 83 | import asyncio 84 | import aioboto3 85 | from aioboto3.s3.cse import S3CSE, KMSCryptoContext 86 | 87 | async def main(): 88 | ctx = KMSCryptoContext(keyid='alias/someKey', kms_client_args={'region_name': 'eu-central-1'}) 89 | 90 | some_data = b'Some sensitive data for S3' 91 | 92 | async with S3CSE(crypto_context=ctx, s3_client_args={'region_name': 'eu-central-1'}) as s3_cse: 93 | # Upload some binary data 94 | await s3_cse.put_object( 95 | Body=some_data, 96 | Bucket='some-bucket', 97 | Key='encrypted_file', 98 | ) 99 | 100 | response = await s3_cse.get_object( 101 | Bucket='some-bucket', 102 | Key='encrypted_file' 103 | ) 104 | data = await response['Body'].read() 105 | print(data) 106 | 107 | loop = asyncio.get_event_loop() 108 | loop.run_until_complete(main()) 109 | 110 | # Outputs: 111 | # b'Some sensitive data for S3' 112 | -------------------------------------------------------------------------------- /aioboto3/dynamodb/table.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import logging 3 | 4 | from boto3.dynamodb.table import TableResource 5 | 6 | logger = logging.getLogger(__name__) 7 | 8 | 9 | def register_table_methods(base_classes, **kwargs): 10 | base_classes.insert(0, CustomTableResource) 11 | 12 | 13 | class CustomTableResource(TableResource): 14 | def batch_writer(self, overwrite_by_pkeys=None, flush_amount=25, on_exit_loop_sleep=0): 15 | return BatchWriter( 16 | self.name, self.meta.client, 17 | flush_amount=flush_amount, 18 | overwrite_by_pkeys=overwrite_by_pkeys, 19 | on_exit_loop_sleep=on_exit_loop_sleep 20 | ) 21 | 22 | 23 | class BatchWriter(object): 24 | """ 25 | Modified so that it does async 26 | Automatically handle batch writes to DynamoDB for a single table.
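Intended to be used as an async context manager; on exit the remaining buffer keeps being flushed until it is empty.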
27 | """ 28 | 29 | def __init__( 30 | self, table_name, client, flush_amount=25, overwrite_by_pkeys=None, on_exit_loop_sleep=0 31 | ): 32 | """ 33 | 34 | :type table_name: str 35 | :param table_name: The name of the table. The class handles 36 | batch writes to a single table. 37 | 38 | :type client: ``botocore.client.Client`` 39 | :param client: A botocore client. Note this client 40 | **must** have the dynamodb customizations applied 41 | to it for transforming AttributeValues into the 42 | wire protocol. What this means in practice is that 43 | you need to use a client that comes from a DynamoDB 44 | resource if you're going to instantiate this class 45 | directly, i.e. 46 | ``boto3.resource('dynamodb').Table('foo').meta.client``. 47 | 48 | :type flush_amount: int 49 | :param flush_amount: The number of items to keep in 50 | a local buffer before sending a batch_write_item 51 | request to DynamoDB. 52 | 53 | :type overwrite_by_pkeys: list(string) 54 | :param overwrite_by_pkeys: De-duplicate request items in the buffer 55 | if they match a new request item on the specified primary keys, e.g. 56 | ``["partition_key1", "sort_key2", "sort_key3"]`` 57 | 58 | :type on_exit_loop_sleep: int 59 | :param on_exit_loop_sleep: When the context manager exits, the 60 | remaining buffer is flushed until empty; if this value is > 0, 61 | a sleep of this many seconds is awaited between those flushes. 62 | 63 | """ 64 | self._table_name = table_name 65 | self._client = client 66 | self._items_buffer = [] 67 | self._flush_amount = flush_amount 68 | self._overwrite_by_pkeys = overwrite_by_pkeys 69 | self._on_exit_loop_sleep = on_exit_loop_sleep 70 | 71 | async def put_item(self, Item): 72 | await self._add_request_and_process({'PutRequest': {'Item': Item}}) 73 | 74 | async def delete_item(self, Key): 75 | await self._add_request_and_process({'DeleteRequest': {'Key': Key}}) 76 | 77 | async def _add_request_and_process(self, request): 78 | if self._overwrite_by_pkeys: 79 | self._remove_dup_pkeys_request_if_any(request) 80 | self._items_buffer.append(request) 81 | await self._flush_if_needed() 82 | 83 | def _remove_dup_pkeys_request_if_any(self, request): 84 | pkey_values_new = self._extract_pkey_values(request) 85 | for item in self._items_buffer: 86 | if self._extract_pkey_values(item) == pkey_values_new: 87 | self._items_buffer.remove(item) 88 | logger.debug("With overwrite_by_pkeys enabled, skipping request:%s", item) 89 | 90 | def _extract_pkey_values(self, request): 91 | if request.get('PutRequest'): 92 | return [ 93 | request['PutRequest']['Item'][key] 94 | for key in self._overwrite_by_pkeys 95 | ] 96 | elif request.get('DeleteRequest'): 97 | return [ 98 | request['DeleteRequest']['Key'][key] 99 | for key in self._overwrite_by_pkeys 100 | ] 101 | return None 102 | 103 | async def _flush_if_needed(self): 104 | if len(self._items_buffer) >= self._flush_amount: 105 | await self._flush() 106 | 107 | async def _flush(self): 108 | items_to_send = self._items_buffer[:self._flush_amount] 109 | self._items_buffer = self._items_buffer[self._flush_amount:] 110 | response = await self._client.batch_write_item( 111 | RequestItems={self._table_name: items_to_send}) 112 | unprocessed_items = response['UnprocessedItems'] 113 | 114 | if not unprocessed_items: 115 | unprocessed_items = {} 116 | item_list = unprocessed_items.get(self._table_name, []) 117 | # Any unprocessed_items are immediately added to the 118 | # next batch we send.
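# Note: repeatedly-unprocessed (e.g. throttled) items keep cycling through this buffer; on exit, __aexit__ can await asyncio.sleep(on_exit_loop_sleep) between flushes to back off.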
119 | self._items_buffer.extend(item_list) 120 | logger.debug( 121 | "Batch write sent %s, unprocessed: %s, buffer %s", 122 | len(items_to_send), len(item_list), len(self._items_buffer) 123 | ) 124 | 125 | async def __aenter__(self): 126 | return self 127 | 128 | async def __aexit__(self, exc_type, exc_value, tb): 129 | # When we exit, we need to keep flushing whatever's left 130 | # until there's nothing left in our items buffer. 131 | while self._items_buffer: 132 | await self._flush() 133 | if self._items_buffer and self._on_exit_loop_sleep: 134 | await asyncio.sleep(self._on_exit_loop_sleep) 135 | -------------------------------------------------------------------------------- /tests/test_patches.py: -------------------------------------------------------------------------------- 1 | import hashlib 2 | 3 | import boto3 4 | from boto3.session import Session 5 | from boto3.resources.action import BatchAction, ServiceAction, WaiterAction 6 | from boto3.resources.response import ResourceHandler, RawHandler 7 | from boto3.resources.collection import ResourceCollection, CollectionManager, CollectionFactory 8 | from boto3.resources.factory import ResourceFactory 9 | from boto3.dynamodb.table import register_table_methods, TableResource, BatchWriter 10 | from boto3.s3.inject import inject_s3_transfer_methods, download_file, download_fileobj, upload_file, \ 11 | upload_fileobj, copy, inject_object_summary_methods, inject_bucket_methods, object_summary_load, \ 12 | bucket_load 13 | from boto3.s3.transfer import S3TransferConfig 14 | from dill.source import getsource 15 | from chalice.app import Chalice, RestAPIEventHandler, __version__ as chalice_version 16 | 17 | import aiobotocore 18 | 19 | 20 | _API_DIGESTS = { 21 | # __init__.py 22 | boto3.setup_default_session: {'3600170f2c4dbd1896f636a21524b3b027405de1'}, 23 | boto3.set_stream_logger: {'42a3ca2d28b00e219acfd03ae110a970eb2b9045'}, 24 | boto3._get_default_session: {'5249535ea408e9497b1363f73b9fd46dcb068b06'}, 25 | boto3.client: {'20c73aeb9feb10d1e5d7f6b3f7dedcab00c7fbcf'}, 26 | boto3.resource: {'316deeb96e6af699be73670c7478357c6882eab3'}, 27 | boto3.NullHandler: {'6e0c2ed3ca7e42b851afb79694bdfb0e4fea26f2'}, 28 | 29 | # resources/action.py 30 | ServiceAction.__init__: {'b8b759abbe8fbfa9bad332b8ce8d30f55daf97f3', '21c079bf5e234c3fcb632c7f689eccf0c4d2935b'}, 31 | ServiceAction.__call__: {'f3cb58a5e36bf3355723c69ec244990180a2d5bc', '56c98dd3a54b2859a8834a1e4d676fe38fae013e'}, 32 | BatchAction.__call__: {'ea58ac6ba436740cc3766b8c115ee0222b665c9a', '63387ccf7f57ffc39be7fde1de187776622bb1c4'}, 33 | WaiterAction.__call__: {'d3c379619530e8f2b3b7cb8a3421dcb0acfd0f73', '616339d5d6af86431108d84118f490d879dd9fa2'}, 34 | 35 | # resources/collection.py 36 | ResourceCollection.__iter__: {'6631cf4c177643738acff01aa7f3fa324a246ba9'}, # Logic inside anext 37 | ResourceCollection.pages: {'a26745155edd73004004af12e8fa8f617d2989b0', '28ae6e6fe35b930bbf65a162225bb4e23fc9eec0', '5e57180839503cdd6de71cefe5b8c8b862273ad1'}, 38 | CollectionManager.__init__: {'f40c0a368b747518a7b6998eab98920cb4d7d233', '7007f88626a41fec98a5efd79c24395d89ded879'}, 39 | CollectionFactory.load_from_definition: {'eadb8897327b2faf812b2a2d6fbf643c8f4f029a', '06c878d737216948ef9cfda476594466d34b5d97', '143dccdee71618317880686f3b3ae8f31eee5d2e'}, 40 | CollectionFactory._create_batch_action: {'435ff19f24325a515563fd9716b89158ac676a02', 'a911563aaf703994b63c5e2b51c0205b82f05673'}, 41 | 42 | # resources/factory.py 43 | ResourceFactory.__init__: {'dc2b647537ce3cecfe76e172cc4042eca4ed5b86'}, 44 | 
ResourceFactory.load_from_definition: {'1f6c0b9298d63d3d50c64abdb3c7025c03cbbdf9', 'c995f96439b1837d6caaf461e37f01580cd840d5'}, 45 | ResourceFactory._create_autoload_property: {'62793a404067069d499246389f1b97601cb9b7a8', '812f8f8cd1445582c83b09ff2fce1e799daba419', '49c51a5503d40a8be2aba6cf99b3896cd8f97bac'}, 46 | ResourceFactory._create_waiter: {'69d8bd493fde2f6e3b32c5a6cf89059885832cff', 'abb12827964c8bab17f4d99466d1a60ab97ec0a9'}, 47 | ResourceFactory._create_class_partial: {'5e421387dd4a4a40e871dc1597af21149eccf85a', 'cba44eb792b11f2ff47146f0f610e0bfb17de1b5'}, 48 | ResourceFactory._create_action: {'1cbbe9ee45eeff7b40d3cde21df34f5cff540c94'}, 49 | 50 | # resources/response.py 51 | ResourceHandler.__call__: {'4927077955466d5ef3558b2148ba8ff8d19094bf', 'e3bdc52aa8d22642d1118921d984808b9019ce63'}, 52 | RawHandler.__call__: {'5ea91e39ab1dc3587a4038805ee90235990b866d'}, 53 | 54 | # session.py 55 | Session.__init__: {'3f494c3eb0987251cae373873499e917f264571b'}, 56 | Session._register_default_handlers: {'04f247de526b7a0af15737e04019ade52cc65446', '74fa15629c9ea69f79f3a5285357dbf53f734f2d', 'e30e5c3a0f6bc8f002ba679d4bae831914fc67a0'}, 57 | Session.resource: {'5e3568b28281a75eaf9725fab67c33dc16a18144', 'b110781f5a5d148dd1d614e7611650a16cbea372'}, 58 | 59 | # dynamodb/table.py 60 | register_table_methods: {'1d9191de712871b92e1e87f94c6583166a315113'}, 61 | TableResource: {'a65f5e64ecca7d3cee3f6f337db36313d84dbad1', '2b803c9437bbee6b369369a279fcb0e34c821ab2', 'b9d2f960fbffafdd8b88f7036c4dbe1a76e93f66'}, 62 | BatchWriter: {'bc1994154ceefd6c50b778eb497eb613c1b49f67'}, # Class was pretty much rewritten so wasn't subclassed. 63 | 64 | # s3/inject.py 65 | inject_s3_transfer_methods: {'8540c89847b80cc1fb34627989eba14972c158d5', '19e91a5002e1d5b30a08024f25a9ba875010bacc'}, 66 | inject_object_summary_methods: {'a9e2005d1663a5eb17b6b9667835fa251864ccef'}, 67 | inject_bucket_methods: {'63316226fdd4d7c043eaf35e07b6b2ac331b4872', 'dfe1c2219ced56b0aaa74c4a84210fd20463392e'}, 68 | object_summary_load: {'3e4db1310105ced8ac2af17598603812ca18cbbe', '98a5a726f105388322a845ba97e08f1e53ee9d69'}, 69 | bucket_load: {'2d40d03ca9ec91eb5de8a8f40e0f35634ab1d987'}, 70 | download_file.__wrapped__: {'0cb74058f3d771b69ffa55c449915b8ae2d79d5a'}, 71 | download_fileobj.__wrapped__: {'3987566bbd712aa81c332b1c2684327a9fd0de38'}, 72 | upload_fileobj.__wrapped__: {'7d344505b3ea95168603e534c75a1a51551b35d5'}, 73 | upload_file.__wrapped__: {'9949e77ef9c98c5017388d8150c3cbf00e412077'}, 74 | copy.__wrapped__: {'534f68820a410ba41072026994badca7f66fe1b9'}, 75 | S3TransferConfig.__init__: {'f418b3dab3c6f073f19feaf1172359bdc3863e22'}, 76 | } 77 | 78 | _CHALICE_API_DIGESTS = { 79 | # experimental/async_chalice.py 80 | Chalice.__call__: {'d1d4f2b1a1bd6574500dec1f181fcfe8345f5ac6'}, 81 | RestAPIEventHandler._get_view_function_response: {'ccf22bac60d89704c445baa9c2c881f525b70652'} 82 | } 83 | 84 | def test_patches(): 85 | print("Boto3 version: {} aiobotocore version: {}".format(boto3.__version__, aiobotocore.__version__)) 86 | 87 | success = True 88 | for obj, digests in _API_DIGESTS.items(): 89 | digest = hashlib.sha1(getsource(obj).encode('utf-8')).hexdigest() 90 | if digest not in digests: 91 | print("Digest of {}:{} not found in: {}".format(obj.__qualname__, digest, digests)) 92 | success = False 93 | 94 | assert success 95 | 96 | 97 | def test_chalice_patches(): 98 | print("Chalice version: {}".format(chalice_version)) 99 | 100 | success = True 101 | for obj, digests in _CHALICE_API_DIGESTS.items(): 102 | digest = 
hashlib.sha1(getsource(obj).encode('utf-8')).hexdigest()
103 |         if digest not in digests:
104 |             print("Digest of {}:{} not found in: {}".format(obj.__qualname__, digest, digests))
105 |             success = False
106 | 
107 |     assert success
108 | 
--------------------------------------------------------------------------------
/aioboto3/resources/collection.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from typing import AsyncIterator, Any, cast
3 | 
4 | from boto3.docs import docstring
5 | from boto3.resources.collection import CollectionFactory, ResourceCollection, CollectionManager, merge_dicts
6 | from boto3.resources.params import create_request_parameters
7 | 
8 | from aioboto3.resources.action import AioBatchAction, AIOResourceHandler
9 | 
10 | logger = logging.getLogger(__name__)
11 | 
12 | 
13 | class AIOResourceCollection(ResourceCollection):
14 |     """
15 |     Converted the ResourceCollection.pages() function to an async generator so that we can do
16 |     ``async for`` on a paginator inside that function.
17 | 
18 |     Converted ``__iter__`` to ``__aiter__``; plain synchronous iteration now raises NotImplementedError.
19 |     """
20 |     async def __anext__(self):
21 |         limit = self._params.get('limit', None)
22 | 
23 |         count = 0
24 |         async for page in cast(AsyncIterator[Any], self.pages()):
25 |             for item in page:
26 |                 yield item
27 | 
28 |                 count += 1
29 |                 if limit is not None and count >= limit:
30 |                     return
31 | 
32 |     def __aiter__(self):
33 |         return self.__anext__()
34 | 
35 |     def __iter__(self):
36 |         raise NotImplementedError('Use async-for instead')
37 | 
38 |     async def pages(self):
39 |         client = self._parent.meta.client
40 |         cleaned_params = self._params.copy()
41 |         limit = cleaned_params.pop('limit', None)
42 |         page_size = cleaned_params.pop('page_size', None)
43 |         params = create_request_parameters(self._parent, self._model.request)
44 |         merge_dicts(params, cleaned_params, append_lists=True)
45 | 
46 |         # Is this a paginated operation? If so, we need to get an
47 |         # iterator for the various pages. If not, then we simply
48 |         # call the operation and return the result as a single
49 |         # page in a list. For non-paginated results, we just ignore
50 |         # the page size parameter.
51 |         if client.can_paginate(self._py_operation_name):
52 |             logger.debug(
53 |                 'Calling paginated %s:%s with %r',
54 |                 self._parent.meta.service_name,
55 |                 self._py_operation_name,
56 |                 params
57 |             )
58 |             paginator = client.get_paginator(self._py_operation_name)
59 |             pages = paginator.paginate(
60 |                 PaginationConfig={'MaxItems': limit, 'PageSize': page_size},
61 |                 **params
62 |             )
63 |         else:
64 |             async def _aiopaginatordummy():
65 |                 yield await getattr(client, self._py_operation_name)(**params)
66 | 
67 |             logger.debug(
68 |                 'Calling %s:%s with %r',
69 |                 self._parent.meta.service_name,
70 |                 self._py_operation_name,
71 |                 params
72 |             )
73 |             pages = _aiopaginatordummy()
74 | 
75 |         # Now that we have a page iterator or single page of results
76 |         # we start processing and yielding individual items.
77 |         count = 0
78 |         async for page in pages:
79 |             page_items = []
80 |             for item in await self._handler(self._parent, params, page):
81 |                 page_items.append(item)
82 | 
83 |                 # If the limit is set and has been reached, then
84 |                 # we stop processing items here.
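                # (count accumulates across pages, so the limit applies
                # to the collection as a whole, not to each page.)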
85 |                 count += 1
86 |                 if limit is not None and count >= limit:
87 |                     break
88 | 
89 |             yield page_items
90 | 
91 |             # Stop reading pages if we've reached our limit
92 |             if limit is not None and count >= limit:
93 |                 break
94 | 
95 | 
96 | class AIOCollectionManager(CollectionManager):
97 |     _collection_cls = AIOResourceCollection
98 | 
99 |     def __init__(self, collection_model, parent, factory, service_context):
100 |         self._model = collection_model
101 |         operation_name = self._model.request.operation
102 |         self._parent = parent
103 | 
104 |         search_path = collection_model.resource.path
105 |         self._handler = AIOResourceHandler(
106 |             search_path=search_path,
107 |             factory=factory,
108 |             resource_model=collection_model.resource,
109 |             service_context=service_context,
110 |             operation_name=operation_name
111 |         )
112 | 
113 | 
114 | class AIOCollectionFactory(CollectionFactory):
115 |     def load_from_definition(
116 |         self, resource_name, collection_model, service_context, event_emitter
117 |     ):
118 |         attrs = {}
119 |         collection_name = collection_model.name
120 | 
121 |         # Create the batch actions for a collection
122 |         self._load_batch_actions(
123 |             attrs,
124 |             resource_name,
125 |             collection_model,
126 |             service_context.service_model,
127 |             event_emitter
128 |         )
129 |         # Add the documentation to the collection class's methods
130 |         self._load_documented_collection_methods(
131 |             attrs=attrs,
132 |             resource_name=resource_name,
133 |             collection_model=collection_model,
134 |             service_model=service_context.service_model,
135 |             event_emitter=event_emitter,
136 |             base_class=AIOResourceCollection
137 |         )
138 | 
139 |         if service_context.service_name == resource_name:
140 |             cls_name = (
141 |                 f'{service_context.service_name}.{collection_name}Collection'
142 |             )
143 |         else:
144 |             cls_name = f'{service_context.service_name}.{resource_name}.{collection_name}Collection'
145 | 
146 |         collection_cls = type(str(cls_name), (AIOResourceCollection,), attrs)
147 | 
148 |         # Add the documentation to the collection manager's methods
149 |         self._load_documented_collection_methods(
150 |             attrs=attrs,
151 |             resource_name=resource_name,
152 |             collection_model=collection_model,
153 |             service_model=service_context.service_model,
154 |             event_emitter=event_emitter,
155 |             base_class=AIOCollectionManager
156 |         )
157 |         attrs['_collection_cls'] = collection_cls
158 |         cls_name += 'Manager'
159 | 
160 |         return type(str(cls_name), (AIOCollectionManager,), attrs)
161 | 
162 |     def _create_batch_action(
163 |         factory_self,
164 |         resource_name,
165 |         snake_cased,
166 |         action_model,
167 |         collection_model,
168 |         service_model,
169 |         event_emitter
170 |     ):
171 |         """
172 |         Creates a new method which makes a batch operation request
173 |         to the underlying service API.
174 |         """
175 |         action = AioBatchAction(action_model)
176 | 
177 |         def batch_action(self, *args, **kwargs):
178 |             return action(self, *args, **kwargs)
179 | 
180 |         batch_action.__name__ = str(snake_cased)
181 |         batch_action.__doc__ = docstring.BatchActionDocstring(
182 |             resource_name=resource_name,
183 |             event_emitter=event_emitter,
184 |             batch_action_model=action_model,
185 |             service_model=service_model,
186 |             collection_model=collection_model,
187 |             include_signature=False
188 |         )
189 |         return batch_action
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
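# e.g. "make html SPHINXOPTS=-W" turns Sphinx warnings into errors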
5 | SPHINXOPTS    =
6 | SPHINXBUILD   = sphinx-build
7 | PAPER         =
8 | BUILDDIR      = _build
9 | 
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 | 
15 | # Internal variables.
16 | PAPEROPT_a4     = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 | 
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
23 | 
24 | help:
25 | 	@echo "Please use \`make <target>' where <target> is one of"
26 | 	@echo "  html       to make standalone HTML files"
27 | 	@echo "  dirhtml    to make HTML files named index.html in directories"
28 | 	@echo "  singlehtml to make a single large HTML file"
29 | 	@echo "  pickle     to make pickle files"
30 | 	@echo "  json       to make JSON files"
31 | 	@echo "  htmlhelp   to make HTML files and a HTML help project"
32 | 	@echo "  qthelp     to make HTML files and a qthelp project"
33 | 	@echo "  devhelp    to make HTML files and a Devhelp project"
34 | 	@echo "  epub       to make an epub"
35 | 	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | 	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
37 | 	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | 	@echo "  text       to make text files"
39 | 	@echo "  man        to make manual pages"
40 | 	@echo "  texinfo    to make Texinfo files"
41 | 	@echo "  info       to make Texinfo files and run them through makeinfo"
42 | 	@echo "  gettext    to make PO message catalogs"
43 | 	@echo "  changes    to make an overview of all changed/added/deprecated items"
44 | 	@echo "  xml        to make Docutils-native XML files"
45 | 	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
46 | 	@echo "  linkcheck  to check all external links for integrity"
47 | 	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
48 | 
49 | clean:
50 | 	rm -rf $(BUILDDIR)/*
51 | 
52 | html:
53 | 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
54 | 	@echo
55 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
56 | 
57 | dirhtml:
58 | 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
59 | 	@echo
60 | 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
61 | 
62 | singlehtml:
63 | 	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
64 | 	@echo
65 | 	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
66 | 
67 | pickle:
68 | 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
69 | 	@echo
70 | 	@echo "Build finished; now you can process the pickle files."
71 | 
72 | json:
73 | 	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
74 | 	@echo
75 | 	@echo "Build finished; now you can process the JSON files."
76 | 
77 | htmlhelp:
78 | 	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
79 | 	@echo
80 | 	@echo "Build finished; now you can run HTML Help Workshop with the" \
81 | 	      ".hhp project file in $(BUILDDIR)/htmlhelp."
82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/aioboto3.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/aioboto3.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/aioboto3" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/aioboto3" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. 
The XML files are in $(BUILDDIR)/xml."
173 | 
174 | pseudoxml:
175 | 	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
176 | 	@echo
177 | 	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
178 | 
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 | 
3 | REM Command file for Sphinx documentation
4 | 
5 | if "%SPHINXBUILD%" == "" (
6 | 	set SPHINXBUILD=sphinx-build
7 | )
8 | set BUILDDIR=_build
9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
10 | set I18NSPHINXOPTS=%SPHINXOPTS% .
11 | if NOT "%PAPER%" == "" (
12 | 	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
13 | 	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
14 | )
15 | 
16 | if "%1" == "" goto help
17 | 
18 | if "%1" == "help" (
19 | 	:help
20 | 	echo.Please use `make ^<target^>` where ^<target^> is one of
21 | 	echo.  html       to make standalone HTML files
22 | 	echo.  dirhtml    to make HTML files named index.html in directories
23 | 	echo.  singlehtml to make a single large HTML file
24 | 	echo.  pickle     to make pickle files
25 | 	echo.  json       to make JSON files
26 | 	echo.  htmlhelp   to make HTML files and a HTML help project
27 | 	echo.  qthelp     to make HTML files and a qthelp project
28 | 	echo.  devhelp    to make HTML files and a Devhelp project
29 | 	echo.  epub       to make an epub
30 | 	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
31 | 	echo.  text       to make text files
32 | 	echo.  man        to make manual pages
33 | 	echo.  texinfo    to make Texinfo files
34 | 	echo.  gettext    to make PO message catalogs
35 | 	echo.  changes    to make an overview over all changed/added/deprecated items
36 | 	echo.  xml        to make Docutils-native XML files
37 | 	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
38 | 	echo.  linkcheck  to check all external links for integrity
39 | 	echo.  doctest    to run all doctests embedded in the documentation if enabled
40 | 	goto end
41 | )
42 | 
43 | if "%1" == "clean" (
44 | 	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
45 | 	del /q /s %BUILDDIR%\*
46 | 	goto end
47 | )
48 | 
49 | 
50 | %SPHINXBUILD% 2> nul
51 | if errorlevel 9009 (
52 | 	echo.
53 | 	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
54 | 	echo.installed, then set the SPHINXBUILD environment variable to point
55 | 	echo.to the full path of the 'sphinx-build' executable. Alternatively you
56 | 	echo.may add the Sphinx directory to PATH.
57 | 	echo.
58 | 	echo.If you don't have Sphinx installed, grab it from
59 | 	echo.http://sphinx-doc.org/
60 | 	exit /b 1
61 | )
62 | 
63 | if "%1" == "html" (
64 | 	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
65 | 	if errorlevel 1 exit /b 1
66 | 	echo.
67 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
68 | 	goto end
69 | )
70 | 
71 | if "%1" == "dirhtml" (
72 | 	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
73 | 	if errorlevel 1 exit /b 1
74 | 	echo.
75 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
76 | 	goto end
77 | )
78 | 
79 | if "%1" == "singlehtml" (
80 | 	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
81 | 	if errorlevel 1 exit /b 1
82 | 	echo.
83 | 	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
84 | 	goto end
85 | )
86 | 
87 | if "%1" == "pickle" (
88 | 	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
89 | 	if errorlevel 1 exit /b 1
90 | 	echo.
91 | 	echo.Build finished; now you can process the pickle files.
92 | goto end 93 | ) 94 | 95 | if "%1" == "json" ( 96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 97 | if errorlevel 1 exit /b 1 98 | echo. 99 | echo.Build finished; now you can process the JSON files. 100 | goto end 101 | ) 102 | 103 | if "%1" == "htmlhelp" ( 104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 105 | if errorlevel 1 exit /b 1 106 | echo. 107 | echo.Build finished; now you can run HTML Help Workshop with the ^ 108 | .hhp project file in %BUILDDIR%/htmlhelp. 109 | goto end 110 | ) 111 | 112 | if "%1" == "qthelp" ( 113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 117 | .qhcp project file in %BUILDDIR%/qthelp, like this: 118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\aioboto3.qhcp 119 | echo.To view the help file: 120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\aioboto3.ghc 121 | goto end 122 | ) 123 | 124 | if "%1" == "devhelp" ( 125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished. 129 | goto end 130 | ) 131 | 132 | if "%1" == "epub" ( 133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 137 | goto end 138 | ) 139 | 140 | if "%1" == "latex" ( 141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 145 | goto end 146 | ) 147 | 148 | if "%1" == "latexpdf" ( 149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 150 | cd %BUILDDIR%/latex 151 | make all-pdf 152 | cd %BUILDDIR%/.. 153 | echo. 154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 155 | goto end 156 | ) 157 | 158 | if "%1" == "latexpdfja" ( 159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 160 | cd %BUILDDIR%/latex 161 | make all-pdf-ja 162 | cd %BUILDDIR%/.. 163 | echo. 164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 165 | goto end 166 | ) 167 | 168 | if "%1" == "text" ( 169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 170 | if errorlevel 1 exit /b 1 171 | echo. 172 | echo.Build finished. The text files are in %BUILDDIR%/text. 173 | goto end 174 | ) 175 | 176 | if "%1" == "man" ( 177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 178 | if errorlevel 1 exit /b 1 179 | echo. 180 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 181 | goto end 182 | ) 183 | 184 | if "%1" == "texinfo" ( 185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 186 | if errorlevel 1 exit /b 1 187 | echo. 188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 189 | goto end 190 | ) 191 | 192 | if "%1" == "gettext" ( 193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 194 | if errorlevel 1 exit /b 1 195 | echo. 196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 197 | goto end 198 | ) 199 | 200 | if "%1" == "changes" ( 201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 202 | if errorlevel 1 exit /b 1 203 | echo. 204 | echo.The overview file is in %BUILDDIR%/changes. 205 | goto end 206 | ) 207 | 208 | if "%1" == "linkcheck" ( 209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 210 | if errorlevel 1 exit /b 1 211 | echo. 
212 | echo.Link check complete; look for any errors in the above output ^ 213 | or in %BUILDDIR%/linkcheck/output.txt. 214 | goto end 215 | ) 216 | 217 | if "%1" == "doctest" ( 218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 219 | if errorlevel 1 exit /b 1 220 | echo. 221 | echo.Testing of doctests in the sources finished, look at the ^ 222 | results in %BUILDDIR%/doctest/output.txt. 223 | goto end 224 | ) 225 | 226 | if "%1" == "xml" ( 227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 228 | if errorlevel 1 exit /b 1 229 | echo. 230 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 231 | goto end 232 | ) 233 | 234 | if "%1" == "pseudoxml" ( 235 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 236 | if errorlevel 1 exit /b 1 237 | echo. 238 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 239 | goto end 240 | ) 241 | 242 | :end 243 | -------------------------------------------------------------------------------- /tests/test_dynamo.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | 3 | from boto3.dynamodb.conditions import Key 4 | import pytest 5 | 6 | 7 | @pytest.mark.asyncio 8 | async def test_dynamo_resource_query(dynamodb_resource, random_table_name): 9 | 10 | await dynamodb_resource.create_table( 11 | TableName=random_table_name, 12 | KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}], 13 | AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}], 14 | ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} 15 | ) 16 | 17 | table = await dynamodb_resource.Table(random_table_name) 18 | await table.put_item( 19 | Item={'pk': 'test', 'test_col1': 'col'} 20 | ) 21 | 22 | result = await table.query( 23 | KeyConditionExpression=Key('pk').eq('test') 24 | ) 25 | assert result['Count'] == 1 26 | 27 | 28 | @pytest.mark.asyncio 29 | async def test_dynamo_resource_put(dynamodb_resource, random_table_name): 30 | await dynamodb_resource.create_table( 31 | TableName=random_table_name, 32 | KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}], 33 | AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}], 34 | ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} 35 | ) 36 | 37 | table = await dynamodb_resource.Table(random_table_name) 38 | await table.put_item( 39 | Item={'pk': 'test', 'test_col1': 'col'} 40 | ) 41 | 42 | result = await table.scan() 43 | assert result['Count'] == 1 44 | 45 | 46 | @pytest.mark.asyncio 47 | async def test_dynamo_resource_batch_write_flush_on_exit_context(dynamodb_resource, random_table_name): 48 | await dynamodb_resource.create_table( 49 | TableName=random_table_name, 50 | KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}], 51 | AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}], 52 | ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} 53 | ) 54 | 55 | table = await dynamodb_resource.Table(random_table_name) 56 | async with table.batch_writer() as dynamo_writer: 57 | await dynamo_writer.put_item(Item={'pk': 'test', 'test_col1': 'col'}) 58 | 59 | result = await table.scan() 60 | assert result['Count'] == 1 61 | 62 | 63 | @pytest.mark.asyncio 64 | async def test_dynamo_resource_batch_write_flush_amount(dynamodb_resource, random_table_name): 65 | await dynamodb_resource.create_table( 66 | TableName=random_table_name, 67 | KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}], 68 | AttributeDefinitions=[{'AttributeName': 
'pk', 'AttributeType': 'S'}],
69 |         ProvisionedThroughput={'ReadCapacityUnits': 2, 'WriteCapacityUnits': 1}
70 |     )
71 | 
72 |     table = await dynamodb_resource.Table(random_table_name)
73 |     async with table.batch_writer(flush_amount=5, on_exit_loop_sleep=0.1) as dynamo_writer:
74 |         await dynamo_writer.put_item(Item={'pk': 'test1', 'test_col1': 'col'})
75 | 
76 |         result = await table.scan()
77 |         assert result['Count'] == 0
78 | 
79 |         await dynamo_writer.put_item(Item={'pk': 'test2', 'test_col1': 'col'})
80 |         await dynamo_writer.put_item(Item={'pk': 'test3', 'test_col1': 'col'})
81 |         await dynamo_writer.put_item(Item={'pk': 'test4', 'test_col1': 'col'})
82 |         await dynamo_writer.put_item(Item={'pk': 'test5', 'test_col1': 'col'})
83 |         await dynamo_writer.put_item(Item={'pk': 'test6', 'test_col1': 'col'})
84 |         await dynamo_writer.put_item(Item={'pk': 'test7', 'test_col1': 'col'})
85 |         await dynamo_writer.put_item(Item={'pk': 'test8', 'test_col1': 'col'})
86 |         await dynamo_writer.put_item(Item={'pk': 'test9', 'test_col1': 'col'})
87 | 
88 |         # Flush should have happened after test5, so the count is 5 (test6-test9 are still buffered)
89 |         result = await table.scan()
90 |         assert result['Count'] == 5
91 | 
92 |     # On exit it should flush the remaining items so count should be 9
93 |     result = await table.scan()
94 |     assert result['Count'] == 9
95 | 
96 | 
97 | @pytest.mark.asyncio
98 | async def test_flush_doesnt_reset_item_buffer(dynamodb_resource, random_table_name):
99 |     await dynamodb_resource.create_table(
100 |         TableName=random_table_name,
101 |         KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}],
102 |         AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}],
103 |         ProvisionedThroughput={'ReadCapacityUnits': 2, 'WriteCapacityUnits': 1}
104 |     )
105 | 
106 |     table = await dynamodb_resource.Table(random_table_name)
107 |     async with table.batch_writer(flush_amount=5, on_exit_loop_sleep=0.1) as dynamo_writer:
108 |         dynamo_writer._items_buffer.extend([
109 |             {'PutRequest': {'Item': {'pk': 'test1', 'test_col1': 'col'}}},
110 |             {'PutRequest': {'Item': {'pk': 'test2', 'test_col1': 'col'}}},
111 |             {'PutRequest': {'Item': {'pk': 'test3', 'test_col1': 'col'}}},
112 |             {'PutRequest': {'Item': {'pk': 'test4', 'test_col1': 'col'}}},
113 |             {'PutRequest': {'Item': {'pk': 'test5', 'test_col1': 'col'}}},
114 |             {'PutRequest': {'Item': {'pk': 'test6', 'test_col1': 'col'}}},
115 |         ])
116 |         result = await table.scan()
117 |         assert result['Count'] == 0
118 | 
119 |         await dynamo_writer.put_item(Item={'pk': 'test7', 'test_col1': 'col'})
120 | 
121 |         # Flush amount is 5, so only the first 5 items were sent (2 remain buffered)
122 |         result = await table.scan()
123 |         assert result['Count'] == 5
124 | 
125 |         assert len(dynamo_writer._items_buffer) == 2
126 |         # the flush doesn't delete the items that are still buffered
127 | 
128 |         # add more items than the flush size to check exit iterates over all items
129 |         dynamo_writer._items_buffer.extend([
130 |             {'PutRequest': {'Item': {'pk': 'test8', 'test_col1': 'col'}}},
131 |             {'PutRequest': {'Item': {'pk': 'test9', 'test_col1': 'col'}}},
132 |             {'PutRequest': {'Item': {'pk': 'test10', 'test_col1': 'col'}}},
133 |             {'PutRequest': {'Item': {'pk': 'test11', 'test_col1': 'col'}}},
134 |         ])
135 | 
136 |     # On exit it should flush so count should be 11
137 |     result = await table.scan()
138 |     assert result['Count'] == 11
139 | 
140 | 
141 | @pytest.mark.asyncio
142 | async def test_dynamo_resource_property(dynamodb_resource, random_table_name):
143 |     await dynamodb_resource.create_table(
144 |         TableName=random_table_name,
145 |         KeySchema=[{'AttributeName': 'pk', 'KeyType': 
'HASH'}],
146 |         AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}],
147 |         ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
148 |     )
149 | 
150 |     table = await dynamodb_resource.Table(random_table_name)
151 | 
152 |     table_arn = table.table_arn
153 |     assert asyncio.iscoroutine(table_arn)
154 | 
155 |     result = await table_arn
156 |     assert result is not None
157 | 
158 | 
159 | @pytest.mark.asyncio
160 | async def test_dynamo_resource_waiter(dynamodb_resource, random_table_name):
161 |     await dynamodb_resource.create_table(
162 |         TableName=random_table_name,
163 |         KeySchema=[{'AttributeName': 'pk', 'KeyType': 'HASH'}],
164 |         AttributeDefinitions=[{'AttributeName': 'pk', 'AttributeType': 'S'}],
165 |         ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
166 |     )
167 | 
168 |     table = await dynamodb_resource.Table(random_table_name)
169 | 
170 |     await table.wait_until_exists()
171 | 
172 |     result = await table.table_arn
173 |     assert result is not None
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ========================
2 | Async AWS SDK for Python
3 | ========================
4 | 
5 | 
6 | .. image:: https://img.shields.io/pypi/v/aioboto3.svg
7 |    :target: https://pypi.python.org/pypi/aioboto3
8 | 
9 | .. image:: https://github.com/terrycain/aioboto3/actions/workflows/CI.yml/badge.svg
10 |    :target: https://github.com/terrycain/aioboto3/actions
11 | 
12 | .. image:: https://readthedocs.org/projects/aioboto3/badge/?version=latest
13 |    :target: https://aioboto3.readthedocs.io
14 |    :alt: Documentation Status
15 | 
16 | .. image:: https://pyup.io/repos/github/terrycain/aioboto3/shield.svg
17 |    :target: https://pyup.io/repos/github/terrycain/aioboto3/
18 |    :alt: Updates
19 | 
20 | **Breaking changes for v11: The S3Transfer config passed into upload/download_file etc. has been updated so that it matches what boto3 uses**
21 | 
22 | **Breaking changes for v9: aioboto3.resource and aioboto3.client methods no longer exist; make a session, then call session.client etc.**
23 | This was done for various reasons, but mainly because it prevents the default session from living longer than it should, which breaks situations where event loops are replaced.
24 | 
25 | **The .client and .resource functions must now be used as async context managers.**
26 | 
27 | Now that aiobotocore has reached version 1.0.1, as a side effect of the work put in to fix various issues like bucket region redirection and
28 | supporting web assume-role credentials, the client must now be instantiated using a context manager, which by extension applies to
29 | the resource creator. You used to get away with calling ``res = aioboto3.resource('dynamodb')`` but that no longer works. If you really want
30 | to do that, you can do ``res = await aioboto3.resource('dynamodb').__aenter__()`` but you'll need to remember to call ``__aexit__``.
31 | 
32 | There will most likely be some parts that don't work now which I've missed; just make an issue and we'll get them resolved quickly.
33 | 
34 | Creating service resources must also be async now, e.g.
35 | 
36 | .. 
code-block:: python
37 | 
38 |     async def main():
39 |         session = aioboto3.Session()
40 |         async with session.resource("s3") as s3:
41 |             bucket = await s3.Bucket('mybucket')  # <----------------
42 |             async for s3_object in bucket.objects.all():
43 |                 print(s3_object)
44 | 
45 | 
46 | Updating to aiobotocore 1.0.1 also brings with it support for running inside EKS as well as asyncifying ``get_presigned_url``
47 | 
48 | ----
49 | 
50 | This package is mostly just a wrapper combining the great work of boto3_ and aiobotocore_.
51 | 
52 | aiobotocore allows you to use near enough all of the boto3 client commands in an async manner just by prefixing the command with ``await``.
53 | 
54 | With aioboto3 you can now use the higher level APIs provided by boto3 in an asynchronous manner. I developed this mainly because I wanted to use the boto3 DynamoDB Table object in some async
55 | microservices.
56 | 
57 | While all resources in boto3 should work, I haven't tested them all, so if what you're after is not in the table below then try it out; if it works, drop me an issue with a simple test case
58 | and I'll add it to the table.
59 | 
60 | +---------------------------+--------------------+
61 | | Services                  | Status             |
62 | +===========================+====================+
63 | | DynamoDB Service Resource | Tested and working |
64 | +---------------------------+--------------------+
65 | | DynamoDB Table            | Tested and working |
66 | +---------------------------+--------------------+
67 | | S3                        | Working            |
68 | +---------------------------+--------------------+
69 | | Kinesis                   | Working            |
70 | +---------------------------+--------------------+
71 | | SSM Parameter Store       | Working            |
72 | +---------------------------+--------------------+
73 | | Athena                    | Working            |
74 | +---------------------------+--------------------+
75 | 
76 | 
77 | Example
78 | -------
79 | 
80 | A simple example of using aioboto3 to put items into a DynamoDB table
81 | 
82 | .. code-block:: python
83 | 
84 |     import asyncio
85 |     import aioboto3
86 |     from boto3.dynamodb.conditions import Key
87 | 
88 | 
89 |     async def main():
90 |         session = aioboto3.Session()
91 |         async with session.resource('dynamodb', region_name='eu-central-1') as dynamo_resource:
92 |             table = await dynamo_resource.Table('test_table')
93 | 
94 |             await table.put_item(
95 |                 Item={'pk': 'test1', 'col1': 'some_data'}
96 |             )
97 | 
98 |             result = await table.query(
99 |                 KeyConditionExpression=Key('pk').eq('test1')
100 |             )
101 | 
102 |             # Example batch write
103 |             more_items = [{'pk': 't2', 'col1': 'c1'},
104 |                           {'pk': 't3', 'col1': 'c3'}]
105 |             async with table.batch_writer() as batch:
106 |                 for item_ in more_items:
107 |                     await batch.put_item(Item=item_)
108 | 
109 |     loop = asyncio.get_event_loop()
110 |     loop.run_until_complete(main())
111 | 
112 |     # Outputs:
113 |     # [{'col1': 'some_data', 'pk': 'test1'}]
114 | 
115 | 
116 | Things that either don't work or have been patched
117 | --------------------------------------------------
118 | 
119 | As this library literally wraps boto3, it's inevitable that some things won't magically be async.
120 | 
121 | Fixed:
122 | 
123 | - ``s3_client.download_file*`` This is performed by the s3transfer module. -- Patched with get_object
124 | - ``s3_client.upload_file*`` This is performed by the s3transfer module. -- Patched with custom multipart upload
125 | - ``s3_client.copy`` This is performed by the s3transfer module. 
-- Patched to use get_object -> upload_fileobject
126 | - ``dynamodb_resource.Table.batch_writer`` This now returns an async context manager which performs the same function
127 | - Resource waiters - You can now await waiters which are part of resource objects, not just client waiters, e.g. ``await dynamodbtable.wait_until_exists()``
128 | - Resource object properties are normally autoloaded; now they are all coroutines, and the metadata they come from will be loaded on first await and then cached thereafter.
129 | - S3 Bucket.objects object now works and has been asyncified. Examples here - https://aioboto3.readthedocs.io/en/latest/usage.html#s3-resource-objects
130 | 
131 | 
132 | Amazon S3 Client-Side Encryption
133 | --------------------------------
134 | 
135 | Boto3 doesn't support AWS client-side encryption, so until they do I've added basic support for it. Docs here: CSE_
136 | 
137 | CSE requires the Python ``cryptography`` library, so if you do ``pip install aioboto3[s3cse]`` that'll also include cryptography.
138 | 
139 | This library currently supports client-side encryption using KMS-managed master keys, performing envelope encryption
140 | using either AES/CBC/PKCS5Padding or preferably AES/GCM/NoPadding. The files generated are compatible with the Java Encryption SDK,
141 | so I will assume they are compatible with the Ruby, PHP, Go and C++ libraries as well.
142 | 
143 | Non-KMS-managed keys are not yet supported, but if you have a use for that, raise an issue and I'll look into it.
144 | 
145 | 
146 | 
147 | Documentation
148 | -------------
149 | 
150 | Docs are here - https://aioboto3.readthedocs.io/en/latest/
151 | 
152 | Examples here - https://aioboto3.readthedocs.io/en/latest/usage.html
153 | 
154 | 
155 | Features
156 | ========
157 | 
158 | * Closely mimics the usage of boto3.
159 | 
160 | Todo
161 | ====
162 | 
163 | * More examples
164 | * Set up docs
165 | * Look into monkey-patching the AWS X-Ray SDK to be more async if it needs to be.
166 | 
167 | 
168 | Credits
169 | -------
170 | 
171 | This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template.
172 | It also makes use of the aiobotocore_ and boto3_ libraries. All the credit goes to them; this is mainly a wrapper with some examples.
173 | 
174 | .. _aiobotocore: https://github.com/aio-libs/aiobotocore
175 | .. _boto3: https://github.com/boto/boto3
176 | .. _Cookiecutter: https://github.com/audreyr/cookiecutter
177 | .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
178 | .. _CSE: https://aioboto3.readthedocs.io/en/latest/cse.html
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | #
4 | # aioboto3 documentation build configuration file, created by
5 | # sphinx-quickstart on Tue Jul 9 22:26:36 2013.
6 | #
7 | # This file is execfile()d with the current directory set to its
8 | # containing dir.
9 | #
10 | # Note that not all possible configuration values are present in this
11 | # autogenerated file.
12 | #
13 | # All configuration values have a default; values that are commented out
14 | # serve to show the default.
15 | 
16 | import sys
17 | import os
18 | 
19 | # If extensions (or modules to document with autodoc) are in another
20 | # directory, add these directories to sys.path here. 
If the directory is 21 | # relative to the documentation root, use os.path.abspath to make it 22 | # absolute, like shown here. 23 | # sys.path.insert(0, os.path.abspath('..')) 24 | 25 | # Get the project root dir, which is the parent dir of this 26 | cwd = os.getcwd() 27 | project_root = os.path.dirname(cwd) 28 | 29 | # Insert the project root dir as the first element in the PYTHONPATH. 30 | # This lets us ensure that the source package is imported, and that its 31 | # version is used. 32 | sys.path.insert(0, project_root) 33 | 34 | import aioboto3 35 | 36 | # -- General configuration --------------------------------------------- 37 | 38 | # If your documentation needs a minimal Sphinx version, state it here. 39 | #needs_sphinx = '1.0' 40 | 41 | # Add any Sphinx extension module names here, as strings. They can be 42 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 43 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx_autodoc_typehints'] 44 | 45 | # Add any paths that contain templates here, relative to this directory. 46 | templates_path = ['_templates'] 47 | 48 | # The suffix of source filenames. 49 | source_suffix = '.rst' 50 | 51 | # The encoding of source files. 52 | #source_encoding = 'utf-8-sig' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # General information about the project. 58 | project = u'Async AWS SDK for Python' 59 | copyright = u"2017, Terry Cain" 60 | 61 | # The version info for the project you're documenting, acts as replacement 62 | # for |version| and |release|, also used in various other places throughout 63 | # the built documents. 64 | # 65 | # The short X.Y version. 66 | version = aioboto3.__version__ 67 | # The full version, including alpha/beta/rc tags. 68 | release = aioboto3.__version__ 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | #language = None 73 | 74 | # There are two options for replacing |today|: either, you set today to 75 | # some non-false value, then it is used: 76 | #today = '' 77 | # Else, today_fmt is used as the format for a strftime call. 78 | #today_fmt = '%B %d, %Y' 79 | 80 | # List of patterns, relative to source directory, that match files and 81 | # directories to ignore when looking for source files. 82 | exclude_patterns = ['_build'] 83 | 84 | # The reST default role (used for this markup: `text`) to use for all 85 | # documents. 86 | #default_role = None 87 | 88 | # If true, '()' will be appended to :func: etc. cross-reference text. 89 | #add_function_parentheses = True 90 | 91 | # If true, the current module name will be prepended to all description 92 | # unit titles (such as .. function::). 93 | #add_module_names = True 94 | 95 | # If true, sectionauthor and moduleauthor directives will be shown in the 96 | # output. They are ignored by default. 97 | #show_authors = False 98 | 99 | # The name of the Pygments (syntax highlighting) style to use. 100 | pygments_style = 'sphinx' 101 | 102 | # A list of ignored prefixes for module index sorting. 103 | #modindex_common_prefix = [] 104 | 105 | # If true, keep warnings as "system message" paragraphs in the built 106 | # documents. 107 | #keep_warnings = False 108 | 109 | 110 | # -- Options for HTML output ------------------------------------------- 111 | 112 | # The theme to use for HTML and HTML Help pages. See the documentation for 113 | # a list of builtin themes. 
114 | html_theme = 'default'
115 | 
116 | # Theme options are theme-specific and customize the look and feel of a
117 | # theme further.  For a list of options available for each theme, see the
118 | # documentation.
119 | #html_theme_options = {}
120 | 
121 | # Add any paths that contain custom themes here, relative to this directory.
122 | #html_theme_path = []
123 | 
124 | # The name for this set of Sphinx documents.  If None, it defaults to
125 | # "<project> v<release> documentation".
126 | #html_title = None
127 | 
128 | # A shorter title for the navigation bar.  Default is the same as
129 | # html_title.
130 | #html_short_title = None
131 | 
132 | # The name of an image file (relative to this directory) to place at the
133 | # top of the sidebar.
134 | #html_logo = None
135 | 
136 | # The name of an image file (within the static path) to use as favicon
137 | # of the docs.  This file should be a Windows icon file (.ico) being
138 | # 16x16 or 32x32 pixels large.
139 | #html_favicon = None
140 | 
141 | # If not '', a 'Last updated on:' timestamp is inserted at every page
142 | # bottom, using the given strftime format.
143 | #html_last_updated_fmt = '%b %d, %Y'
144 | 
145 | # If true, SmartyPants will be used to convert quotes and dashes to
146 | # typographically correct entities.
147 | #html_use_smartypants = True
148 | 
149 | # Custom sidebar templates, maps document names to template names.
150 | #html_sidebars = {}
151 | 
152 | # Additional templates that should be rendered to pages, maps page names
153 | # to template names.
154 | #html_additional_pages = {}
155 | 
156 | # If false, no module index is generated.
157 | #html_domain_indices = True
158 | 
159 | # If false, no index is generated.
160 | #html_use_index = True
161 | 
162 | # If true, the index is split into individual pages for each letter.
163 | #html_split_index = False
164 | 
165 | # If true, links to the reST sources are added to the pages.
166 | #html_show_sourcelink = True
167 | 
168 | # If true, "Created using Sphinx" is shown in the HTML footer.
169 | # Default is True.
170 | #html_show_sphinx = True
171 | 
172 | # If true, "(C) Copyright ..." is shown in the HTML footer.
173 | # Default is True.
174 | #html_show_copyright = True
175 | 
176 | # If true, an OpenSearch description file will be output, and all pages
177 | # will contain a <link> tag referring to it.  The value of this option
178 | # must be the base URL from which the finished HTML is served.
179 | #html_use_opensearch = ''
180 | 
181 | # This is the file name suffix for HTML files (e.g. ".xhtml").
182 | #html_file_suffix = None
183 | 
184 | # Output file base name for HTML help builder.
185 | htmlhelp_basename = 'aioboto3doc'
186 | 
187 | 
188 | # -- Options for LaTeX output ------------------------------------------
189 | 
190 | latex_elements = {
191 |     # The paper size ('letterpaper' or 'a4paper').
192 |     #'papersize': 'letterpaper',
193 | 
194 |     # The font size ('10pt', '11pt' or '12pt').
195 |     #'pointsize': '10pt',
196 | 
197 |     # Additional stuff for the LaTeX preamble.
198 |     #'preamble': '',
199 | }
200 | 
201 | # Grouping the document tree into LaTeX files. List of tuples
202 | # (source start file, target name, title, author, documentclass
203 | # [howto/manual]).
204 | latex_documents = [
205 |     ('index', 'aioboto3.tex',
206 |      u'Async AWS SDK for Python Documentation',
207 |      u'Terry Cain', 'manual'),
208 | ]
209 | 
210 | # The name of an image file (relative to this directory) to place at
211 | # the top of the title page. 
212 | #latex_logo = None 213 | 214 | # For "manual" documents, if this is true, then toplevel headings 215 | # are parts, not chapters. 216 | #latex_use_parts = False 217 | 218 | # If true, show page references after internal links. 219 | #latex_show_pagerefs = False 220 | 221 | # If true, show URL addresses after external links. 222 | #latex_show_urls = False 223 | 224 | # Documents to append as an appendix to all manuals. 225 | #latex_appendices = [] 226 | 227 | # If false, no module index is generated. 228 | #latex_domain_indices = True 229 | 230 | 231 | # -- Options for manual page output ------------------------------------ 232 | 233 | # One entry per manual page. List of tuples 234 | # (source start file, name, description, authors, manual section). 235 | man_pages = [ 236 | ('index', 'aioboto3', 237 | u'Async AWS SDK for Python Documentation', 238 | [u'Terry Cain'], 1) 239 | ] 240 | 241 | # If true, show URL addresses after external links. 242 | #man_show_urls = False 243 | 244 | 245 | # -- Options for Texinfo output ---------------------------------------- 246 | 247 | # Grouping the document tree into Texinfo files. List of tuples 248 | # (source start file, target name, title, author, 249 | # dir menu entry, description, category) 250 | texinfo_documents = [ 251 | ('index', 'aioboto3', 252 | u'Async AWS SDK for Python Documentation', 253 | u'Terry Cain', 254 | 'aioboto3', 255 | 'One line description of project.', 256 | 'Miscellaneous'), 257 | ] 258 | 259 | # Documents to append as an appendix to all manuals. 260 | #texinfo_appendices = [] 261 | 262 | # If false, no module index is generated. 263 | #texinfo_domain_indices = True 264 | 265 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 266 | #texinfo_show_urls = 'footnote' 267 | 268 | # If true, do not generate a @detailmenu in the "Top" node's menu. 269 | #texinfo_no_detailmenu = False 270 | 271 | # autodoc_mock_imports = ['aiohttp'] 272 | -------------------------------------------------------------------------------- /aioboto3/session.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | This class essentially overrides the boto3 session init, passing in 4 | an async botocore session 5 | """ 6 | 7 | import copy 8 | 9 | import boto3.session 10 | import boto3.resources.base 11 | import boto3.utils 12 | from boto3.session import DataNotFoundError, UnknownServiceError 13 | from boto3.exceptions import ResourceNotExistsError, UnknownAPIVersionError 14 | 15 | import aiobotocore.session 16 | from aiobotocore.config import AioConfig 17 | from botocore.exceptions import NoCredentialsError 18 | 19 | from aioboto3.resources.factory import AIOBoto3ResourceFactory 20 | 21 | 22 | class Session(boto3.session.Session): 23 | """ 24 | A session stores configuration state and allows you to create service 25 | clients and resources. 26 | 27 | :type aws_access_key_id: string 28 | :param aws_access_key_id: AWS access key ID 29 | :type aws_secret_access_key: string 30 | :param aws_secret_access_key: AWS secret access key 31 | :type aws_session_token: string 32 | :param aws_session_token: AWS temporary session token 33 | :type region_name: string 34 | :param region_name: Default region when creating new connections 35 | :type botocore_session: aiobotocore.session.AioSession 36 | :param botocore_session: Use this AioBotocore session instead of creating 37 | a new default one. 38 | :type profile_name: string 39 | :param profile_name: The name of a profile to use. 
If not given, then 40 | the default profile is used. 41 | :type aws_account_id: string 42 | :param aws_account_id: AWS account ID 43 | """ 44 | def __init__( 45 | self, 46 | aws_access_key_id=None, 47 | aws_secret_access_key=None, 48 | aws_session_token=None, 49 | region_name=None, 50 | botocore_session=None, 51 | profile_name=None, 52 | aws_account_id=None, 53 | ): 54 | if botocore_session is not None: 55 | self._session = botocore_session 56 | else: 57 | # Create a new default session 58 | self._session = aiobotocore.session.get_session() 59 | 60 | # Setup custom user-agent string if it isn't already customized 61 | if self._session.user_agent_name == 'Botocore': 62 | botocore_info = f'Botocore/{self._session.user_agent_version}' 63 | if self._session.user_agent_extra: 64 | self._session.user_agent_extra += ' ' + botocore_info 65 | else: 66 | self._session.user_agent_extra = botocore_info 67 | self._session.user_agent_name = 'Boto3' 68 | self._session.user_agent_version = boto3.__version__ 69 | 70 | if profile_name is not None: 71 | self._session.set_config_variable('profile', profile_name) 72 | 73 | credentials_kwargs = { 74 | "aws_access_key_id": aws_access_key_id, 75 | "aws_secret_access_key": aws_secret_access_key, 76 | "aws_session_token": aws_session_token, 77 | "aws_account_id": aws_account_id, 78 | } 79 | 80 | if any(credentials_kwargs.values()): 81 | if self._account_id_set_without_credentials(**credentials_kwargs): 82 | raise NoCredentialsError() 83 | 84 | if aws_account_id is None: 85 | del credentials_kwargs["aws_account_id"] 86 | 87 | # This only works as dictionaries happen to be ordered. 88 | self._session.set_credentials(*credentials_kwargs.values()) 89 | 90 | if region_name is not None: 91 | self._session.set_config_variable('region', region_name) 92 | 93 | self.resource_factory = AIOBoto3ResourceFactory( 94 | self._session.get_component('event_emitter') 95 | ) 96 | self._setup_loader() 97 | self._register_default_handlers() 98 | 99 | def resource( 100 | self, 101 | service_name, 102 | region_name=None, 103 | api_version=None, 104 | use_ssl=True, 105 | verify=None, 106 | endpoint_url=None, 107 | aws_access_key_id=None, 108 | aws_secret_access_key=None, 109 | aws_session_token=None, 110 | config=None 111 | ): 112 | try: 113 | resource_model = self._loader.load_service_model( 114 | service_name, 'resources-1', api_version 115 | ) 116 | except UnknownServiceError: 117 | available = self.get_available_resources() 118 | has_low_level_client = ( 119 | service_name in self.get_available_services() 120 | ) 121 | raise ResourceNotExistsError( 122 | service_name, available, has_low_level_client 123 | ) 124 | except DataNotFoundError: 125 | # This is because we've provided an invalid API version. 126 | available_api_versions = self._loader.list_api_versions( 127 | service_name, 'resources-1' 128 | ) 129 | raise UnknownAPIVersionError( 130 | service_name, api_version, ', '.join(available_api_versions) 131 | ) 132 | 133 | if api_version is None: 134 | # Even though botocore's load_service_model() can handle 135 | # using the latest api_version if not provided, we need 136 | # to track this api_version in boto3 in order to ensure 137 | # we're pairing a resource model with a client model 138 | # of the same API version. It's possible for the latest 139 | # API version of a resource model in boto3 to not be 140 | # the same API version as a service model in botocore. 
141 | # So we need to look up the api_version if one is not 142 | # provided to ensure we load the same API version of the 143 | # client. 144 | # 145 | # Note: This is relying on the fact that 146 | # loader.load_service_model(..., api_version=None) 147 | # and loader.determine_latest_version(..., 'resources-1') 148 | # both load the same api version of the file. 149 | api_version = self._loader.determine_latest_version( 150 | service_name, 'resources-1' 151 | ) 152 | 153 | # Creating a new resource instance requires the low-level client 154 | # and service model, the resource version and resource JSON data. 155 | # We pass these to the factory and get back a class, which is 156 | # instantiated on top of the low-level client. 157 | if config is not None: 158 | if config.user_agent_extra is None: 159 | config = copy.deepcopy(config) 160 | config.user_agent_extra = 'Resource' 161 | else: 162 | config = AioConfig(user_agent_extra='Resource') 163 | 164 | # client = blah part has been moved into a dodgy context class 165 | return ResourceCreatorContext(self, service_name, region_name, api_version, 166 | use_ssl, verify, endpoint_url, aws_access_key_id, 167 | aws_secret_access_key, aws_session_token, config, 168 | resource_model) 169 | 170 | def _register_default_handlers(self): 171 | # S3 customizations 172 | self._session.register( 173 | 'creating-client-class.s3', 174 | boto3.utils.lazy_call( 175 | 'aioboto3.s3.inject.inject_s3_transfer_methods' 176 | ), 177 | ) 178 | self._session.register( 179 | 'creating-resource-class.s3.Bucket', 180 | boto3.utils.lazy_call('aioboto3.s3.inject.inject_bucket_methods'), 181 | ) 182 | self._session.register( 183 | 'creating-resource-class.s3.Object', 184 | boto3.utils.lazy_call('boto3.s3.inject.inject_object_methods'), 185 | ) 186 | self._session.register( 187 | 'creating-resource-class.s3.ObjectSummary', 188 | boto3.utils.lazy_call( 189 | 'aioboto3.s3.inject.inject_object_summary_methods' 190 | ), 191 | ) 192 | 193 | # DynamoDb customizations 194 | self._session.register( 195 | 'creating-resource-class.dynamodb', 196 | boto3.utils.lazy_call( 197 | 'boto3.dynamodb.transform.register_high_level_interface' 198 | ), 199 | unique_id='high-level-dynamodb', 200 | ) 201 | self._session.register( 202 | 'creating-resource-class.dynamodb.Table', 203 | boto3.utils.lazy_call( 204 | 'aioboto3.dynamodb.table.register_table_methods' 205 | ), 206 | unique_id='high-level-dynamodb-table', 207 | ) 208 | 209 | # EC2 Customizations 210 | self._session.register( 211 | 'creating-resource-class.ec2.ServiceResource', 212 | boto3.utils.lazy_call('boto3.ec2.createtags.inject_create_tags') 213 | ) 214 | 215 | self._session.register( 216 | 'creating-resource-class.ec2.Instance', 217 | boto3.utils.lazy_call( 218 | 'boto3.ec2.deletetags.inject_delete_tags', 219 | event_emitter=self.events 220 | ), 221 | ) 222 | 223 | 224 | class ResourceCreatorContext(object): 225 | def __init__(self, session, service_name, region_name, api_version, use_ssl, verify, 226 | endpoint_url, aws_access_key_id, aws_secret_access_key, aws_session_token, 227 | config, resource_model): 228 | self.service_name = service_name 229 | self.resource_model = resource_model 230 | self.session = session 231 | self.api_version = api_version 232 | self.cls = None 233 | self.client = session.client( 234 | service_name, region_name=region_name, api_version=api_version, 235 | use_ssl=use_ssl, verify=verify, endpoint_url=endpoint_url, 236 | aws_access_key_id=aws_access_key_id, 237 | aws_secret_access_key=aws_secret_access_key, 
238 | aws_session_token=aws_session_token, config=config) 239 | 240 | async def __aenter__(self): 241 | client = await self.client.__aenter__() 242 | service_model = client.meta.service_model 243 | 244 | # Create a ServiceContext object to serve as a reference to 245 | # important read-only information about the general service. 246 | service_context = boto3.utils.ServiceContext( 247 | service_name=self.service_name, 248 | service_model=service_model, 249 | resource_json_definitions=self.resource_model['resources'], 250 | service_waiter_model=boto3.utils.LazyLoadedWaiterModel( 251 | self.session._session, self.service_name, self.api_version 252 | ), 253 | ) 254 | 255 | # Create the service resource class. 256 | self.cls = (await self.session.resource_factory.load_from_definition( 257 | resource_name=self.service_name, 258 | single_resource_json_definition=self.resource_model['service'], 259 | service_context=service_context 260 | ))(client=client) 261 | 262 | return self.cls 263 | 264 | async def __aexit__(self, exc_type, exc, tb): 265 | await self.cls.__aexit__(exc_type, exc, tb) 266 | -------------------------------------------------------------------------------- /aioboto3/resources/factory.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from functools import partial 3 | 4 | from boto3.resources.factory import ResourceFactory 5 | from boto3.resources.model import ResourceModel 6 | from boto3.resources.base import ResourceMeta 7 | from boto3.docs import docstring 8 | from boto3.exceptions import ResourceLoadException 9 | from boto3.resources.factory import build_identifiers 10 | 11 | from aioboto3.resources.collection import AIOCollectionFactory 12 | from aioboto3.resources.action import AIOServiceAction, AIOWaiterAction 13 | from aioboto3.resources.base import AIOBoto3ServiceResource 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | class AIOBoto3ResourceFactory(ResourceFactory): 19 | # noinspection PyMissingConstructor 20 | def __init__(self, emitter): 21 | self._collection_factory = AIOCollectionFactory() 22 | self._emitter = emitter 23 | 24 | async def load_from_definition(self, resource_name, 25 | single_resource_json_definition, service_context): 26 | logger.debug('Loading %s:%s', service_context.service_name, 27 | resource_name) 28 | 29 | # Using the loaded JSON create a ResourceModel object. 30 | resource_model = ResourceModel( 31 | resource_name, 32 | single_resource_json_definition, 33 | service_context.resource_json_definitions 34 | ) 35 | 36 | # Do some renaming of the shape if there was a naming collision 37 | # that needed to be accounted for. 38 | shape = None 39 | if resource_model.shape: 40 | shape = service_context.service_model.shape_for( 41 | resource_model.shape 42 | ) 43 | resource_model.load_rename_map(shape) 44 | 45 | # Set some basic info 46 | meta = ResourceMeta( 47 | service_context.service_name, resource_model=resource_model) 48 | attrs = { 49 | 'meta': meta, 50 | } 51 | 52 | # Create and load all of attributes of the resource class based 53 | # on the models. 
54 | 55 | # Identifiers 56 | self._load_identifiers( 57 | attrs=attrs, 58 | meta=meta, 59 | resource_name=resource_name, 60 | resource_model=resource_model 61 | ) 62 | 63 | # Load/Reload actions 64 | self._load_actions( 65 | attrs=attrs, 66 | resource_name=resource_name, 67 | resource_model=resource_model, 68 | service_context=service_context 69 | ) 70 | 71 | # Attributes that get auto-loaded 72 | self._load_attributes( 73 | attrs=attrs, 74 | meta=meta, 75 | resource_name=resource_name, 76 | resource_model=resource_model, 77 | service_context=service_context) 78 | 79 | # Collections and their corresponding methods 80 | self._load_collections( 81 | attrs=attrs, 82 | resource_model=resource_model, 83 | service_context=service_context) 84 | 85 | # References and Subresources 86 | self._load_has_relations( 87 | attrs=attrs, 88 | resource_name=resource_name, 89 | resource_model=resource_model, 90 | service_context=service_context 91 | ) 92 | 93 | # Waiter resource actions 94 | self._load_waiters( 95 | attrs=attrs, 96 | resource_name=resource_name, 97 | resource_model=resource_model, 98 | service_context=service_context 99 | ) 100 | 101 | # Create the name based on the requested service and resource 102 | cls_name = resource_name 103 | if service_context.service_name == resource_name: 104 | cls_name = 'ServiceResource' 105 | cls_name = service_context.service_name + '.' + cls_name 106 | 107 | base_classes = [AIOBoto3ServiceResource] 108 | if self._emitter is not None: 109 | await self._emitter.emit( 110 | 'creating-resource-class.%s' % cls_name, 111 | class_attributes=attrs, 112 | base_classes=base_classes, 113 | service_context=service_context 114 | ) 115 | return type(str(cls_name), tuple(base_classes), attrs) 116 | 117 | def _create_autoload_property( 118 | factory_self, 119 | resource_name, 120 | name, 121 | snake_cased, 122 | member_model, 123 | service_context 124 | ): 125 | """ 126 | Creates a new property on the resource to lazy-load its value 127 | via the resource's ``load`` method (if it exists). 128 | """ 129 | # The property loader will check to see if this resource has already 130 | # been loaded and return the cached value if possible. If not, then 131 | # it first checks to see if it CAN be loaded (raise if not), then 132 | # calls the load before returning the value. 133 | async def property_loader(self): 134 | if self.meta.data is None: 135 | if hasattr(self, 'load'): 136 | await self.load() 137 | else: 138 | raise ResourceLoadException( 139 | '{0} has no load method'.format( 140 | self.__class__.__name__)) 141 | 142 | return self.meta.data.get(name) 143 | 144 | property_loader.__name__ = str(snake_cased) 145 | property_loader.__doc__ = docstring.AttributeDocstring( 146 | service_name=service_context.service_name, 147 | resource_name=resource_name, 148 | attr_name=snake_cased, 149 | event_emitter=factory_self._emitter, 150 | attr_model=member_model, 151 | include_signature=False 152 | ) 153 | 154 | return property(property_loader) 155 | 156 | def _create_waiter( 157 | factory_self, resource_waiter_model, resource_name, service_context 158 | ): 159 | """ 160 | Creates a new wait method for each resource where both a waiter and 161 | resource model is defined. 
162 | """ 163 | waiter = AIOWaiterAction( 164 | resource_waiter_model, 165 | waiter_resource_name=resource_waiter_model.name 166 | ) 167 | 168 | async def do_waiter(self, *args, **kwargs): 169 | await waiter(self, *args, **kwargs) 170 | 171 | do_waiter.__name__ = str(resource_waiter_model.name) 172 | do_waiter.__doc__ = docstring.ResourceWaiterDocstring( 173 | resource_name=resource_name, 174 | event_emitter=factory_self._emitter, 175 | service_model=service_context.service_model, 176 | resource_waiter_model=resource_waiter_model, 177 | service_waiter_model=service_context.service_waiter_model, 178 | include_signature=False 179 | ) 180 | return do_waiter 181 | 182 | def _create_class_partial( 183 | factory_self, subresource_model, resource_name, service_context 184 | ): 185 | """ 186 | Creates a new method which acts as a functools.partial, passing 187 | along the instance's low-level `client` to the new resource 188 | class' constructor. 189 | """ 190 | name = subresource_model.resource.type 191 | 192 | async def create_resource(self, *args, **kwargs): 193 | # We need a new method here because we want access to the 194 | # instance's client. 195 | positional_args = [] 196 | 197 | # We lazy-load the class to handle circular references. 198 | json_def = service_context.resource_json_definitions.get(name, {}) 199 | resource_cls = await factory_self.load_from_definition( 200 | resource_name=name, 201 | single_resource_json_definition=json_def, 202 | service_context=service_context 203 | ) 204 | 205 | # Assumes that identifiers are in order, which lets you do 206 | # e.g. ``sqs.Queue('foo').Message('bar')`` to create a new message 207 | # linked with the ``foo`` queue and which has a ``bar`` receipt 208 | # handle. If we did kwargs here then future positional arguments 209 | # would lead to failure. 210 | identifiers = subresource_model.resource.identifiers 211 | if identifiers is not None: 212 | for identifier, value in build_identifiers(identifiers, self): 213 | positional_args.append(value) 214 | 215 | return partial( 216 | resource_cls, *positional_args, client=self.meta.client 217 | )(*args, **kwargs) 218 | 219 | create_resource.__name__ = str(name) 220 | create_resource.__doc__ = docstring.SubResourceDocstring( 221 | resource_name=resource_name, 222 | sub_resource_model=subresource_model, 223 | service_model=service_context.service_model, 224 | include_signature=False 225 | ) 226 | return create_resource 227 | 228 | def _create_action( 229 | factory_self, 230 | action_model, 231 | resource_name, 232 | service_context, 233 | is_load=False 234 | ): 235 | """ 236 | Creates a new method which makes a request to the underlying 237 | AWS service. 238 | """ 239 | # Create the action in in this closure but before the ``do_action`` 240 | # method below is invoked, which allows instances of the resource 241 | # to share the ServiceAction instance. 242 | action = AIOServiceAction( 243 | action_model, factory=factory_self, service_context=service_context 244 | ) 245 | 246 | # A resource's ``load`` method is special because it sets 247 | # values on the resource instead of returning the response. 248 | if is_load: 249 | # We need a new method here because we want access to the 250 | # instance via ``self``. 251 | async def do_action(self, *args, **kwargs): 252 | response = await action(self, *args, **kwargs) 253 | self.meta.data = response 254 | 255 | # Create the docstring for the load/reload mehtods. 
256 | lazy_docstring = docstring.LoadReloadDocstring( 257 | action_name=action_model.name, 258 | resource_name=resource_name, 259 | event_emitter=factory_self._emitter, 260 | load_model=action_model, 261 | service_model=service_context.service_model, 262 | include_signature=False 263 | ) 264 | else: 265 | # We need a new method here because we want access to the 266 | # instance via ``self``. 267 | async def do_action(self, *args, **kwargs): 268 | response = await action(self, *args, **kwargs) 269 | 270 | if hasattr(self, 'load'): 271 | # Clear cached data. It will be reloaded the next 272 | # time that an attribute is accessed. 273 | # TODO: Make this configurable in the future? 274 | self.meta.data = None 275 | 276 | return response 277 | 278 | lazy_docstring = docstring.ActionDocstring( 279 | resource_name=resource_name, 280 | event_emitter=factory_self._emitter, 281 | action_model=action_model, 282 | service_model=service_context.service_model, 283 | include_signature=False 284 | ) 285 | 286 | do_action.__name__ = str(action_model.name) 287 | do_action.__doc__ = lazy_docstring 288 | return do_action 289 | 290 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2015-2016 Nikolai Novik

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/docs/usage.rst:
--------------------------------------------------------------------------------
=====
Usage
=====

As the usage nearly mimics that of boto3, I thought it best just to throw lots of examples at you instead.
The moral of the story is: just prefix the boto3 stuff with ``await``.

This library requires the ``async``/``await`` syntax, and as of release 15.0.0 it supports Python 3.9 and newer
(see the changelog for the currently supported versions).
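
For a quick taste of the pattern, here is a minimal sketch (the service, region and bucket-listing call are just
illustrative placeholders):

.. code-block:: python3

    import asyncio

    import aioboto3


    async def main():
        session = aioboto3.Session()
        # The same call you would make with boto3, just opened with
        # ``async with`` and awaited.
        async with session.client('s3', region_name='eu-central-1') as s3:
            resp = await s3.list_buckets()
            print([bucket['Name'] for bucket in resp.get('Buckets', [])])

    asyncio.run(main())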

Slight differences
------------------

``aioboto3.resource`` will return a boto3-like resource object, but it will also have an awaitable ``.close()`` and
``__aenter__``/``__aexit__`` methods, which allow you to use the ``async with`` syntax.

Service resources like ``s3.Bucket`` now need to be created using ``await``, e.g. ``bucket = await s3_resource.Bucket('somebucket')``


DynamoDB Examples
-----------------

Put an item into a DynamoDB table, then query it using the nice ``Key().eq()`` abstraction.

.. code-block:: python3

    import asyncio

    import aioboto3
    from boto3.dynamodb.conditions import Key


    async def main():
        session = aioboto3.Session()
        async with session.resource('dynamodb', region_name='eu-central-1') as dynamo_resource:
            table = await dynamo_resource.Table('test_table')

            await table.put_item(
                Item={'pk': 'test1', 'col1': 'some_data'}
            )

            result = await table.query(
                KeyConditionExpression=Key('pk').eq('test1')
            )

            print(result['Items'])

    asyncio.run(main())

    # Outputs:
    # [{'col1': 'some_data', 'pk': 'test1'}]


Use the batch writer to take care of DynamoDB write retries etc...

.. code-block:: python3

    import asyncio

    import aioboto3


    async def main():
        session = aioboto3.Session()
        async with session.resource('dynamodb', region_name='eu-central-1') as dynamo_resource:
            table = await dynamo_resource.Table('test_table')

            # As the default batch size is 25, all of these will be written in one batch
            async with table.batch_writer() as dynamo_writer:
                await dynamo_writer.put_item(Item={'pk': 'test1', 'col1': 'some_data'})
                await dynamo_writer.put_item(Item={'pk': 'test2', 'col1': 'some_data'})
                await dynamo_writer.put_item(Item={'pk': 'test3', 'col1': 'some_data'})
                await dynamo_writer.put_item(Item={'pk': 'test4', 'col1': 'some_data'})
                await dynamo_writer.put_item(Item={'pk': 'test5', 'col1': 'some_data'})

            result = await table.scan()

            print(result['Count'])

    asyncio.run(main())

    # Outputs:
    # 5


The ``batch_writer()`` can take a keyword argument of ``flush_amount``, which controls how many items are buffered
before a flush, and a keyword argument of ``on_exit_loop_sleep``. The ``on_exit_loop_sleep`` argument will add an
async sleep in the flush loop when you exit the context manager, as sketched below.
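
A short sketch of those keyword arguments (the values here are arbitrary, and ``table`` is assumed to have been
created as in the examples above):

.. code-block:: python3

    # Buffer 50 items per flush instead of the default 25, and sleep in the
    # exit flush loop so other tasks get a chance to run.
    async with table.batch_writer(flush_amount=50, on_exit_loop_sleep=1) as dynamo_writer:
        for i in range(200):
            await dynamo_writer.put_item(Item={'pk': f'test{i}', 'col1': 'some_data'})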

S3 Examples
-----------

Here are some examples of uploading and streaming a file from S3, serving via aiohttp.

Upload
~~~~~~

Here we upload by streaming from an open file object.

.. code-block:: python3

    import logging
    from pathlib import Path

    import aioboto3

    LOG = logging.getLogger(__name__)


    async def upload(
        suite: str,
        release: str,
        filename: str,
        staging_path: Path,
        bucket: str,
    ) -> str:
        blob_s3_key = f"{suite}/{release}/{filename}"

        session = aioboto3.Session()
        async with session.client("s3") as s3:
            try:
                with staging_path.open("rb") as spfp:
                    LOG.info(f"Uploading {blob_s3_key} to s3")
                    await s3.upload_fileobj(spfp, bucket, blob_s3_key)
                    LOG.info(f"Finished Uploading {blob_s3_key} to s3")
            except Exception as e:
                LOG.error(f"Unable to s3 upload {staging_path} to {blob_s3_key}: {e} ({type(e)})")
                return ""

        # Include the bucket in the returned S3 URI
        return f"s3://{bucket}/{blob_s3_key}"

Streaming Download
~~~~~~~~~~~~~~~~~~

Here we pull the object from S3 in chunks and serve it out to an HTTP request via `aiohttp `_

.. code-block:: python3

    import logging

    import aioboto3
    from aiohttp import web
    from multidict import MultiDict

    LOG = logging.getLogger(__name__)


    async def serve_blob(
        suite: str,
        release: str,
        filename: str,
        bucket: str,
        request: web.Request,
        chunk_size: int = 69 * 1024
    ) -> web.StreamResponse:
        blob_s3_key = f"{suite}/{release}/{filename}"

        session = aioboto3.Session()
        async with session.client("s3") as s3:
            LOG.info(f"Serving {bucket} {blob_s3_key}")
            s3_ob = await s3.get_object(Bucket=bucket, Key=blob_s3_key)

            ob_info = s3_ob["ResponseMetadata"]["HTTPHeaders"]
            resp = web.StreamResponse(
                headers=MultiDict(
                    {
                        "CONTENT-DISPOSITION": (
                            f"attachment; filename='{filename}'"
                        ),
                        "Content-Type": ob_info["content-type"],
                    }
                )
            )
            resp.content_type = ob_info["content-type"]
            resp.content_length = ob_info["content-length"]
            await resp.prepare(request)

            stream = s3_ob["Body"]
            while file_data := await stream.read(chunk_size):
                await resp.write(file_data)

        return resp

S3 Resource Objects
~~~~~~~~~~~~~~~~~~~

The S3 Bucket object also works, but its methods have been asyncified, e.g.:

.. code-block:: python3

    import aioboto3


    async def main():
        session = aioboto3.Session()
        async with session.resource("s3") as s3:

            bucket = await s3.Bucket('mybucket')
            async for s3_object in bucket.objects.all():
                print(s3_object)

            async for s3_object in bucket.objects.filter(Prefix='someprefix/'):
                print(s3_object)

            await bucket.objects.all().delete()

            # or
            await bucket.objects.filter(Prefix='test/').delete()


Misc
----

Clients
~~~~~~~

As you can see, it also works for standard client connections.

.. code-block:: python3

    import asyncio

    import aioboto3


    async def main():
        session = aioboto3.Session()
        async with session.client('ssm', region_name='eu-central-1') as ssm_client:
            result = await ssm_client.describe_parameters()

            print(result['Parameters'])


    asyncio.run(main())

    # Outputs:
    # []

Retries
~~~~~~~

Use **AioConfig**, the async extension of `Config `_. Pass it to the client as you would with standard boto3.

The code below will eventually retrieve a list of 20 copies of the organization root response.
It prints a list of the number of retries required to get each response.

`ListRoots `_ is a convenient test function because it only reads data and it has a low `throttling limit `_ (per account, 1 per second and 2 burst).

.. code-block:: python3

    import asyncio

    from aioboto3 import Session
    from aiobotocore.config import AioConfig

    try_hard = AioConfig(retries={"max_attempts": 100})

    async def main():
        client_ctx = Session().client("organizations", config=try_hard)
        async with client_ctx as client:
            resp_list = await asyncio.gather(
                *[client.list_roots() for _ in range(20)]
            )
            print([r["ResponseMetadata"]["RetryAttempts"] for r in resp_list])

    asyncio.run(main())

It prints a list like this:

.. code-block:: python3

    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 4, 6, 4, 4, 4, 4, 4, 5, 5]


The list is ordered by response timestamp, earliest first. In this run the first 10 responses needed no retries,
while the next 10 each took between 4 and 6 retries.

The default limit is 4 retries. That's not enough for 20 concurrent requests, so if you remove the `config` parameter
the code will almost certainly fail like this:

``botocore.errorfactory.TooManyRequestsException: An error occurred (TooManyRequestsException) when calling the ListRoots operation (reached max retries: 4): AWS Organizations can't complete your request because another request is already in progress. Try again later.``

AioHTTP Server Example
~~~~~~~~~~~~~~~~~~~~~~

Since aioboto3 v8.0.0, ``.client`` and ``.resource`` return async context managers, which breaks some normal patterns
when used with long-running processes like web servers.

This example creates an ``AsyncExitStack`` which essentially does ``async with`` on the context manager returned by
``.resource`` and saves the exit coroutine so that it can be called later to clean up. If you comment out
``_app.on_shutdown.append(shutdown_tasks)`` and run the example, you'll receive a warning stating that an aiohttp
session was not closed.


.. code-block:: python3

    """
    contextlib.AsyncExitStack requires Python 3.7+
    """
    import contextlib

    import aioboto3
    from boto3.dynamodb.conditions import Key
    from aiohttp import web

    routes = web.RouteTableDef()
    session = aioboto3.Session()


    @routes.get('/')
    async def hello(request):

        # request.app['table'] == Table object from boto3 docs
        response = await request.app['table'].query(
            KeyConditionExpression=Key('id').eq('lalalala')
        )

        return web.Response(text=str(response))


    async def startup_tasks(app: web.Application) -> None:
        context_stack = contextlib.AsyncExitStack()
        app['context_stack'] = context_stack

        app['dynamo_resource'] = await context_stack.enter_async_context(
            session.resource('dynamodb', region_name='eu-west-1')
        )
        # By now, app['dynamo_resource'] will have methods like .Table() and list_tables() etc...

        # aioboto3 v8.0.0+ all service resources (aka Table(), Bucket() etc...) need to be awaited
        app['table'] = await app['dynamo_resource'].Table('somedynamodbtablename')


    async def shutdown_tasks(app: web.Application) -> None:
        await app['context_stack'].aclose()
        # By now, app['dynamo_resource'] would be closed


    _app = web.Application()
    _app.add_routes(routes)
    _app.on_startup.append(startup_tasks)
    _app.on_shutdown.append(shutdown_tasks)
    web.run_app(_app, port=8000)


TODO
----

More examples
--------------------------------------------------------------------------------
/CHANGELOG.rst:
--------------------------------------------------------------------------------
=======
History
=======

15.5.0 (2025-10-29)
-------------------

* Bumped `aiobotocore` to 2.25.1

15.4.0 (2025-10-18)
-------------------

* Bumped `aiobotocore` to 2.25.0

15.3.0 (2025-10-18)
-------------------

* Bumped `aiobotocore` to 2.24.3 - thanks @thyhax
* Contributing documentation updates - thanks @thyhax
* Updated Makefile to use `uv` over `poetry` - thanks @thyhax

15.2.0 (2025-10-04)
-------------------

* Bumped `aiobotocore` to 2.24.2 - thanks @n8felton & @mweinelt
* Fixed inconsistency in patched S3 download/upload method documentation - thanks @shari-ful

15.1.0 (2025-08-12)
-------------------

* Bumped `aiobotocore` to 2.24.0 - thanks @claytonparnell
* Added Python 3.14 support - thanks @claytonparnell

15.0.0 (2025-06-26)
-------------------

* Bumped `aiobotocore` to 2.23.0
* Dropped Python 3.8 support.

14.3.0 (2025-05-07)
-------------------

* Improved `s3.upload_file*` cancellation handling - thanks @giancarloromeo

14.2.0 (2025-05-02)
-------------------

* Bumped `aiobotocore` to 2.22.0
* Fixed checksum use during S3 multipart upload using `s3.upload_file*` - thanks @jvrana-tetra

14.1.0 (2025-03-04)
-------------------

* Bumped `aiobotocore` to 2.21.1

14.0.0 (2025-02-23)
-------------------

* Bumped `aiobotocore` to 2.20.0
* @zent1n0 updated upload_fileobj Callback to work with coroutines.
* Fixed `aioboto3.session.Session` typehints.
* Switched package manager to UV

13.4.0 (2025-01-19)
-------------------

* Bumped `aiobotocore` to 2.18.0

13.3.0 (2024-12-21)
-------------------

* Bumped `aiobotocore` to 2.16.0
* Fixed missing `ExtraArgs` propagation on `s3.download_fileobj`.

13.2.0 (2024-10-13)
-------------------

* Bumped `aiobotocore` to 2.15.2
* Added `max_request_concurrency` semaphore to `s3.copy` to prevent starting potentially hundreds of uploads at once.

13.1.1 (2024-07-09)
-------------------

* Removed `botocore` and `jmespath` imports and imported them indirectly via boto3 to make static analysers happier.

13.1.0 (2024-06-25)
-------------------

* Bumped `aiobotocore` to 2.13.1

13.0.1 (2024-06-05)
-------------------

* Fixed issue with `upload_fileobj` where uploads would be incomplete if the async file object returned fewer bytes than
  the read requested. This is noticeable when passing in async streams like that of `aiohttp`'s response content.
96 | 97 | 13.0.0 (2024-05-27) 98 | ------------------- 99 | 100 | * Bumped `aiobotocore` to 2.13.0 101 | * Added multipart download options to `s3.download_file` and `s3.download_fileobj` - thanks @kyboi 102 | * Cleaned up some docs examples - thanks @cuducos 103 | * Updated S3 transfer patched to handle `ExtraArgs` better. 104 | * Rewrote `s3.copy` to make use of `s3.copy_object` or `s3.upload_part_copy`. FYI the threshold of when to use `s3.copy` is based 105 | on if the file's size is below `Config.MultipartThreshold` which defaults to 8MiB, even though `s3.copy` can do up to 5GiB. 106 | 107 | 12.4.0 (2024-04-15) 108 | ------------------- 109 | 110 | * Bumped `aiobotocore` to 2.12.3 - thanks @zikphil 111 | 112 | 12.3.0 (2024-02-02) 113 | ------------------- 114 | 115 | * Bumped `aiobotocore` to 2.11.2 116 | * Fixed cryptography typing on the CSE module 117 | 118 | 12.2.0 (2024-01-16) 119 | ------------------- 120 | 121 | * Bumped `aiobotocore` to 2.9.0 - thanks @blotero 122 | 123 | 12.1.0 (2023-12-08) 124 | ------------------- 125 | 126 | * Bumped `aiobotocore` to 2.8.0 - thanks @huonw 127 | 128 | 12.0.0 (2023-10-25) 129 | ------------------- 130 | 131 | * Bumped `aiobotocore` to 2.7.0 132 | * Python 3.7 support dropped due to aiobotocore requirements. 133 | 134 | 11.3.1 (2023-10-14) 135 | ------------------- 136 | 137 | * Fixed stall in `s3.upload_fileobj` - thanks @rlindsberg 138 | 139 | 11.3.0 (2023-08-19) 140 | ------------------- 141 | 142 | * Bumped aiobotocore to 2.6.0 143 | 144 | 11.2.0 (2023-05-10) 145 | ------------------- 146 | 147 | * Upload_fileobj performance enhancements - thanks @JohnHBrock 148 | * Contributing documentation updates - thanks @JohnHBrock 149 | 150 | 11.1.1 (2023-09-25) 151 | ------------------- 152 | 153 | * Bumped aiobotocore to 2.5.4 154 | 155 | 11.1.0 (2023-03-30) 156 | ------------------- 157 | 158 | * Bumped aiobotocore to 2.5.0 159 | 160 | 11.0.1 (2023-03-06) 161 | ------------------- 162 | 163 | * Fixed erroneous change to the minimum python version. 164 | 165 | 11.0.0 (2023-03-05) 166 | ------------------- 167 | 168 | * Changed keyword arguments for S3Transfer config to match upstream. 

10.4.0 (2023-01-31)
-------------------

* Bumped aiobotocore to 2.4.2
* Updated CI action versions

10.3.0 (2023-01-04)
-------------------

* Added support for async fileobjects in download_fileobj - thanks @prodeveloper0

10.2.0 (2022-12-03)
-------------------

* Updated S3 streaming example
* Bumped aiobotocore to 2.4.1

10.1.0 (2022-09-21)
-------------------

* Bumped aiobotocore to 2.4.0 - thanks @abivolmv

10.0.0 (2022-08-10)
-------------------

* Bumped aiobotocore to 2.3.4 - thanks @dacevedo12
* Fixed async pytest fixtures which now work in pytest-asyncio strict mode
* Fixed edge case in dynamodb batch writer losing uncommitted writes - see #270, thanks @JamesVerrill

9.6.0 (2022-05-06)
------------------

* Bumped aiobotocore to 2.3.0

9.5.0 (2022-03-29)
------------------

* Bumped aiobotocore to 2.2.0 - thanks @dacevedo12
* Updated formatting in various places to match the boto3 equivalent

9.4.0 (2022-03-13)
------------------

* Bumped aiobotocore to 2.1.2
* Updated asyncio.wait usage to be compatible with py3.11 - thanks @noblepayne
* Fixed resource __aexit__ not being used properly - thanks @chrisBLIT
* Added S3 CopyFrom test coverage
* Bumped Moto to 3.1.0

9.3.1 (2022-01-10)
------------------

* Bumped aiobotocore to 2.1.0 - thanks @abivolmv

9.3.0 (2021-12-13)
------------------

* Bumped aiobotocore to 2.0.1 - thanks @mmaslowskicc

9.2.2 (2021-10-06)
------------------

* Fixed pyproject misconfiguration bringing in extra dependencies by default - thanks @and-semakin

9.2.1 (2021-10-05)
------------------

* Bumped aiobotocore to 1.4.2

9.2.0 (2021-07-22)
------------------

* Beta release promoted

9.2.0b0 (2021-07-19) Beta 0
---------------------------

* Experimental AWS Chalice added.
248 | 249 | 9.1.0 (2021-07-16) 250 | ------------------ 251 | 252 | * Switched to using Poetry for dependency management and setup.py replacement 253 | * Bumped aiobotocore to 1.3.3 to fix some JSON parsing bugs on streams 254 | 255 | 9.0.0 (2021-06-27) 256 | ------------------ 257 | 258 | * Removed default session 259 | * Bumped aiobotocore to 1.3.1 - thanks @slipovenko 260 | 261 | 262 | 8.3.0 (2021-03-24) 263 | ------------------ 264 | 265 | * Fixed S3.Bucket injected load method 266 | * Updated CI 267 | 268 | 8.2.1 (2021-03-02) 269 | ------------------ 270 | 271 | * Better aiofiles support - thanks @frosthamster 272 | 273 | 8.2.0 (2020-12-02) 274 | ------------------ 275 | 276 | * Added file.read error handling in S3 upload_file/upload_fileobj 277 | 278 | 8.1.1 (2020-12-01) 279 | ------------------ 280 | 281 | * Fixed s3.ObjectSummary metadata properties loading 282 | 283 | 8.1.0 (2020-12-01) 284 | ------------------ 285 | 286 | * Bumped to use aiobotocore 1.1.2 287 | 288 | 8.0.5 (2020-07-08) 289 | ------------------ 290 | 291 | * @u-ashish Fixed a bug where ExtraArgs was ignored when doing s3.copy 292 | 293 | 8.0.4 (2020-07-07) 294 | ------------------ 295 | 296 | * @u-ashish Fixed a bug where ExtraArgs was ignored when doing s3.download_file/fileobj 297 | 298 | 8.0.3 (2020-04-25) 299 | ------------------ 300 | 301 | * Bumped aiobotocore version 302 | * @compscidr Fixed a bug where upload_file callback returned the wrong amount of bytes 303 | 304 | 8.0.2 (2020-04-10) 305 | ------------------ 306 | 307 | * Bumped aiobotocore version 308 | 309 | 8.0.1 (2020-04-08) 310 | ------------------ 311 | 312 | * Bumped aiobotocore version 313 | * Added aiohttp example 314 | 315 | 8.0.0 (2020-04-03) 316 | ------------------ 317 | 318 | * Major refactor to mirror boto3 file structure 319 | * Updated to support aiobotocore 1.0.1, a few breaking changes. 320 | * Switched to pipenv 321 | 322 | 7.1.0 (2020-03-31) 323 | ------------------ 324 | 325 | * Pinned aiobotocore version. Aiobotocore 1.0.0 requires changes. 326 | 327 | 7.0.0 (2020-03-12) 328 | ------------------ 329 | 330 | * Upgrade to aiobotocore 0.12 331 | * Bumped minimum python version to 3.6, adding support for 3.8 332 | * Eliminate use of deprecated loop arguments 333 | 334 | 6.5.0 (2020-02-20) 335 | ------------------ 336 | 337 | * @bact fixed some typos :) 338 | * Asyncified the S3 resource Bucket().objects API and by extension, anything else in boto3 that uses the same object structure 339 | * Bumped aiobotocore version so that eventstreams would now work 340 | 341 | 6.4.0 (2019-06-20) 342 | ------------------ 343 | 344 | * Updated ```upload_fileobj``` to upload multiple parts concurrently to make best use of the available bandwidth 345 | 346 | 6.2.0 (2019-05-07) 347 | ------------------ 348 | 349 | * @inadarei Added batch writing example 350 | * Added waiter support in resources 351 | * Made resource object properties coroutines and lazy load data when called 352 | 353 | 6.2.0 (2019-02-27) 354 | ------------------ 355 | 356 | * Added S3 Client side encryption functionality 357 | 358 | 6.1.0 (2019-02-13) 359 | ------------------ 360 | 361 | * nvllsvm cleaned up the packaging, requirements, travis, sphinx... 
* Unvendored aiobotocore

6.0.1 (2018-11-22)
------------------

* Fixed dependencies

6.0.0 (2018-11-21)
------------------

* Fixed readthedocs
* Vendored aiobotocore for later botocore version

5.0.0 (2018-10-12)
------------------

* Updated lots of dependencies
* Changed s3.upload_fileobj from using put_object to doing a multipart upload
* Created s3.copy shim that runs get_object then does multipart upload, could do with a better implementation though.

4.1.2 (2018-08-28)
------------------

* Updated PyPI credentials

4.1.0 (2018-08-28)
------------------

* aiobotocore dependency bump

4.0.2 (2018-08-03)
------------------

* Dependency bump

4.0.0 (2018-05-09)
------------------

* Dependency bump
* Now using aiobotocore 0.8.0
* Dropped < py3.5 support
* Now using async def / await syntax
* Fixed boto3 dependency so it only uses a boto3 version supported by aiobotocore's max botocore dependency
* Important: ```__call__``` in ```AIOServiceAction``` tried to yield from a coroutine in a non-coroutine. That code
  shouldn't be hit anymore, but as that can't be guaranteed, ```__call__``` was duplicated and awaited properly, so it
  "should" be fine. Credit goes to Arnulfo Solis for doing the PR.

3.0.0 (2018-03-29)
------------------

* Dependency bump
* Asyncified dynamodb Table Batch Writer + Tests
* Added batch writer examples
* Now using aiobotocore 0.6.0

2.2.0 (2018-01-24)
------------------

* Dependency bump

2.1.0 (2018-01-23)
------------------

* Dependency bump
* Fixed bug where extras weren't packaged

2.0.0 (2017-12-30)
------------------

* Patched most s3transfer functions

1.1.2 (2017-11-29)
------------------

* Fixup of lingering GPL license texts

0.1.0 (2017-09-25)
------------------

* First release on PyPI.
--------------------------------------------------------------------------------
/tests/test_s3_cse.py:
--------------------------------------------------------------------------------
import base64
import json

import pytest

import aioboto3
import aioboto3.s3.cse as cse

# Need big chunk of data for range test
DATA = b'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed efficitur, turpis at molestie molestie, ' \
       b'felis nunc consequat neque, a suscipit ipsum magna a lacus. Nam rhoncus pulvinar dignissim. Sed non sapien porta, ' \
       b'fringilla ipsum vitae, rutrum lorem. Ut mi massa, ultricies eget auctor malesuada, euismod eu nisl. Quisque ' \
       b'pellentesque egestas enim, at finibus nibh semper eget. Maecenas vestibulum massa id elit sagittis dignissim. ' \
       b'Nullam felis ligula, pellentesque a odio quis, sagittis consectetur tortor.\n' \
       b'Cras eget gravida nisl. Nulla nisi ex, facilisis a aliquet maximus, sodales sit amet ligula. Nulla ornare ante ' \
       b'quis varius eleifend. Nunc elementum mi imperdiet, luctus lectus ut, bibendum nunc. Nunc placerat, diam et faucibus ' \
       b'feugiat, lacus mi consequat lectus, in tincidunt nunc ex a massa. Suspendisse potenti. Phasellus congue diam nec ' \
       b'mattis sagittis. 
Duis hendrerit bibendum dictum. Sed et sapien non urna ultrices vehicula. Curabitur id massa ut ' \ 19 | b'velit placerat tristique ac eu nisi.\n' \ 20 | b'Sed sollicitudin, lectus et dignissim sodales, turpis purus blandit neque, sit amet tempus sem massa id turpis. ' \ 21 | b'Integer porttitor rutrum orci, nec dapibus velit hendrerit vitae. Mauris pellentesque ipsum faucibus laoreet ' \ 22 | b'viverra. Fusce mattis, urna a ullamcorper condimentum, orci lectus pellentesque enim, non vestibulum leo tortor ' \ 23 | b'ut arcu. Donec fringilla gravida elit vel ullamcorper. Proin consectetur id eros in lacinia. Donec pellentesque ' \ 24 | b'nunc vitae viverra condimentum. Maecenas nec lacus elementum, tristique ipsum ut, dapibus velit. Donec tempus quam ' \ 25 | b'cursus, aliquam tellus vel, pretium lacus. Nulla ultrices ex ac felis sagittis malesuada. Aliquam sollicitudin ut ' \ 26 | b'turpis eget laoreet.' 27 | 28 | 29 | # Waiting for generate_data_key on kms 30 | # https://github.com/spulec/moto/pull/1555 31 | 32 | # @pytest.mark.skip(reason="no way of currently testing this") 33 | # @pytest.mark.asyncio 34 | # async def test_cse1(event_loop, moto_patch, region, bucket_name, kms_key_alias): 35 | # kms_client = kms_moto_patch('kms', region_name=region) 36 | # s3_client, s3_resource = s3_moto_patch 37 | # s3_client = s3_client('s3', region_name=region) 38 | # 39 | # # Setup KMS 40 | # resp = await kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT', Origin='AWS_KMS') 41 | # key_id = resp['KeyMetadata']['KeyId'] 42 | # 43 | # await kms_client.create_alias(AliasName=kms_key_alias, TargetKeyId=key_id) 44 | # 45 | # # Setup bucket 46 | # await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 47 | # 48 | # s3_data = str(uuid.uuid4()).encode() 49 | # s3_file = uuid.uuid4().hex 50 | # 51 | # async with S3CSE(s3_region=region, kms_region=region) as s3_cse: 52 | # await s3_cse.put_object( 53 | # Body=s3_data, 54 | # Bucket=bucket_name, 55 | # Key=s3_file, 56 | # KMSKeyId=kms_key_alias, 57 | # AuthenticatedEncryption=True 58 | # ) 59 | # 60 | # s3_raw_object = await s3_client.get_object(Bucket=bucket_name, Key=s3_file) 61 | # s3_raw_object_data = await s3_raw_object['Body'].read() 62 | # 63 | # result = await s3_cse.get_object(Bucket=bucket_name, Key=s3_file) 64 | # 65 | # print() 66 | 67 | @pytest.mark.xfail(reason="Waiting for moto to accept PR and release") 68 | @pytest.mark.asyncio 69 | async def test_kms_crypto_context_success(moto_patch, region, bucket_name, kms_key_alias): 70 | session = aioboto3.Session() 71 | 72 | async with session.client('kms', region_name=region) as kms_client: 73 | resp = await kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT', Origin='AWS_KMS') 74 | key_id = resp['KeyMetadata']['KeyId'] 75 | 76 | await kms_client.create_alias(AliasName=kms_key_alias, TargetKeyId=key_id) 77 | 78 | # Create context 79 | kms_context = cse.KMSCryptoContext(kms_key_alias, kms_client_args={'region_name': region}) 80 | assert kms_context.kms_key == kms_key_alias 81 | 82 | await kms_context.setup() 83 | assert kms_context._kms_client is not None 84 | 85 | aes_key, material_description, encrypted_aes_key = await kms_context.get_encryption_aes_key() 86 | 87 | # Material description should denote what key is used 88 | assert material_description['kms_cmk_id'] == kms_key_alias 89 | 90 | resp = await kms_client.decrypt(CiphertextBlob=encrypted_aes_key, EncryptionContext=material_description) 91 | assert aes_key == resp['Plaintext'] 92 | 93 | await 
kms_context.close()


@pytest.mark.asyncio
async def test_kms_crypto_context_decrypt_no_key(moto_patch, region, bucket_name, kms_key_alias):
    # Create context
    kms_context = cse.KMSCryptoContext(kms_client_args={'region_name': region})
    await kms_context.setup()

    with pytest.raises(ValueError):
        # Can't get a KMS encryption key without a key id specified
        await kms_context.get_encryption_aes_key()

    await kms_context.close()


@pytest.mark.asyncio
async def test_kms_cse_encrypt_decrypt_aes_gcm(moto_patch, region, bucket_name, s3_key_name):
    session = aioboto3.Session()

    async with session.client('s3', region_name=region) as s3_client:
        await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region})

        aes_key = b'O\x8b\xdc\x92\x87k\x9aJ{m\x82\xb3\x96\xf7\x93]\xa1\xb2Cl\x86<5\xbe\x13\xaf\xa8\x94\xa2O3\xef'
        encrypted_aes_key = b'encrypted_aes_key'
        material_description = {'kms_cmk_id': 'alias/cmk_id'}

        kms_crypto_context = cse.MockKMSCryptoContext(aes_key, material_description,
                                                      encrypted_aes_key, authenticated_encryption=True)
        s3_cse = cse.S3CSE(kms_crypto_context, s3_client_args={'region_name': region})

        async with s3_cse:
            # Upload file
            await s3_cse.put_object(Body=DATA, Bucket=bucket_name, Key=s3_key_name)

            encrypted_resp = await s3_client.get_object(Bucket=bucket_name, Key=s3_key_name)
            encrypted_resp['Body'] = await encrypted_resp['Body'].read()

            # Check it doesn't start with lorem ipsum
            assert not encrypted_resp['Body'].startswith(DATA[:10])

            # Check metadata for KMS encryption
            assert encrypted_resp['Metadata']['x-amz-cek-alg'] == 'AES/GCM/NoPadding'
            assert encrypted_resp['Metadata']['x-amz-tag-len'] == '128'
            assert encrypted_resp['Metadata']['x-amz-wrap-alg'] == 'kms'
            assert base64.b64decode(encrypted_resp['Metadata']['x-amz-key-v2']) == encrypted_aes_key
            assert encrypted_resp['Metadata']['x-amz-unencrypted-content-length'] == str(len(DATA))
            assert encrypted_resp['Metadata']['x-amz-matdesc'] == json.dumps(material_description)
            assert 'x-amz-iv' in encrypted_resp['Metadata']

            # This is a quick test to ensure decryption works, and resp['Body'] looks like an aiohttp obj
            unencrypted_resp = await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name)
            unencrypted_resp['Body'] = await unencrypted_resp['Body'].read()

            assert unencrypted_resp['Body'] == DATA

            # Test range get
            # TODO moto doesn't return metadata during range get
            # unencrypted_range_resp = await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name, Range='bytes=500-700')
            # unencrypted_range_resp['Body'] = await unencrypted_range_resp['Body'].read()
            # assert len(unencrypted_range_resp['Body']) == 200
            # assert unencrypted_range_resp['Body'] == DATA[500:700]


@pytest.mark.asyncio
async def test_symmetric_cse_encrypt_decrypt_aes_cbc(moto_patch, region, bucket_name, s3_key_name):
    session = aioboto3.Session()

    async with session.client('s3', region_name=region) as s3_client:
        await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region})

        aes_key = b'O\x8b\xdc\x92\x87k\x9aJ{m\x82\xb3\x96\xf7\x93]\xa1\xb2Cl\x86<5\xbe\x13\xaf\xa8\x94\xa2O3\xef'

symmetric_crypto_context = cse.SymmetricCryptoContext(aes_key) 167 | s3_cse = cse.S3CSE(symmetric_crypto_context, s3_client_args={'region_name': region}) 168 | 169 | async with s3_cse: 170 | # Upload file 171 | await s3_cse.put_object(Body=DATA, Bucket=bucket_name, Key=s3_key_name) 172 | 173 | encrypted_resp = await s3_client.get_object(Bucket=bucket_name, Key=s3_key_name) 174 | encrypted_resp['Body'] = await encrypted_resp['Body'].read() 175 | 176 | # Check it doesnt start with lorem ipsum 177 | assert not encrypted_resp['Body'].startswith(DATA[:10]) 178 | 179 | # Check metadata for KMS encryption 180 | assert len(base64.b64decode(encrypted_resp['Metadata']['x-amz-key'])) == 48 181 | assert encrypted_resp['Metadata']['x-amz-unencrypted-content-length'] == str(len(DATA)) 182 | assert encrypted_resp['Metadata']['x-amz-matdesc'] == '{}' 183 | 184 | assert 'x-amz-iv' in encrypted_resp['Metadata'] 185 | assert 'x-amz-cek-alg' not in encrypted_resp['Metadata'] 186 | assert 'x-amz-key-v2' not in encrypted_resp['Metadata'] 187 | assert 'x-amz-wrap-alg' not in encrypted_resp['Metadata'] 188 | assert 'x-amz-tag-len' not in encrypted_resp['Metadata'] 189 | 190 | # This is a quick test to ensure decryption works, and resp['Body'] looks like an aiohttp obj 191 | unencrypted_resp = await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name) 192 | unencrypted_resp['Body'] = await unencrypted_resp['Body'].read() 193 | 194 | assert unencrypted_resp['Body'] == DATA 195 | 196 | # Whilst were here, try range get_object with AES/CBC, should fail 197 | # TODO moto doesnt return Metadata when doing range get 198 | # with pytest.raises(cse.DecryptError): 199 | # await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name, Range='bytes=20-30') 200 | 201 | 202 | @pytest.mark.asyncio 203 | async def test_asymmetric_cse_encrypt_decrypt_aes_cbc(moto_patch, region, bucket_name, s3_key_name): 204 | session = aioboto3.Session() 205 | 206 | async with session.client('s3', region_name=region) as s3_client: 207 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 208 | 209 | private_key = b'0\x82\x02w\x02\x01\x000\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x04\x82\x02a0\x82\x02]\x02\x01\x00\x02\x81\x81\x00\xbb x \x88x\xa6\x1b\x94\r\x93\x82\x9bU4j\x90//4\x97\xfd\x0c\xdf\xd3\x10\xab}\x99\x19\xe4\xfe\xf1=\x8aM\xca\x06\xa6\xf3\xa5\xce8\x19Q\xcc\x12\x1a\xc2\xc4\xd9w\xeex\xf6\xbc\x1f\xb2u\xb3Z\x0e!fsLJ>\x7fi\xdcc\xb9:\xee2\xf8h5h\x1f\x96\xab\xa4\xfc\x02\x12=D\xde\xde}i~\xe8\xe1y\x16\xc0\xe1\xeb\xca\x16\xbde@+\x00\x9e\xbf\x12\xe7\x0c\xa7#\x88\x80\xa04\xe2M\xc2\x1f\xc2\x8a\xfc\x08M\x02\x03\x01\x00\x01\x02\x81\x81\x00\x92\x1d\x0fO\xaf\xe0-+\xd9\x96$9VZ\xd8\x9b\xe0\xcb\xc7\x1bU\x16UH,\x01\x976r&\xa3\x05b\x8f?\xff\xef\xa0\xf4\x19\xc9\xbc\xd5W\x07\xe4\xc5\xba9\x9d\x05\x85\xbd"\x9c\xdeV\r\xbe\x13\xf6\\\x94<\x99\xa0/\xa8\x8f\xd8\x14\xa3\x88\x88\x1b\xdf\xee\xbb\xaf\xcd\xc7k{\xb2\x9e\x90B\x05)\x7f\xedo\x95\xb9[\xf4\x8fQ\xc0\xee\xd0\xc9\xb9\x1e\xbfP\xe7\x8c\x87\xab\x87\n\xfd\xcb\x04\xe5\x9bEv\x0f)8\x94R;\xf8B\xc1\x02A\x00\xe8D\x96\xdd\x1f\xd4\xd1\xbc\xd2p\xd0\x11\x99pkp\xa9\xb5\xdd:\xa7\xdfn\xd6%\x82\xaeK\xb20\xd2\x03\xf2\r\x06\x1as\xc3_\x95\xf3\xab`>\xaa\x1c\xc1\x19]\xa3\xf2]Q+\xf9\xebi\x9feQ\xd6\xf4\xe3\x11\x02A\x00\xce? 
\xe6=\xad\x14\xf5\x96PY\xf8\xc1\xaa\xb8y\x9f{\xd8\xf4\x94\x8b}\x9c\\\xec\x10\x7f\xfbD"\xbbd\xa3g\x85\xbd\x97\x18\xd7\xde\x99\xb7\x1dw\xbfwb\xbb\xaa\x01\xaf~\x8aW K\xed;{\xf6t\x99}\x02A\x00\x9b\x13\xf8\x9a\x89?B\x0eM\x7fo\x1c\xe1\x12\xd3Yt\xa6m\xa0U\'tL\\\xdd$\xdc{\x8b\xe7\x1d%F\x96\xd5\xa0\x87H\xd1\xc8\xd0\x9a\xc1\x1c9x\xa0$\nk\xae\xec\x9cm\x10F\x04[\xd4\xc9\xad\xd5\xd1\x02@I\xf9V\x81~I\xa0$\xdd\xbf\x00&:\xc0R\xde<\x97\x9d\x1fLP#\xc3{\x88\xa7\xfa_R\xf6\xea#\x94\x80B\xf5\xd7E\xef\xd7Ef\xeaH\xd3\x01\xad\x06\x06Z\x08i\xe8\x90\x8bb\xf09\xcf\xa2{\xfb\xb9\x02@D\xbaAV\x03\x94,\xc7\xf3/\xbd\xf3I\xc2\x0fAI\xcd\x9e\xa1\xce\xdf\xa7\x19S\x86\xf3\xc2\x854]\xac\xab\xc8\x8f@\x03_-?{>\x1f\xcc\x1a@\xdb\n\xf0v5\xe4tL\xf3\x16kD\xb5\x83L(3\xd2' 210 | public_key = b'0\x81\x9f0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x01\x05\x00\x03\x81\x8d\x000\x81\x89\x02\x81\x81\x00\xbb x \x88x\xa6\x1b\x94\r\x93\x82\x9bU4j\x90//4\x97\xfd\x0c\xdf\xd3\x10\xab}\x99\x19\xe4\xfe\xf1=\x8aM\xca\x06\xa6\xf3\xa5\xce8\x19Q\xcc\x12\x1a\xc2\xc4\xd9w\xeex\xf6\xbc\x1f\xb2u\xb3Z\x0e!fsLJ>\x7fi\xdcc\xb9:\xee2\xf8h5h\x1f\x96\xab\xa4\xfc\x02\x12=D\xde\xde}i~\xe8\xe1y\x16\xc0\xe1\xeb\xca\x16\xbde@+\x00\x9e\xbf\x12\xe7\x0c\xa7#\x88\x80\xa04\xe2M\xc2\x1f\xc2\x8a\xfc\x08M\x02\x03\x01\x00\x01' 211 | 212 | private_key = cse.AsymmetricCryptoContext.from_der_private_key(private_key) 213 | public_key = cse.AsymmetricCryptoContext.from_der_public_key(public_key) 214 | 215 | symmetric_crypto_context = cse.AsymmetricCryptoContext(public_key=public_key, private_key=private_key) 216 | s3_cse = cse.S3CSE(symmetric_crypto_context, s3_client_args={'region_name': region}) 217 | 218 | async with s3_cse: 219 | # Upload file 220 | await s3_cse.put_object(Body=DATA, Bucket=bucket_name, Key=s3_key_name) 221 | 222 | encrypted_resp = await s3_client.get_object(Bucket=bucket_name, Key=s3_key_name) 223 | encrypted_resp['Body'] = await encrypted_resp['Body'].read() 224 | 225 | # Check it doesnt start with lorem ipsum 226 | assert not encrypted_resp['Body'].startswith(DATA[:10]) 227 | 228 | # Check metadata for KMS encryption 229 | assert len(base64.b64decode(encrypted_resp['Metadata']['x-amz-key'])) == 128 # 1024bit key 230 | assert encrypted_resp['Metadata']['x-amz-unencrypted-content-length'] == str(len(DATA)) 231 | assert encrypted_resp['Metadata']['x-amz-matdesc'] == '{}' 232 | 233 | assert 'x-amz-iv' in encrypted_resp['Metadata'] 234 | assert 'x-amz-cek-alg' not in encrypted_resp['Metadata'] 235 | assert 'x-amz-key-v2' not in encrypted_resp['Metadata'] 236 | assert 'x-amz-wrap-alg' not in encrypted_resp['Metadata'] 237 | assert 'x-amz-tag-len' not in encrypted_resp['Metadata'] 238 | 239 | # This is a quick test to ensure decryption works, and resp['Body'] looks like an aiohttp obj 240 | unencrypted_resp = await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name) 241 | unencrypted_resp['Body'] = await unencrypted_resp['Body'].read() 242 | 243 | assert unencrypted_resp['Body'] == DATA 244 | 245 | # Whilst were here, try range get_object with AES/CBC, should fail 246 | # TODO moto doesnt return Metadata when doing range get 247 | # with pytest.raises(cse.DecryptError): 248 | # await s3_cse.get_object(Bucket=bucket_name, Key=s3_key_name, Range='bytes=20-30') 249 | 250 | 251 | def test_adjust_iv(): 252 | iv = b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C' 253 | after = b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C\x00\x00\x00\x02' 254 | actual = cse._adjust_iv_for_range(iv, 0) 255 | 256 | assert after == actual 257 | 258 | 259 | def test_increment_blocks(): 260 | before = 
b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C\x00\x00\x00\x01' 261 | after = b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C\x00\x00\x00\x02' 262 | 263 | actual = cse._increment_blocks(before, 1) 264 | 265 | assert after == actual 266 | 267 | 268 | def test_compute_j0(): 269 | before = b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C' 270 | after = b'+^\xa5\x9a\xe1\x97p\x0f)\xf2\x10C\x00\x00\x00\x02' 271 | 272 | actual = cse._compute_j0(before) 273 | 274 | assert after == actual 275 | 276 | 277 | def test_get_adjusted_crypto_range(): 278 | actual_start, actual_end = cse._get_adjusted_crypto_range(3, 64) 279 | 280 | assert actual_start == 0 281 | assert actual_end == 256 282 | 283 | 284 | # Testing max size clamping 285 | @pytest.mark.parametrize('input,expected', [ 286 | (0, 256), 287 | (257, 512), 288 | (9223372036854775807, 9223372036854775807), 289 | ]) 290 | def test_get_cipher_block_upper_bound(input, expected): 291 | result = cse._get_cipher_block_upper_bound(input) 292 | 293 | assert result == expected 294 | 295 | 296 | # Testing min size clamping 297 | @pytest.mark.parametrize('input,expected', [ 298 | (0, 0), 299 | (20, 0), 300 | (257, 128), 301 | (510, 256), 302 | ]) 303 | def test_get_cipher_block_lower_bound(input, expected): 304 | result = cse._get_cipher_block_lower_bound(input) 305 | 306 | assert result == expected 307 | -------------------------------------------------------------------------------- /resources/S3-CSE/src/main/java/demo/UploadObjectKMSKey.java: -------------------------------------------------------------------------------- 1 | package demo; 2 | import java.io.*; 3 | import java.security.*; 4 | import java.security.spec.InvalidKeySpecException; 5 | import java.security.spec.PKCS8EncodedKeySpec; 6 | import java.security.spec.X509EncodedKeySpec; 7 | 8 | import com.amazonaws.AmazonServiceException; 9 | import com.amazonaws.auth.profile.ProfileCredentialsProvider; 10 | import com.amazonaws.regions.RegionUtils; 11 | import com.amazonaws.services.s3.AmazonS3Encryption; 12 | import com.amazonaws.services.s3.AmazonS3EncryptionClientBuilder; 13 | import com.amazonaws.services.s3.model.*; 14 | 15 | import org.apache.commons.cli.*; 16 | 17 | import javax.crypto.KeyGenerator; 18 | import javax.crypto.SecretKey; 19 | import javax.crypto.spec.SecretKeySpec; 20 | 21 | 22 | public class UploadObjectKMSKey { 23 | private static void kmsCrypto(String clientRegion, String bucketName, String keyName, String kmsKeyId, 24 | boolean authenticatedCrypto, boolean getOnly, boolean putOnly) { 25 | System.out.println("KMS Key: " + kmsKeyId); 26 | System.out.println("Authenticated Encryption: " + authenticatedCrypto); 27 | 28 | try { 29 | // Create the encryption client. 30 | KMSEncryptionMaterialsProvider materialProvider = new KMSEncryptionMaterialsProvider(kmsKeyId); 31 | 32 | CryptoConfiguration cryptoConfig; 33 | 34 | if (authenticatedCrypto) { 35 | // This does AES/GCM/NoPadding 36 | cryptoConfig = new CryptoConfiguration(CryptoMode.AuthenticatedEncryption).withAwsKmsRegion(RegionUtils.getRegion(clientRegion)); 37 | } else { 38 | // This does AES/CBC/PKCS5Padding 39 | cryptoConfig = new CryptoConfiguration().withAwsKmsRegion(RegionUtils.getRegion(clientRegion)); 40 | } 41 | 42 | AmazonS3Encryption encryptionClient = AmazonS3EncryptionClientBuilder.standard() 43 | .withCredentials(new ProfileCredentialsProvider()) 44 | .withEncryptionMaterials(materialProvider) 45 | .withCryptoConfiguration(cryptoConfig) 46 | .withRegion(clientRegion).build(); 47 | 48 | // Upload an object using the encryption client. 
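// (Sketch note, not in the original sample: AmazonS3Encryption encrypts
// client-side before the PUT, so only ciphertext and the KMS-wrapped data key
// ever reach S3; a fresh data key is generated per object.)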
49 | String origContent = "S3 Encrypted Object Using KMS-Managed Customer Master Key."; 50 | int origContentLength = origContent.length(); 51 | 52 | if (putOnly || !getOnly) { 53 | encryptionClient.putObject(bucketName, keyName, origContent); 54 | } 55 | 56 | if (getOnly || !putOnly) { 57 | // Download the object. The downloaded object is still encrypted. 58 | S3Object downloadedObject = encryptionClient.getObject(bucketName, keyName); 59 | S3ObjectInputStream input = downloadedObject.getObjectContent(); 60 | 61 | // Decrypt and read the object and close the input stream. 62 | byte[] readBuffer = new byte[4096]; 63 | ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); 64 | int bytesRead = 0; 65 | int decryptedContentLength = 0; 66 | 67 | while ((bytesRead = input.read(readBuffer)) != -1) { 68 | baos.write(readBuffer, 0, bytesRead); 69 | decryptedContentLength += bytesRead; 70 | } 71 | input.close(); 72 | 73 | // Verify that the original and decrypted contents are the same size. 74 | 75 | System.out.println("Decrypted content length: " + decryptedContentLength); 76 | } 77 | System.out.println("Original content length: " + origContentLength); 78 | } 79 | catch(AmazonServiceException e) { 80 | // The call was transmitted successfully, but Amazon S3 couldn't process 81 | // it, so it returned an error response. 82 | e.printStackTrace(); 83 | } 84 | catch(IOException e) { 85 | // Reading or closing the decrypted object stream failed, so the 86 | // download could not be completed. 87 | e.printStackTrace(); 88 | } 89 | 90 | } 91 | 92 | private static void symmetricCrypto(String clientRegion, String bucketName, String keyName, 93 | String masterKeyDir, boolean getOnly, boolean putOnly) throws Exception { 94 | String masterKeyName = "secret.key"; 95 | 96 | System.out.println("symmetric Encryption"); 97 | 98 | KeyGenerator symKeyGenerator = KeyGenerator.getInstance("AES"); 99 | symKeyGenerator.init(256); 100 | SecretKey symKey = symKeyGenerator.generateKey(); 101 | 102 | // Only saves if key doesn't already exist 103 | saveSymmetricKey(masterKeyDir, masterKeyName, symKey); 104 | symKey = loadSymmetricAESKey(masterKeyDir, masterKeyName, "AES"); 105 | 106 | try { 107 | EncryptionMaterials encryptionMaterials = new EncryptionMaterials(symKey); 108 | AmazonS3Encryption encryptionClient = AmazonS3EncryptionClientBuilder.standard() 109 | .withCredentials(new ProfileCredentialsProvider()) 110 | .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(encryptionMaterials)) 111 | .withRegion(clientRegion) 112 | .build(); 113 | 114 | // Upload an object using the encryption client. 115 | String origContent = "S3 Encrypted Object Using symmetric."; 116 | int origContentLength = origContent.length(); 117 | if (putOnly || !getOnly) { 118 | encryptionClient.putObject(bucketName, keyName, origContent); 119 | } 120 | 121 | // Download the object. The downloaded object is still encrypted. 122 | if (getOnly || !putOnly) { 123 | S3Object downloadedObject = encryptionClient.getObject(bucketName, keyName); 124 | S3ObjectInputStream input = downloadedObject.getObjectContent(); 125 | 126 | // Decrypt and read the object and close the input stream.
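// (Sketch note, not in the original sample: getObject hands back a stream that
// the encryption client decrypts on the fly, unwrapping the envelope key
// stored in the object's metadata as the bytes are read below.)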
127 | byte[] readBuffer = new byte[4096]; 128 | ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); 129 | int bytesRead = 0; 130 | int decryptedContentLength = 0; 131 | 132 | while ((bytesRead = input.read(readBuffer)) != -1) { 133 | baos.write(readBuffer, 0, bytesRead); 134 | decryptedContentLength += bytesRead; 135 | } 136 | input.close(); 137 | 138 | // Verify that the original and decrypted contents are the same size. 139 | 140 | System.out.println("Decrypted content length: " + decryptedContentLength); 141 | } 142 | System.out.println("Original content length: " + origContentLength); 143 | } 144 | catch(AmazonServiceException e) { 145 | // The call was transmitted successfully, but Amazon S3 couldn't process 146 | // it, so it returned an error response. 147 | e.printStackTrace(); 148 | } 149 | catch(IOException e) { 150 | // Reading or closing the decrypted object stream failed, so the 151 | // download could not be completed. 152 | e.printStackTrace(); 153 | } 154 | 155 | } 156 | 157 | private static void asymmetricCrypto(String clientRegion, String bucketName, String keyName, 158 | String masterKeyDir, boolean getOnly, boolean putOnly) throws Exception { 159 | String pubKeyName = "secret.pub"; 160 | String privKeyName = "secret.priv"; 161 | 162 | System.out.println("asymmetric Encryption"); 163 | 164 | KeyPairGenerator keyGenerator = KeyPairGenerator.getInstance("RSA"); 165 | keyGenerator.initialize(1024, new SecureRandom()); 166 | KeyPair origKeyPair = keyGenerator.generateKeyPair(); 167 | 168 | // To see how it works, save and load the key pair to and from the file system. 169 | saveKeyPair(masterKeyDir, pubKeyName, privKeyName, origKeyPair); 170 | KeyPair keyPair = loadKeyPair(masterKeyDir, pubKeyName, privKeyName, "RSA"); 171 | 172 | try { 173 | EncryptionMaterials encryptionMaterials = new EncryptionMaterials(keyPair); 174 | AmazonS3Encryption encryptionClient = AmazonS3EncryptionClientBuilder.standard() 175 | .withCredentials(new ProfileCredentialsProvider()) 176 | .withEncryptionMaterials(new StaticEncryptionMaterialsProvider(encryptionMaterials)) 177 | .withRegion(clientRegion) 178 | .build(); 179 | 180 | // Upload an object using the encryption client. 181 | String origContent = "S3 Encrypted Object Using asymmetric encryption."; 182 | int origContentLength = origContent.length(); 183 | if (putOnly || !getOnly) { 184 | encryptionClient.putObject(bucketName, keyName, origContent); 185 | } 186 | // Download the object. The downloaded object is still encrypted. 187 | 188 | if (getOnly || !putOnly) { 189 | S3Object downloadedObject = encryptionClient.getObject(bucketName, keyName); 190 | S3ObjectInputStream input = downloadedObject.getObjectContent(); 191 | 192 | // Decrypt and read the object and close the input stream. 193 | byte[] readBuffer = new byte[4096]; 194 | ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); 195 | int bytesRead = 0; 196 | int decryptedContentLength = 0; 197 | 198 | while ((bytesRead = input.read(readBuffer)) != -1) { 199 | baos.write(readBuffer, 0, bytesRead); 200 | decryptedContentLength += bytesRead; 201 | } 202 | input.close(); 203 | 204 | // Verify that the original and decrypted contents are the same size.
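// (Sketch note, not in the original sample: the stored ciphertext is longer
// than the plaintext because of padding and tag overhead, but decryption
// strips that, so the two lengths printed below are expected to match.)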
205 | 206 | System.out.println("Decrypted content length: " + decryptedContentLength); 207 | } 208 | System.out.println("Original content length: " + origContentLength); 209 | } 210 | catch(AmazonServiceException e) { 211 | // The call was transmitted successfully, but Amazon S3 couldn't process 212 | // it, so it returned an error response. 213 | e.printStackTrace(); 214 | } 215 | catch(IOException e) { 216 | // Reading or closing the decrypted object stream failed, so the 217 | // download could not be completed. 218 | e.printStackTrace(); 219 | } 220 | 221 | } 222 | 223 | private static void saveSymmetricKey(String masterKeyDir, String masterKeyName, SecretKey secretKey) throws IOException { 224 | File outputFile = new File(masterKeyDir + File.separator + masterKeyName); 225 | 226 | if (!outputFile.exists()) { 227 | X509EncodedKeySpec x509EncodedKeySpec = new X509EncodedKeySpec(secretKey.getEncoded()); 228 | FileOutputStream keyOutputStream = new FileOutputStream(masterKeyDir + File.separator + masterKeyName); 229 | keyOutputStream.write(x509EncodedKeySpec.getEncoded()); 230 | keyOutputStream.close(); 231 | } 232 | } 233 | 234 | private static SecretKey loadSymmetricAESKey(String masterKeyDir, String masterKeyName, String algorithm) 235 | throws IOException, NoSuchAlgorithmException, InvalidKeySpecException, InvalidKeyException { 236 | // Read the key from the specified file. 237 | File keyFile = new File(masterKeyDir + File.separator + masterKeyName); 238 | FileInputStream keyInputStream = new FileInputStream(keyFile); 239 | byte[] encodedPrivateKey = new byte[(int) keyFile.length()]; 240 | keyInputStream.read(encodedPrivateKey); 241 | keyInputStream.close(); 242 | 243 | // Reconstruct and return the master key. 244 | return new SecretKeySpec(encodedPrivateKey, "AES"); 245 | } 246 | 247 | 248 | private static void saveKeyPair(String dir, 249 | String publicKeyName, 250 | String privateKeyName, 251 | KeyPair keyPair) throws IOException { 252 | File outputFile = new File(dir + File.separator + publicKeyName); 253 | 254 | if (!outputFile.exists()) { 255 | PrivateKey privateKey = keyPair.getPrivate(); 256 | PublicKey publicKey = keyPair.getPublic(); 257 | 258 | // Write the public key to the specified file. 259 | X509EncodedKeySpec x509EncodedKeySpec = new X509EncodedKeySpec(publicKey.getEncoded()); 260 | FileOutputStream publicKeyOutputStream = new FileOutputStream(dir + File.separator + publicKeyName); 261 | publicKeyOutputStream.write(x509EncodedKeySpec.getEncoded()); 262 | publicKeyOutputStream.close(); 263 | 264 | // Write the private key to the specified file. 265 | PKCS8EncodedKeySpec pkcs8EncodedKeySpec = new PKCS8EncodedKeySpec(privateKey.getEncoded()); 266 | FileOutputStream privateKeyOutputStream = new FileOutputStream(dir + File.separator + privateKeyName); 267 | privateKeyOutputStream.write(pkcs8EncodedKeySpec.getEncoded()); 268 | privateKeyOutputStream.close(); 269 | } 270 | } 271 | 272 | private static KeyPair loadKeyPair(String dir, 273 | String publicKeyName, 274 | String privateKeyName, 275 | String algorithm) 276 | throws IOException, NoSuchAlgorithmException, InvalidKeySpecException { 277 | // Read the public key from the specified file.
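// (Sketch note, not in the original sample: the key files hold raw DER bytes,
// the public key as an X.509 SubjectPublicKeyInfo structure and the private
// key as PKCS#8 — the same DER forms the Python tests feed to
// AsymmetricCryptoContext.from_der_public_key / from_der_private_key.)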
278 | File publicKeyFile = new File(dir + File.separator + publicKeyName); 279 | FileInputStream publicKeyInputStream = new FileInputStream(publicKeyFile); 280 | byte[] encodedPublicKey = new byte[(int) publicKeyFile.length()]; 281 | publicKeyInputStream.read(encodedPublicKey); 282 | publicKeyInputStream.close(); 283 | 284 | // Read the private key from the specified file. 285 | File privateKeyFile = new File(dir + File.separator + privateKeyName); 286 | FileInputStream privateKeyInputStream = new FileInputStream(privateKeyFile); 287 | byte[] encodedPrivateKey = new byte[(int) privateKeyFile.length()]; 288 | privateKeyInputStream.read(encodedPrivateKey); 289 | privateKeyInputStream.close(); 290 | 291 | // Convert the keys into a key pair. 292 | KeyFactory keyFactory = KeyFactory.getInstance(algorithm); 293 | X509EncodedKeySpec publicKeySpec = new X509EncodedKeySpec(encodedPublicKey); 294 | PublicKey publicKey = keyFactory.generatePublic(publicKeySpec); 295 | 296 | PKCS8EncodedKeySpec privateKeySpec = new PKCS8EncodedKeySpec(encodedPrivateKey); 297 | PrivateKey privateKey = keyFactory.generatePrivate(privateKeySpec); 298 | 299 | return new KeyPair(publicKey, privateKey); 300 | } 301 | 302 | 303 | public static void main(String[] args) throws IOException { 304 | Options options = new Options(); 305 | Option bucketNameInput = new Option("b", "bucket-name", true, "S3 Bucket to use"); 306 | bucketNameInput.setRequired(true); 307 | options.addOption(bucketNameInput); 308 | 309 | Option keyNameInput = new Option("k", "key-name", true, "S3 File name to use"); 310 | keyNameInput.setRequired(true); 311 | options.addOption(keyNameInput); 312 | 313 | Option regionInput = new Option("r", "region", true, "AWS Region"); 314 | regionInput.setRequired(true); 315 | options.addOption(regionInput); 316 | 317 | Option cryptoTypeInput = new Option("c", "crypto-type", true, "Which crypto type to use"); 318 | cryptoTypeInput.setRequired(true); 319 | options.addOption(cryptoTypeInput); 320 | 321 | Option kmsKeyIdInput = new Option("a", "kms-key-id", true, "KMS Key ID or alias"); 322 | options.addOption(kmsKeyIdInput); 323 | 324 | Option authenticatedCryptoInput = new Option("d", "authenticated-crypto", false, "Authenticated crypto"); 325 | options.addOption(authenticatedCryptoInput); 326 | 327 | Option keyDirInput = new Option("e", "key-dir", true, "Symmetric or Asymmetric key directory"); 328 | options.addOption(keyDirInput); 329 | 330 | Option putOnlyInput = new Option("p", "put-only", false, "Upload only"); 331 | options.addOption(putOnlyInput); 332 | 333 | Option getOnlyInput = new Option("g", "get-only", false, "Download only"); 334 | options.addOption(getOnlyInput); 335 | 336 | CommandLineParser parser = new DefaultParser(); 337 | HelpFormatter formatter = new HelpFormatter(); 338 | CommandLine cmd = null; 339 | 340 | try { 341 | cmd = parser.parse(options, args); 342 | } catch (ParseException e) { 343 | System.out.println(e.getMessage()); 344 | formatter.printHelp("s3_cse_example", options); 345 | 346 | System.exit(1); 347 | } 348 | 349 | String bucketName = cmd.getOptionValue("bucket-name"); 350 | String keyName = cmd.getOptionValue("key-name"); 351 | String clientRegion = cmd.getOptionValue("region"); 352 | String cryptoType = cmd.getOptionValue("crypto-type"); 353 | boolean getOnly = cmd.hasOption("get-only"); 354 | boolean putOnly = cmd.hasOption("put-only"); 355 | 356 | System.out.println("S3: s3://" + bucketName + "/" + keyName); 357 | System.out.println("Region: " + clientRegion); 358 | 359 | try { 
360 | switch (cryptoType) { 361 | case "kms": 362 | String kmsKeyId = cmd.getOptionValue("kms-key-id"); 363 | boolean authenticatedCrypto = cmd.hasOption("authenticated-crypto"); 364 | 365 | UploadObjectKMSKey.kmsCrypto(clientRegion, bucketName, keyName, kmsKeyId, authenticatedCrypto, getOnly, putOnly); 366 | break; 367 | case "asymmetric": 368 | String keyDir = cmd.getOptionValue("key-dir"); 369 | 370 | UploadObjectKMSKey.asymmetricCrypto(clientRegion, bucketName, keyName, keyDir, getOnly, putOnly); 371 | break; 372 | case "symmetric": 373 | keyDir = cmd.getOptionValue("key-dir"); 374 | 375 | UploadObjectKMSKey.symmetricCrypto(clientRegion, bucketName, keyName, keyDir, getOnly, putOnly); 376 | break; 377 | 378 | default: 379 | System.out.println("Crypto type " + cryptoType + " unsupported"); 380 | } 381 | } catch (Exception e) { 382 | e.printStackTrace(); 383 | } 384 | 385 | 386 | } 387 | } 388 | -------------------------------------------------------------------------------- /tests/test_s3.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import os 3 | import datetime 4 | import tempfile 5 | from io import BytesIO 6 | from unittest.mock import AsyncMock 7 | 8 | from botocore.exceptions import ClientError 9 | from boto3.s3.transfer import S3TransferConfig 10 | import aiofiles 11 | import pytest 12 | 13 | 14 | @pytest.mark.asyncio 15 | async def test_s3_download_file(s3_client, bucket_name, region): 16 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 17 | await s3_client.put_object(Bucket=bucket_name, Key='test_file', Body=b'Hello World\n') 18 | 19 | download_file = '/tmp/aioboto3_temp_s3_download.txt' 20 | try: 21 | os.remove(download_file) 22 | except OSError: 23 | pass 24 | 25 | callback_called = False 26 | 27 | def download_callback(b): 28 | nonlocal callback_called 29 | callback_called = True 30 | 31 | await s3_client.download_file(bucket_name, 'test_file', download_file, Callback=download_callback) 32 | 33 | assert callback_called 34 | assert os.path.exists(download_file) 35 | assert os.path.isfile(download_file) 36 | 37 | try: 38 | os.remove(download_file) 39 | except OSError: 40 | pass 41 | 42 | 43 | @pytest.mark.asyncio 44 | async def test_s3_download_fileobj(s3_client, bucket_name, region): 45 | data = b'Hello World\n' 46 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 47 | await s3_client.put_object(Bucket=bucket_name, Key='test_file', Body=data) 48 | 49 | fh = BytesIO() 50 | await s3_client.download_fileobj(bucket_name, 'test_file', fh) 51 | 52 | fh.seek(0) 53 | assert fh.read() == data 54 | 55 | 56 | @pytest.mark.asyncio 57 | async def test_s3_download_fileobj_nonseekable_asyncwrite(s3_client, bucket_name, region): 58 | data = b'Hello World\n' 59 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 60 | await s3_client.put_object(Bucket=bucket_name, Key='test_file', Body=data) 61 | 62 | class FileObj: 63 | def __init__(self) -> None: 64 | self.data = b'' 65 | 66 | async def write(self, b: bytes) -> int: 67 | self.data += b 68 | return len(b) 69 | 70 | fh = FileObj() 71 | await s3_client.download_fileobj(bucket_name, 'test_file', fh) 72 | 73 | assert fh.data == data 74 | 75 | 76 | @pytest.mark.asyncio 77 | async def test_s3_download_fileobj_nonseekable_syncwrite(s3_client, bucket_name, region): 78 | data = b'Hello World\n' 79 | await 
s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 80 | await s3_client.put_object(Bucket=bucket_name, Key='test_file', Body=data) 81 | 82 | class FileObj: 83 | def __init__(self) -> None: 84 | self.data = b'' 85 | 86 | def write(self, b: bytes) -> int: 87 | self.data += b 88 | return len(b) 89 | 90 | fh = FileObj() 91 | await s3_client.download_fileobj(bucket_name, 'test_file', fh) 92 | 93 | assert fh.data == data 94 | 95 | 96 | @pytest.mark.asyncio 97 | async def test_s3_download_file_404(s3_client, bucket_name, region): 98 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 99 | 100 | try: 101 | await s3_client.download_file(bucket_name, 'test_file', '/tmp/somefile') 102 | assert False, 'Fail, should have raised an exception' 103 | except ClientError as err: 104 | assert err.response['Error']['Code'] == '404' 105 | 106 | 107 | @pytest.mark.asyncio 108 | async def test_s3_upload_fileobj(s3_client, bucket_name, region): 109 | data = b'Hello World\n' 110 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 111 | 112 | fh = BytesIO() 113 | fh.write(data) 114 | fh.seek(0) 115 | 116 | callbacks = [] 117 | 118 | def callback(bytes_sent): 119 | callbacks.append(bytes_sent) 120 | 121 | await s3_client.upload_fileobj(fh, bucket_name, 'test_file', Callback=callback) 122 | 123 | # We should have got one callback saying it has written 12 bytes 124 | assert len(callbacks) == 1 125 | assert callbacks[0] == 12 126 | 127 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 128 | assert (await resp['Body'].read()) == data 129 | 130 | 131 | def _count_running_tasks_excluding_current(): 132 | current = asyncio.current_task() 133 | return len([t for t in asyncio.all_tasks() if t is not current and not t.done() and not t.cancelled()]) 134 | 135 | 136 | @pytest.mark.asyncio 137 | async def test_s3_upload_fileobj_cancel(s3_client, bucket_name, region): 138 | before = _count_running_tasks_excluding_current() 139 | 140 | data = b"x" * 10_000_000 141 | await s3_client.create_bucket( 142 | Bucket=bucket_name, 143 | CreateBucketConfiguration={'LocationConstraint': region} 144 | ) 145 | 146 | fh = BytesIO(data) 147 | 148 | class SlowFakeFile: 149 | def __init__(self, fileobj): 150 | self.fileobj = fileobj 151 | 152 | async def read(self, size): 153 | await asyncio.sleep(0.3) 154 | return self.fileobj.read(size) 155 | 156 | slow_file = SlowFakeFile(fh) 157 | 158 | upload_task = asyncio.create_task( 159 | s3_client.upload_fileobj( 160 | slow_file, 161 | bucket_name, 162 | 'test_slow_file' 163 | ) 164 | ) 165 | 166 | await asyncio.sleep(0.3) 167 | 168 | upload_task.cancel() 169 | 170 | with pytest.raises(asyncio.CancelledError): 171 | await upload_task 172 | 173 | after = _count_running_tasks_excluding_current() 174 | assert before == after, "Task leak detected" 175 | 176 | 177 | @pytest.mark.asyncio 178 | async def test_s3_upload_empty_fileobj(s3_client, bucket_name, region): 179 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 180 | 181 | fh = BytesIO(b'') 182 | fh.seek(0) 183 | 184 | callbacks = [] 185 | 186 | def callback(bytes_sent): 187 | callbacks.append(bytes_sent) 188 | 189 | await s3_client.upload_fileobj(fh, bucket_name, 'test_file', Callback=callback) 190 | 191 | # We should have got one callback saying it has written 0 bytes 192 | assert len(callbacks) == 1 193 | assert callbacks[0] == 0 194 | 195 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 196 | assert len(await resp['Body'].read()) == 0
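# Illustrative sketch, not part of the test suite: the Callback hook exercised
# above follows the boto3 transfer convention — it is invoked with the number of
# bytes transferred for each chunk sent. A minimal progress tracker built on
# only that contract; the class name is illustrative:
class _UploadProgressSketch:
    def __init__(self, total_bytes: int) -> None:
        self.total_bytes = total_bytes
        self.sent = 0

    def __call__(self, bytes_sent: int) -> None:
        self.sent += bytes_sent
        print(f'uploaded {self.sent}/{self.total_bytes} bytes')
# Usage would look like:
#     await s3_client.upload_fileobj(fh, bucket_name, 'key', Callback=_UploadProgressSketch(size))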
197 | 198 | 199 | @pytest.mark.asyncio 200 | async def test_s3_upload_fileobj_async(s3_client, bucket_name, region): 201 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 202 | 203 | data = b'Hello World\n' 204 | 205 | tmpfile = tempfile.NamedTemporaryFile(delete=False) 206 | tmpfile.close() 207 | async with aiofiles.open(tmpfile.name, mode='wb') as fpw: 208 | await fpw.write(data) 209 | 210 | async with aiofiles.open(tmpfile.name, mode='rb') as fpr: 211 | await s3_client.upload_fileobj(fpr, bucket_name, 'test_file') 212 | 213 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 214 | assert (await resp['Body'].read()) == data 215 | 216 | 217 | @pytest.mark.asyncio 218 | async def test_s3_upload_fileobj_async_multipart(s3_client, bucket_name, region): 219 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 220 | 221 | data = b'Hello World\n' 222 | 223 | tmpfile = tempfile.NamedTemporaryFile(delete=False) 224 | tmpfile.close() 225 | async with aiofiles.open(tmpfile.name, mode='wb') as fpw: 226 | await fpw.write(data) 227 | 228 | async with aiofiles.open(tmpfile.name, mode='rb') as fpr: 229 | config = S3TransferConfig(multipart_threshold=4) 230 | await s3_client.upload_fileobj(fpr, bucket_name, 'test_file', Config=config) 231 | 232 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 233 | assert (await resp['Body'].read()) == data 234 | 235 | @pytest.mark.parametrize('checksum_algo', ['CRC32', 'SHA1', None]) 236 | @pytest.mark.asyncio 237 | async def test_s3_upload_fileobj_async_multipart_completes_with_checksum_on_parts( 238 | s3_client, bucket_name, region, checksum_algo): 239 | """This test verifies that when performing a multipart upload with a checksum algorithm: 240 | 1. Each uploaded part includes the specified checksum type (e.g. CRC32 or SHA1) 241 | 2.
The complete_multipart_upload call receives all part checksums correctly 242 | 243 | Note that moto does not use checksums properly, hence unittest.mock was used to 244 | test the call args of `complete_multipart_upload` 245 | """ 246 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 247 | 248 | data = b'Hello World\n' 249 | 250 | tmpfile = tempfile.NamedTemporaryFile(delete=False) 251 | tmpfile.close() 252 | async with aiofiles.open(tmpfile.name, mode='wb') as fpw: 253 | await fpw.write(data) 254 | 255 | mock_complete_multipart_upload = AsyncMock() 256 | s3_client.complete_multipart_upload = mock_complete_multipart_upload 257 | async with aiofiles.open(tmpfile.name, mode='rb') as fpr: 258 | config = S3TransferConfig(multipart_threshold=4) 259 | 260 | upload_fileobj_kwargs = {} 261 | if checksum_algo: 262 | upload_fileobj_kwargs = {'ExtraArgs': {'ChecksumAlgorithm': checksum_algo}} 263 | await s3_client.upload_fileobj(fpr, bucket_name, 'test_file', Config=config, **upload_fileobj_kwargs) 264 | 265 | mock_complete_multipart_upload.assert_called_once() 266 | args, kwargs = mock_complete_multipart_upload.call_args 267 | parts = kwargs['MultipartUpload']['Parts'] 268 | if checksum_algo: 269 | expected_checksum_key = 'Checksum' + checksum_algo 270 | for part in parts: 271 | assert expected_checksum_key in part 272 | else: 273 | for part in parts: 274 | for key in part: 275 | assert not key.startswith('Checksum') 276 | 277 | 278 | 279 | @pytest.mark.asyncio 280 | async def test_s3_upload_fileobj_async_slow(s3_client, bucket_name, region): 281 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 282 | 283 | data = b'Hello World\n' 284 | 285 | class FakeFile: 286 | def __init__(self, filedata: bytes) -> None: 287 | self._data = filedata 288 | 289 | async def read(self, numbytes: int) -> bytes: 290 | result = self._data[:5] 291 | self._data = self._data[5:] 292 | return result 293 | 294 | await s3_client.upload_fileobj(FakeFile(data), bucket_name, 'test_file') 295 | 296 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 297 | assert (await resp['Body'].read()) == data 298 | 299 | 300 | @pytest.mark.asyncio 301 | async def test_s3_upload_broken_fileobj(s3_client, bucket_name, region): 302 | class BrokenFile(object): 303 | def __init__(self, data: bytes): 304 | self._data = data 305 | 306 | def read(self, count): 307 | raise IOError("some bad file") 308 | 309 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 310 | 311 | fh = BrokenFile(b'Hello World\n') 312 | try: 313 | await s3_client.upload_fileobj(fh, bucket_name, 'test_file') 314 | except Exception as err: 315 | print() 316 | 317 | uploads_resps = await s3_client.list_multipart_uploads(Bucket=bucket_name) 318 | assert len(uploads_resps.get('Uploads', [])) == 0 319 | 320 | 321 | @pytest.mark.asyncio 322 | async def test_s3_upload_fileobj_with_transform(s3_client, bucket_name, region): 323 | data = b'Hello World\n' 324 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 325 | 326 | fh = BytesIO() 327 | fh.write(data) 328 | fh.seek(0) 329 | 330 | processing = lambda x: x.lower() 331 | 332 | await s3_client.upload_fileobj(fh, bucket_name, 'test_file', Processing=processing) 333 | 334 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 335 | assert (await resp['Body'].read()) == 
data.lower() 336 | 337 | 338 | @pytest.mark.asyncio 339 | async def test_s3_upload_file(s3_client, bucket_name, region): 340 | data = b'Hello World\n' 341 | filename = '/tmp/aioboto3_temp_s3_upload.txt' 342 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 343 | 344 | open(filename, 'wb').write(data) 345 | 346 | await s3_client.upload_file(filename, bucket_name, 'test_file') 347 | 348 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file') 349 | assert (await resp['Body'].read()) == data 350 | 351 | 352 | @pytest.mark.asyncio 353 | async def test_s3_copy(s3_client, bucket_name, region): 354 | data = b'Hello World\n' 355 | 356 | filename = '/tmp/aioboto3_temp_s3_upload.txt' 357 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 358 | 359 | # Upload file 360 | open(filename, 'wb').write(data) 361 | await s3_client.upload_file(filename, bucket_name, 'test_file') 362 | 363 | # Copy file 364 | copy_source = {'Bucket': bucket_name, 'Key': 'test_file'} 365 | await s3_client.copy(copy_source, bucket_name, 'test_file2') 366 | 367 | # Get copied file 368 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file2') 369 | assert (await resp['Body'].read()) == data 370 | 371 | 372 | @pytest.mark.asyncio 373 | async def test_s3_copy_multipart(s3_client, bucket_name, region): 374 | data = b'Hello World\n' 375 | 376 | filename = '/tmp/aioboto3_temp_s3_upload.txt' 377 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 378 | 379 | # Upload file 380 | open(filename, 'wb').write(data) 381 | await s3_client.upload_file(filename, bucket_name, 'test_file') 382 | 383 | # Copy file 384 | copy_source = {'Bucket': bucket_name, 'Key': 'test_file'} 385 | config = S3TransferConfig(multipart_threshold=4) 386 | await s3_client.copy(copy_source, bucket_name, 'test_file2', Config=config, ExtraArgs={'RequestPayer': 'requester'}) 387 | 388 | # Get copied file 389 | resp = await s3_client.get_object(Bucket=bucket_name, Key='test_file2') 390 | assert (await resp['Body'].read()) == data 391 | 392 | 393 | @pytest.mark.asyncio 394 | async def test_s3_copy_from(s3_client, s3_resource, bucket_name, region): 395 | data = b'Hello World\n' 396 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 397 | 398 | fh = BytesIO() 399 | fh.write(data) 400 | fh.seek(0) 401 | 402 | await s3_client.upload_fileobj(fh, bucket_name, 'test_file') 403 | 404 | resource = await s3_resource.Object(bucket_name, "new_test_file") 405 | copy_source = bucket_name + "/test_file" 406 | copy_result = await resource.copy_from(CopySource=copy_source) 407 | assert 'CopyObjectResult' in copy_result 408 | 409 | resp = await s3_client.get_object(Bucket=bucket_name, Key='new_test_file') 410 | assert (await resp['Body'].read()) == data 411 | 412 | 413 | @pytest.mark.asyncio 414 | async def test_s3_resource_objects_all(s3_client, s3_resource, bucket_name, region): 415 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 416 | files_to_create = {'test/file1', 'test2/file1', 'test2/file2'} 417 | for file in files_to_create: 418 | await s3_client.put_object(Bucket=bucket_name, Key=file, Body=b'Hello World\n') 419 | 420 | files = [] 421 | bucket = await s3_resource.Bucket(bucket_name) 422 | async for item in bucket.objects.all(): 423 | files.append(item.key) 
424 | 425 | assert len(files) == len(files_to_create) 426 | assert set(files) == files_to_create 427 | 428 | 429 | @pytest.mark.asyncio 430 | async def test_s3_resource_objects_filter(s3_client, s3_resource, bucket_name, region): 431 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 432 | files_to_create = {'test/file1', 'test2/file1', 'test2/file2'} 433 | for file in files_to_create: 434 | await s3_client.put_object(Bucket=bucket_name, Key=file, Body=b'Hello World\n') 435 | 436 | files = [] 437 | bucket = await s3_resource.Bucket(bucket_name) 438 | async for item in bucket.objects.filter(Prefix='test2/'): 439 | files.append(item.key) 440 | 441 | assert len(files) == 2 442 | assert all([file.startswith('test2/') for file in files]) 443 | 444 | 445 | @pytest.mark.asyncio 446 | async def test_s3_resource_objects_delete(s3_client, s3_resource, bucket_name, region): 447 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 448 | files_to_create = {'test/file1', 'test2/file1', 'test2/file2'} 449 | for file in files_to_create: 450 | await s3_client.put_object(Bucket=bucket_name, Key=file, Body=b'Hello World\n') 451 | 452 | bucket = await s3_resource.Bucket(bucket_name) 453 | await bucket.objects.all().delete() 454 | 455 | files = [] 456 | async for item in bucket.objects.all(): 457 | files.append(item.key) 458 | 459 | assert not files 460 | 461 | 462 | @pytest.mark.asyncio 463 | async def test_s3_resource_objects_delete_filter(s3_client, s3_resource, bucket_name, region): 464 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 465 | files_to_create = {'test/file1', 'test2/file1', 'test2/file2'} 466 | for file in files_to_create: 467 | await s3_client.put_object(Bucket=bucket_name, Key=file, Body=b'Hello World\n') 468 | 469 | bucket = await s3_resource.Bucket(bucket_name) 470 | await bucket.objects.filter(Prefix='test2/').delete() 471 | 472 | files = [] 473 | async for item in bucket.objects.all(): 474 | files.append(item.key) 475 | 476 | assert len(files) == 1 477 | assert files[0] == 'test/file1' 478 | 479 | 480 | @pytest.mark.asyncio 481 | async def test_s3_object_summary_load(s3_client, s3_resource, bucket_name, region): 482 | data = b'Hello World\n' 483 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 484 | await s3_client.put_object(Bucket=bucket_name, Key='test_file', Body=data) 485 | 486 | obj = await s3_resource.ObjectSummary(bucket_name, 'test_file') 487 | obj_size = await obj.size 488 | assert obj_size == len(data) 489 | 490 | 491 | @pytest.mark.asyncio 492 | async def test_s3_bucket_creation_date(s3_client, s3_resource, bucket_name, region): 493 | await s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': region}) 494 | 495 | bucket = await s3_resource.Bucket(bucket_name) 496 | creation_date = await bucket.creation_date 497 | assert isinstance(creation_date, datetime.datetime) 498 | --------------------------------------------------------------------------------
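A minimal end-to-end usage sketch distilled from the tests above (an assumption-laden illustration, not repository code: it presumes configured AWS credentials, an existing bucket, and uses 'my-bucket' and the prefix purely as placeholders):

import asyncio

import aioboto3


async def main():
    session = aioboto3.Session()
    async with session.resource('s3') as s3:
        bucket = await s3.Bucket('my-bucket')
        # objects.filter() pages transparently and yields object summaries
        async for obj in bucket.objects.filter(Prefix='test2/'):
            print(obj.key)


asyncio.run(main())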