├── .builder └── actions │ └── mock_server_setup.py ├── .clang-format ├── .clang-tidy ├── .gitattributes ├── .github ├── .codecov.yml ├── ISSUE_TEMPLATE │ ├── bug-report.yml │ ├── config.yml │ ├── documentation.yml │ └── feature-request.yml ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── ci.yml │ ├── clang-format.yml │ ├── closed-issue-message.yml │ ├── codecov.yml │ ├── handle-stale-discussions.yml │ ├── issue-regression-labeler.yml │ └── stale_issue.yml ├── .gitignore ├── CMakeLists.txt ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── builder.json ├── cmake └── aws-c-s3-config.cmake ├── docs ├── GetObject.md ├── images │ └── GetObjectFlow.svg └── memory_aware_request_execution.md ├── format-check.py ├── include └── aws │ └── s3 │ ├── exports.h │ ├── private │ ├── s3_auto_ranged_get.h │ ├── s3_auto_ranged_put.h │ ├── s3_checksums.h │ ├── s3_client_impl.h │ ├── s3_copy_object.h │ ├── s3_default_buffer_pool.h │ ├── s3_default_meta_request.h │ ├── s3_endpoint_resolver.h │ ├── s3_list_objects.h │ ├── s3_list_parts.h │ ├── s3_meta_request_impl.h │ ├── s3_paginator.h │ ├── s3_parallel_input_stream.h │ ├── s3_platform_info.h │ ├── s3_request.h │ ├── s3_request_messages.h │ ├── s3_util.h │ └── s3express_credentials_provider_impl.h │ ├── s3.h │ ├── s3_buffer_pool.h │ ├── s3_client.h │ ├── s3_endpoint_resolver.h │ └── s3express_credentials_provider.h ├── samples ├── CMakeLists.txt └── s3 │ ├── CMakeLists.txt │ ├── app_ctx.h │ ├── cli_progress_bar.c │ ├── cli_progress_bar.h │ ├── main.c │ ├── s3-cp.c │ ├── s3-ls.c │ └── s3-platform_info.c ├── scripts └── update_s3_endpoint_resolver_artifacts.py ├── source ├── s3.c ├── s3_auto_ranged_get.c ├── s3_auto_ranged_put.c ├── s3_buffer_pool.c ├── s3_checksum_stream.c ├── s3_checksums.c ├── s3_chunk_stream.c ├── s3_client.c ├── s3_copy_object.c ├── s3_default_buffer_pool.c ├── s3_default_meta_request.c ├── s3_endpoint.c ├── s3_endpoint_resolver │ ├── aws_s3_endpoint_resolver_partition.c │ ├── 
aws_s3_endpoint_rule_set.c │ └── s3_endpoint_resolver.c ├── s3_list_objects.c ├── s3_list_parts.c ├── s3_meta_request.c ├── s3_paginator.c ├── s3_parallel_input_stream.c ├── s3_platform_info.c ├── s3_request.c ├── s3_request_messages.c ├── s3_util.c └── s3express_credentials_provider.c └── tests ├── CMakeLists.txt ├── mock_s3_server ├── AbortMultipartUpload │ └── default.json ├── CompleteMultipartUpload │ ├── async_access_denied_error.json │ ├── async_internal_error.json │ ├── default.json │ └── sse_kms.json ├── CreateMultipartUpload │ ├── default.json │ ├── request_time_too_skewed.json │ └── request_timeout.json ├── CreateSession │ ├── default.json │ └── sse_kms.json ├── GetObject │ ├── default.json │ ├── get_object_checksum_retry.json │ ├── get_object_delay_60s.json │ ├── get_object_invalid_response_missing_content_range.json │ ├── get_object_invalid_response_missing_etags.json │ ├── get_object_long_error.json │ ├── get_object_modified_failure.json │ ├── get_object_modified_first_part.json │ ├── get_object_modified_success.json │ └── get_object_unmatch_checksum_crc32.json ├── ListParts │ ├── default.json │ ├── multiple_list_parts_1.json │ ├── multiple_list_parts_2.json │ └── resume_first_part_not_completed.json ├── README.md ├── UploadPart │ ├── default.json │ ├── missing_etag.json │ └── throttle.json └── mock_s3_server.py ├── s3_asyncwrite_tests.c ├── s3_cancel_tests.c ├── s3_checksum_stream_test.c ├── s3_checksums_crc32_tests.c ├── s3_checksums_crc32c_tests.c ├── s3_checksums_crc64nvme_tests.c ├── s3_checksums_sha1_tests.c ├── s3_checksums_sha256_tests.c ├── s3_checksums_test_case_helper.h ├── s3_client_test.c ├── s3_data_plane_tests.c ├── s3_default_buffer_pool_tests.c ├── s3_endpoint_resolver_tests.c ├── s3_endpoint_tests.c ├── s3_list_objects_tests.c ├── s3_many_async_uploads_without_data_test.c ├── s3_meta_request_test.c ├── s3_mock_server_s3express_provider_test.c ├── s3_mock_server_tests.c ├── s3_parallel_read_stream_test.c ├── s3_platform_info_test.c ├── 
s3_request_messages_tests.c ├── s3_retry_tests.c ├── s3_s3express_client_test.c ├── s3_test_input_stream.c ├── s3_test_parallel_stream.c ├── s3_tester.c ├── s3_tester.h ├── s3_util_tests.c └── test_helper ├── README.md └── test_helper.py /.builder/actions/mock_server_setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup local mock server for tests 3 | """ 4 | 5 | import Builder 6 | 7 | import os 8 | import sys 9 | import subprocess 10 | import atexit 11 | 12 | 13 | class MockServerSetup(Builder.Action): 14 | """ 15 | Set up this machine for running the mock server test 16 | 17 | This action should be run in the 'pre_build_steps' or 'build_steps' stage. 18 | """ 19 | 20 | def run(self, env): 21 | if not env.project.needs_tests(env): 22 | print("Skipping mock server setup because tests disabled for project") 23 | return 24 | 25 | self.env = env 26 | python_path = sys.executable 27 | # install dependency for mock server 28 | self.env.shell.exec(python_path, 29 | '-m', 'pip', 'install', 'h11', 'trio', 'proxy.py', check=True) 30 | # check the deps can be import correctly 31 | self.env.shell.exec(python_path, 32 | '-c', 'import h11, trio', check=True) 33 | 34 | # set cmake flag so mock server tests are enabled 35 | env.project.config['cmake_args'].extend( 36 | ['-DENABLE_MOCK_SERVER_TESTS=ON', '-DASSERT_LOCK_HELD=ON']) 37 | 38 | base_dir = os.path.dirname(os.path.realpath(__file__)) 39 | dir = os.path.join(base_dir, "..", "..", "tests", "mock_s3_server") 40 | 41 | p1 = subprocess.Popen([python_path, "mock_s3_server.py"], cwd=dir) 42 | try: 43 | p2 = subprocess.Popen("proxy", cwd=dir) 44 | except Exception as e: 45 | # Okay for proxy to fail starting up as it may not be in the path 46 | print(e) 47 | p2 = None 48 | 49 | @atexit.register 50 | def close_mock_server(): 51 | p1.terminate() 52 | if p2 != None: 53 | p2.terminate() 54 | -------------------------------------------------------------------------------- 
/.clang-format: -------------------------------------------------------------------------------- 1 | --- 2 | Language: Cpp 3 | # BasedOnStyle: Mozilla 4 | AlignAfterOpenBracket: AlwaysBreak 5 | AlignConsecutiveAssignments: false 6 | AlignConsecutiveDeclarations: false 7 | AlignEscapedNewlines: Right 8 | AlignOperands: true 9 | AlignTrailingComments: true 10 | AllowAllParametersOfDeclarationOnNextLine: false 11 | AllowShortBlocksOnASingleLine: false 12 | AllowShortCaseLabelsOnASingleLine: false 13 | AllowShortFunctionsOnASingleLine: Inline 14 | AllowShortIfStatementsOnASingleLine: false 15 | AllowShortLoopsOnASingleLine: false 16 | AlwaysBreakAfterReturnType: None 17 | AlwaysBreakBeforeMultilineStrings: false 18 | BinPackArguments: false 19 | BinPackParameters: false 20 | BreakBeforeBinaryOperators: None 21 | BreakBeforeBraces: Attach 22 | BreakBeforeTernaryOperators: true 23 | BreakStringLiterals: true 24 | ColumnLimit: 120 25 | ContinuationIndentWidth: 4 26 | DerivePointerAlignment: false 27 | IncludeBlocks: Preserve 28 | IndentCaseLabels: true 29 | IndentPPDirectives: AfterHash 30 | IndentWidth: 4 31 | IndentWrappedFunctionNames: true 32 | KeepEmptyLinesAtTheStartOfBlocks: true 33 | MacroBlockBegin: '' 34 | MacroBlockEnd: '' 35 | MaxEmptyLinesToKeep: 1 36 | PenaltyBreakAssignment: 2 37 | PenaltyBreakBeforeFirstCallParameter: 19 38 | PenaltyBreakComment: 300 39 | PenaltyBreakFirstLessLess: 120 40 | PenaltyBreakString: 1000 41 | PenaltyExcessCharacter: 1000000 42 | PenaltyReturnTypeOnItsOwnLine: 100000 43 | PointerAlignment: Right 44 | ReflowComments: true 45 | SortIncludes: true 46 | SpaceAfterCStyleCast: false 47 | SpaceBeforeAssignmentOperators: true 48 | SpaceBeforeParens: ControlStatements 49 | SpaceInEmptyParentheses: false 50 | SpacesInContainerLiterals: true 51 | SpacesInCStyleCastParentheses: false 52 | SpacesInParentheses: false 53 | SpacesInSquareBrackets: false 54 | Standard: Cpp11 55 | TabWidth: 4 56 | UseTab: Never 57 | ... 
58 | 59 | -------------------------------------------------------------------------------- /.clang-tidy: -------------------------------------------------------------------------------- 1 | --- 2 | Checks: 'clang-diagnostic-*,clang-analyzer-*,readability-*,modernize-*,bugprone-*,misc-*,google-runtime-int,llvm-header-guard,fuchsia-restrict-system-includes,-clang-analyzer-valist.Uninitialized,-clang-analyzer-security.insecureAPI.rand,-clang-analyzer-alpha.*,-readability-magic-numbers,-readability-non-const-parameter,-readability-isolate-declaration' 3 | WarningsAsErrors: '*' 4 | HeaderFilterRegex: '.*(? 79 | $) 80 | 81 | aws_use_package(aws-c-auth) 82 | aws_use_package(aws-checksums) 83 | 84 | target_link_libraries(${PROJECT_NAME} PUBLIC ${DEP_AWS_LIBS}) 85 | 86 | aws_prepare_shared_lib_exports(${PROJECT_NAME}) 87 | 88 | install(FILES ${AWS_S3_ROOT_HEADERS} DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/aws/s3" COMPONENT Development) 89 | 90 | if (BUILD_SHARED_LIBS) 91 | set (TARGET_DIR "shared") 92 | else() 93 | set (TARGET_DIR "static") 94 | endif() 95 | 96 | install(EXPORT "${PROJECT_NAME}-targets" 97 | DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}/${TARGET_DIR}/" 98 | NAMESPACE AWS:: 99 | COMPONENT Development) 100 | 101 | configure_file("cmake/${PROJECT_NAME}-config.cmake" 102 | "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" 103 | @ONLY) 104 | 105 | install(FILES "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}-config.cmake" 106 | DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}/" 107 | COMPONENT Development) 108 | 109 | include(CTest) 110 | if (BUILD_TESTING) 111 | add_subdirectory(tests) 112 | if (NOT BYO_CRYPTO AND NOT CMAKE_CROSSCOMPILING) 113 | add_subdirectory(samples) 114 | endif() 115 | endif() 116 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has 
adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-s3/issues), or [recently closed](https://github.com/awslabs/aws-c-s3/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. 
You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-s3/labels/help%20wanted) issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). 
Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](https://github.com/awslabs/aws-c-s3/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | 61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 62 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## AWS C S3 2 | 3 | The AWS-C-S3 library is an asynchronous AWS S3 client focused on maximizing throughput and network utilization. 4 | 5 | ### Key features: 6 | - **Automatic Request Splitting**: Improves throughput by automatically splitting the request into part-sized chunks and performing parallel uploads/downloads of these chunks over multiple connections. There's a cap on the throughput of single S3 connection, the only way to go faster is multiple parallel connections. 7 | - **Automatic Retries**: Increases resilience by retrying individual failed chunks of a file transfer, eliminating the need to restart transfers from scratch after an intermittent error. 8 | - **DNS Load Balancing**: DNS resolver continuously harvests Amazon S3 IP addresses. When load is spread across the S3 fleet, overall throughput more reliable than if all connections are going to a single IP. 9 | - **Advanced Network Management**: The client incorporates automatic request parallelization, effective timeouts and retries, and efficient connection reuse. This approach helps to maximize throughput and network utilization, and to avoid network overloads. 
10 | - **Thread Pools and Async I/O**: Avoids bottlenecks associated with single-thread processing. 11 | - **Parallel Reads**: When uploading a large file from disk, reads from multiple parts of the file in parallel. This is faster than reading the file sequentially from beginning to end. 12 | 13 | ### Documentation 14 | 15 | - [GetObject](docs/GetObject.md): A visual representation of the GetObject request flow. 16 | - [Memory Aware Requests Execution](docs/memory_aware_request_execution.md): An in-depth guide on optimizing memory usage during request executions. 17 | 18 | ## License 19 | 20 | This library is licensed under the Apache 2.0 License. 21 | 22 | ## Usage 23 | 24 | ### Building 25 | 26 | CMake 3.9+ is required to build. 27 | 28 | `` must be an absolute path in the following instructions. 29 | 30 | #### Linux-Only Dependencies 31 | 32 | If you are building on Linux, you will need to build aws-lc and s2n-tls first. 33 | 34 | ``` 35 | git clone git@github.com:aws/aws-lc.git 36 | cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX= 37 | cmake --build aws-lc/build --target install 38 | 39 | git clone git@github.com:aws/s2n-tls.git 40 | cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 41 | cmake --build s2n-tls/build --target install 42 | ``` 43 | 44 | #### Building aws-c-s3 and Remaining Dependencies 45 | 46 | ``` 47 | git clone git@github.com:awslabs/aws-c-common.git 48 | cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX= 49 | cmake --build aws-c-common/build --target install 50 | 51 | git clone git@github.com:awslabs/aws-checksums.git 52 | cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 53 | cmake --build aws-checksums/build --target install 54 | 55 | git clone git@github.com:awslabs/aws-c-cal.git 56 | cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 57 | cmake --build aws-c-cal/build --target install 58 | 59 | git clone 
git@github.com:awslabs/aws-c-io.git 60 | cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 61 | cmake --build aws-c-io/build --target install 62 | 63 | git clone git@github.com:awslabs/aws-c-compression.git 64 | cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 65 | cmake --build aws-c-compression/build --target install 66 | 67 | git clone git@github.com:awslabs/aws-c-http.git 68 | cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 69 | cmake --build aws-c-http/build --target install 70 | 71 | git clone git@github.com:awslabs/aws-c-sdkutils.git 72 | cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 73 | cmake --build aws-c-sdkutils/build --target install 74 | 75 | git clone git@github.com:awslabs/aws-c-auth.git 76 | cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 77 | cmake --build aws-c-auth/build --target install 78 | 79 | git clone git@github.com:awslabs/aws-c-s3.git 80 | cmake -S aws-c-s3 -B aws-c-s3/build -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH= 81 | cmake --build aws-c-s3/build --target install 82 | ``` 83 | 84 | #### Running S3 sample 85 | 86 | After installing all the dependencies, and building aws-c-s3, you can run the sample directly from the s3 build directory. 87 | 88 | To download: 89 | ``` 90 | aws-c-s3/build/samples/s3/s3 cp s3:/// --region 91 | ``` 92 | To upload: 93 | ``` 94 | aws-c-s3/build/samples/s3/s3 cp s3:/// --region 95 | ``` 96 | To list objects: 97 | ``` 98 | aws-c-s3/build/samples/s3/s3 ls s3:// --region 99 | ``` 100 | 101 | ## Testing 102 | 103 | The unit tests require an AWS account with S3 buckets set up in a particular way. 104 | Use the [test_helper script](./tests/test_helper/) to set this up. 
105 | -------------------------------------------------------------------------------- /builder.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-c-s3", 3 | "targets": { 4 | "android": { 5 | "enabled": false, 6 | "_comment": "disabled until we need to support it. LibCrypto needs to be configured on build machine." 7 | } 8 | }, 9 | "upstream": [ 10 | { "name": "aws-c-auth" }, 11 | { "name": "aws-c-http" }, 12 | { "name": "aws-checksums" }, 13 | { "name": "aws-c-sdkutils"} 14 | ], 15 | "downstream": [ 16 | ], 17 | "test_steps": [ 18 | "test" 19 | ], 20 | "+cmake_args": [ 21 | "-DENABLE_MRAP_TESTS=ON" 22 | ], 23 | "pre_build_steps": ["mock-server-setup"] 24 | } 25 | -------------------------------------------------------------------------------- /cmake/aws-c-s3-config.cmake: -------------------------------------------------------------------------------- 1 | include(CMakeFindDependencyMacro) 2 | 3 | find_dependency(aws-c-auth) 4 | find_dependency(aws-c-http) 5 | find_dependency(aws-checksums) 6 | 7 | macro(aws_load_targets type) 8 | include(${CMAKE_CURRENT_LIST_DIR}/${type}/@PROJECT_NAME@-targets.cmake) 9 | endmacro() 10 | 11 | # try to load the lib follow BUILD_SHARED_LIBS. Fall back if not exist. 12 | if (BUILD_SHARED_LIBS) 13 | if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/shared") 14 | aws_load_targets(shared) 15 | else() 16 | aws_load_targets(static) 17 | endif() 18 | else() 19 | if (EXISTS "${CMAKE_CURRENT_LIST_DIR}/static") 20 | aws_load_targets(static) 21 | else() 22 | aws_load_targets(shared) 23 | endif() 24 | endif() 25 | -------------------------------------------------------------------------------- /docs/GetObject.md: -------------------------------------------------------------------------------- 1 | # GetObject 2 | 3 | ## Overview 4 | The `GetObject` is used to download objects from Amazon S3. 
Optimized for throughput, the CRT S3 client enhances performance and reliability by parallelizing multiple part-sized `GetObject` with range requests. 5 | 6 | ## Flow Diagram 7 | Below is the typical flow of a GetObject request made by the user. 8 | 9 | ![GetObject Flow Diagram](images/GetObjectFlow.svg) 10 | -------------------------------------------------------------------------------- /docs/memory_aware_request_execution.md: -------------------------------------------------------------------------------- 1 | CRT S3 client was designed with throughput as a primary goal. As such, the client 2 | scales resource usage, such as number of parallel requests in flight, to achieve 3 | target throughput. The client creates buffers to hold data it is sending or 4 | receiving for each request and scaling requests in flight has direct impact on 5 | memory used. In practice, setting high target throughput or larger part size can 6 | lead to high observed memory usage. 7 | 8 | To mitigate high memory usages, memory reuse improvements were added to 9 | the client along with options to limit max memory used. The following sections 10 | will go into more detail on aspects of those changes and how the affect the 11 | client. 12 | 13 | ### Memory Reuse 14 | At the basic level, CRT S3 client starts with a meta request for operation like 15 | put or get, breaks it into smaller part-sized requests and executes those in 16 | parallel. CRT S3 client used to allocate part sized buffer for each of those 17 | requests and release it right after the request was done. That approach, 18 | resulted in a lot of very short lived allocations and allocator thrashing, 19 | overall leading to memory use spikes considerably higher than whats needed. To 20 | address that, the client is switching to a pooled buffer approach, discussed 21 | below. 
22 | 23 | Note: approach described below is work in progress and concentrates on improving 24 | the common cases (default 8mb part sizes and part sizes smaller than 64mb). 25 | 26 | Several observations about the client usage of buffers: 27 | - Client does not automatically switch to buffers above default 8mb for upload, until 28 | upload passes 10,000 parts (~80 GB). 29 | - Get operations always use either the configured part size or default of 8mb. 30 | Part size for get is not adjusted, since there is no 10,000 part limitation. 31 | - Both Put and Get operations go through fill and drain phases. Ex. for Put, the 32 | client first schedules a number of reads to 'fill' the buffers from the source 33 | and as those reads complete, the buffer are send over to the networking layer 34 | are 'drained' 35 | - individual uploadParts or ranged gets operations typically have a similar 36 | lifespan (with some caveats). in practice part buffers are acquired/released 37 | in bulk at the same time 38 | 39 | The buffer pooling takes advantage of some of those allocation patterns and 40 | works as follows. 41 | The memory is split into primary and secondary areas. Secondary area is used for 42 | requests with part size bigger than a predefined value (currently 4 times part size) 43 | allocations from it got directly to allocator and are effectively old way of 44 | doing things. 45 | 46 | Primary memory area is split into blocks of fixed size (part size if defined or 47 | 8mb if not times 16). Blocks are allocated on demand. Each block is logically 48 | subdivided into part sized chunks. Pool allocates and releases in chunk sizes 49 | only, and supports acquiring several chunks (up to 4) at once. 50 | 51 | Blocks are kept around while there are ongoing requests and are released async, 52 | when there is low pressure on memory. 53 | 54 | One complication is "forced" buffers. 
A forced buffer is one that 55 | comes from primary or secondary storage as usual, but it is allowed to exceed 56 | the memory limit. Forced buffers are only used when waiting for a normal ticket 57 | reservation could cause deadlock. (At time of writing, they're only used for 58 | async-writes) 59 | 60 | ### Scheduling 61 | Running out of memory is a terminal condition within CRT and in general its not 62 | practical to try to set overall memory limit on all allocations, since it 63 | dramatically increases the complexity of the code that deals with cases where 64 | only part of a memory was allocated for a task. 65 | 66 | Comparatively, majority of memory usage within S3 Client comes from buffers 67 | allocated for Put/Get parts. So to control memory usage, the client will 68 | concentrate on controlling the number of buffers allocated. Effectively, this 69 | boils down to a back pressure mechanism of limiting the number of parts 70 | scheduled as memory gets closer to the limit. Memory used for other resources, 71 | ex. http connections data, various supporting structures, are not actively 72 | controlled and instead some memory is taken out from overall limit. 73 | 74 | Overall, scheduling does a best-effort memory limiting. At the time of 75 | scheduling, the client reserves memory by using buffer pool ticketing mechanism. 76 | Buffer is acquired from the pool using the ticket as close to the usage as 77 | possible (this approach peaks at lower mem usage than preallocating all mem 78 | upfront because buffers cannot be used right away, ex reading from file will 79 | fill buffers slower than they are sent, leading to decent amount of buffer reuse) 80 | Reservation mechanism is approximate and in some cases can lead to actual memory 81 | usage being higher once tickets are redeemed. The client reserves some memory to 82 | mitigate overflows like that. 
83 | -------------------------------------------------------------------------------- /format-check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import os 4 | from pathlib import Path 5 | import re 6 | from subprocess import list2cmdline, run 7 | from tempfile import NamedTemporaryFile 8 | 9 | CLANG_FORMAT_VERSION = '18.1.6' 10 | 11 | INCLUDE_REGEX = re.compile( 12 | r'^(include|source|tests|verification)/.*\.(c|h|inl)$') 13 | EXCLUDE_REGEX = re.compile(r'^$') 14 | 15 | arg_parser = argparse.ArgumentParser(description="Check with clang-format") 16 | arg_parser.add_argument('-i', '--inplace-edit', action='store_true', 17 | help="Edit files inplace") 18 | args = arg_parser.parse_args() 19 | 20 | os.chdir(Path(__file__).parent) 21 | 22 | # create file containing list of all files to format 23 | filepaths_file = NamedTemporaryFile(delete=False) 24 | for dirpath, dirnames, filenames in os.walk('.'): 25 | for filename in filenames: 26 | # our regexes expect filepath to use forward slash 27 | filepath = Path(dirpath, filename).as_posix() 28 | if not INCLUDE_REGEX.match(filepath): 29 | continue 30 | if EXCLUDE_REGEX.match(filepath): 31 | continue 32 | 33 | filepaths_file.write(f"{filepath}\n".encode()) 34 | filepaths_file.close() 35 | 36 | # use pipx to run clang-format from PyPI 37 | # this is a simple way to run the same clang-format version regardless of OS 38 | cmd = ['pipx', 'run', f'clang-format=={CLANG_FORMAT_VERSION}', 39 | f'--files={filepaths_file.name}'] 40 | if args.inplace_edit: 41 | cmd += ['-i'] 42 | else: 43 | cmd += ['--Werror', '--dry-run'] 44 | 45 | print(f"{Path.cwd()}$ {list2cmdline(cmd)}") 46 | if run(cmd).returncode: 47 | exit(1) 48 | -------------------------------------------------------------------------------- /include/aws/s3/exports.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_EXPORTS_H 2 | #define 
AWS_S3_EXPORTS_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #if defined(AWS_CRT_USE_WINDOWS_DLL_SEMANTICS) || defined(_WIN32) 10 | # ifdef AWS_S3_USE_IMPORT_EXPORT 11 | # ifdef AWS_S3_EXPORTS 12 | # define AWS_S3_API __declspec(dllexport) 13 | # else 14 | # define AWS_S3_API __declspec(dllimport) 15 | # endif /* AWS_S3_EXPORTS */ 16 | # else 17 | # define AWS_S3_API 18 | # endif /*USE_IMPORT_EXPORT */ 19 | 20 | #else 21 | # if defined(AWS_S3_USE_IMPORT_EXPORT) && defined(AWS_S3_EXPORTS) 22 | # define AWS_S3_API __attribute__((visibility("default"))) 23 | # else 24 | # define AWS_S3_API 25 | # endif 26 | 27 | #endif /* defined(AWS_CRT_USE_WINDOWS_DLL_SEMANTICS) || defined(_WIN32) */ 28 | 29 | #endif /* AWS_S3_EXPORTS_H */ 30 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_auto_ranged_get.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_AUTO_RANGED_GET_H 2 | #define AWS_S3_AUTO_RANGED_GET_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 
7 | */ 8 | 9 | #include "aws/s3/private/s3_meta_request_impl.h" 10 | 11 | enum aws_s3_auto_ranged_get_request_type { 12 | AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT, 13 | AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_RANGE, 14 | AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_GET_OBJECT_WITH_PART_NUMBER_1, 15 | }; 16 | 17 | struct aws_s3_auto_ranged_get { 18 | struct aws_s3_meta_request base; 19 | 20 | enum aws_s3_checksum_algorithm validation_algorithm; 21 | 22 | struct aws_string *etag; 23 | 24 | bool initial_message_has_start_range; 25 | bool initial_message_has_end_range; 26 | uint64_t initial_range_start; 27 | uint64_t initial_range_end; 28 | 29 | uint64_t object_size_hint; 30 | bool object_size_hint_available; 31 | 32 | /* Members to only be used when the mutex in the base type is locked. */ 33 | struct { 34 | /* The starting byte of the data that we will be retrieved from the object. 35 | * (ignore this if object_range_empty) */ 36 | uint64_t object_range_start; 37 | 38 | /* The last byte of the data that will be retrieved from the object. 39 | * (ignore this if object_range_empty) 40 | * Note this is inclusive: https://developer.mozilla.org/en-US/docs/Web/HTTP/Range_requests 41 | * So if begin=0 and end=0 then 1 byte is being downloaded. */ 42 | uint64_t object_range_end; 43 | 44 | uint64_t first_part_size; 45 | 46 | /* The total number of parts that are being used in downloading the object range. Note that "part" here 47 | * currently refers to a range-get, and does not require a "part" on the service side. */ 48 | uint32_t total_num_parts; 49 | 50 | uint32_t num_parts_requested; 51 | uint32_t num_parts_completed; 52 | uint32_t num_parts_successful; 53 | uint32_t num_parts_failed; 54 | uint32_t num_parts_checksum_validated; 55 | 56 | uint32_t object_range_known : 1; 57 | 58 | /* True if object_range_known, and it's found to be empty. 
59 | * If this is true, ignore object_range_start and object_range_end */ 60 | uint32_t object_range_empty : 1; 61 | uint32_t head_object_sent : 1; 62 | uint32_t head_object_completed : 1; 63 | uint32_t read_window_warning_issued : 1; 64 | } synced_data; 65 | 66 | uint32_t initial_message_has_range_header : 1; 67 | uint32_t initial_message_has_if_match_header : 1; 68 | }; 69 | 70 | AWS_EXTERN_C_BEGIN 71 | 72 | /* Creates a new auto-ranged get meta request. This will do multiple parallel ranged-gets when appropriate. */ 73 | AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new( 74 | struct aws_allocator *allocator, 75 | struct aws_s3_client *client, 76 | size_t part_size, 77 | const struct aws_s3_meta_request_options *options); 78 | 79 | AWS_EXTERN_C_END 80 | 81 | #endif /* AWS_S3_AUTO_RANGED_GET_H */ 82 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_auto_ranged_put.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_AUTO_RANGED_PUT_H 2 | #define AWS_S3_AUTO_RANGED_PUT_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include "aws/s3/private/s3_meta_request_impl.h" 10 | #include "s3_paginator.h" 11 | 12 | enum aws_s3_auto_ranged_put_request_tag { 13 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS, 14 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, 15 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART, 16 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, 17 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, 18 | 19 | AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_MAX, 20 | }; 21 | 22 | struct aws_s3_auto_ranged_put { 23 | struct aws_s3_meta_request base; 24 | 25 | /* Initialized either during creation in resume flow or as result of create multipart upload during normal flow. 
*/ 26 | struct aws_string *upload_id; 27 | 28 | /* Resume token used to resume the operation */ 29 | struct aws_s3_meta_request_resume_token *resume_token; 30 | 31 | uint64_t content_length; 32 | bool has_content_length; 33 | 34 | /* 35 | * total_num_parts_from_content_length is calculated by content_length / part_size. 36 | * It will be 0 if there is no content_length. 37 | */ 38 | uint32_t total_num_parts_from_content_length; 39 | 40 | /* Only meant for use in the update function, which is never called concurrently. */ 41 | struct { 42 | /* 43 | * Next part number to send. 44 | * Note: this follows s3 part number convention and counting starts with 1. 45 | * Throughout codebase 0 based part numbers are usually referred to as part index. 46 | */ 47 | uint32_t next_part_number; 48 | } threaded_update_data; 49 | 50 | /* Members to only be used when the mutex in the base type is locked. */ 51 | struct { 52 | /* Array list of `struct aws_s3_mpu_part_info *` 53 | * Info about each part, that we need to remember for CompleteMultipartUpload. 54 | * This is updated as we upload each part. 55 | * If resuming an upload, we first call ListParts and store the details 56 | * of previously uploaded parts here. In this case, the array may start with gaps 57 | * (e.g. if parts 1 and 3 were previously uploaded, but not part 2). 
*/ 58 | struct aws_array_list part_list; 59 | 60 | struct aws_s3_paginated_operation *list_parts_operation; 61 | struct aws_string *list_parts_continuation_token; 62 | 63 | /* Number of parts we've started work on */ 64 | uint32_t num_parts_started; 65 | /* Number of parts we've started, and we have no more work to do */ 66 | uint32_t num_parts_completed; 67 | uint32_t num_parts_successful; 68 | uint32_t num_parts_failed; 69 | /* When content length is not known, requests are optimistically 70 | * scheduled, below represents how many requests were scheduled and had no 71 | * work to do*/ 72 | uint32_t num_parts_noop; 73 | 74 | /* Number of parts we've started, but they're not done reading from stream yet. 75 | * Though reads are serial (only 1 part can be reading from stream at a time) 76 | * we may queue up more to minimize delays between each read. */ 77 | uint32_t num_parts_pending_read; 78 | 79 | struct aws_http_headers *needed_response_headers; 80 | 81 | /* Whether body stream is exhausted. */ 82 | bool is_body_stream_at_end; 83 | 84 | int list_parts_error_code; 85 | int create_multipart_upload_error_code; 86 | int complete_multipart_upload_error_code; 87 | int abort_multipart_upload_error_code; 88 | 89 | struct { 90 | /* Mark a single ListParts request has started or not */ 91 | uint32_t started : 1; 92 | /* Mark ListParts need to continue or not */ 93 | uint32_t continues : 1; 94 | /* Mark ListParts has completed all the pages or not */ 95 | uint32_t completed : 1; 96 | } list_parts_state; 97 | uint32_t create_multipart_upload_sent : 1; 98 | uint32_t create_multipart_upload_completed : 1; 99 | uint32_t complete_multipart_upload_sent : 1; 100 | uint32_t complete_multipart_upload_completed : 1; 101 | uint32_t abort_multipart_upload_sent : 1; 102 | uint32_t abort_multipart_upload_completed : 1; 103 | 104 | } synced_data; 105 | }; 106 | 107 | AWS_EXTERN_C_BEGIN 108 | 109 | /* Creates a new auto-ranged put meta request. 
110 | * This will do a multipart upload in parallel when appropriate. 111 | * Note: if has_content_length is false, content_length and num_parts are ignored. 112 | */ 113 | 114 | AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new( 115 | struct aws_allocator *allocator, 116 | struct aws_s3_client *client, 117 | size_t part_size, 118 | bool has_content_length, 119 | uint64_t content_length, 120 | uint32_t num_parts, 121 | const struct aws_s3_meta_request_options *options); 122 | 123 | AWS_EXTERN_C_END 124 | 125 | #endif /* AWS_S3_AUTO_RANGED_PUT_H */ 126 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_checksums.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_CHECKSUMS_H 2 | #define AWS_S3_CHECKSUMS_H 3 | /** 4 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 5 | * SPDX-License-Identifier: Apache-2.0. 6 | */ 7 | #include "aws/s3/s3_client.h" 8 | 9 | /* TODO: consider moving the aws_checksum_stream to aws-c-checksum, and the rest about checksum headers and trailer to 10 | * aws-c-sdkutil. */ 11 | 12 | struct aws_s3_checksum; 13 | 14 | /* List to check the checksum algorithm to use based on the priority. 
*/ 15 | static const enum aws_s3_checksum_algorithm s_checksum_algo_priority_list[] = { 16 | AWS_SCA_CRC64NVME, 17 | AWS_SCA_CRC32C, 18 | AWS_SCA_CRC32, 19 | AWS_SCA_SHA1, 20 | AWS_SCA_SHA256, 21 | }; 22 | AWS_STATIC_ASSERT(AWS_ARRAY_SIZE(s_checksum_algo_priority_list) == (AWS_SCA_END - AWS_SCA_INIT + 1)); 23 | 24 | struct aws_checksum_vtable { 25 | void (*destroy)(struct aws_s3_checksum *checksum); 26 | int (*update)(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf); 27 | int (*finalize)(struct aws_s3_checksum *checksum, struct aws_byte_buf *out); 28 | }; 29 | 30 | struct aws_s3_checksum { 31 | struct aws_allocator *allocator; 32 | struct aws_checksum_vtable *vtable; 33 | size_t digest_size; 34 | enum aws_s3_checksum_algorithm algorithm; 35 | bool good; 36 | union { 37 | struct aws_hash *hash; 38 | uint32_t crc_val_32bit; 39 | uint64_t crc_val_64bit; 40 | } impl; 41 | }; 42 | 43 | struct checksum_config_storage { 44 | struct aws_allocator *allocator; 45 | struct aws_byte_buf full_object_checksum; 46 | bool has_full_object_checksum; 47 | 48 | aws_s3_meta_request_full_object_checksum_fn *full_object_checksum_callback; 49 | void *user_data; 50 | 51 | enum aws_s3_checksum_location location; 52 | enum aws_s3_checksum_algorithm checksum_algorithm; 53 | bool validate_response_checksum; 54 | struct { 55 | bool crc64nvme; 56 | bool crc32c; 57 | bool crc32; 58 | bool sha1; 59 | bool sha256; 60 | } response_checksum_algorithms; 61 | }; 62 | 63 | /** 64 | * a stream that takes in a stream, computes a running checksum as it is read, and outputs the checksum when the stream 65 | * is destroyed. 66 | * Note: seek this stream will immediately fail, as it would prevent an accurate calculation of the 67 | * checksum. 68 | * 69 | * @param allocator 70 | * @param existing_stream The real content to read from. Destroying the checksum stream destroys the existing stream. 71 | * outputs the checksum of existing stream to checksum_output upon destruction. 
Will be kept 72 | * alive by the checksum stream 73 | * @param algorithm Checksum algorithm to use. 74 | * @param checksum_output Checksum of the `existing_stream`, owned by caller, which will be calculated when this stream 75 | * is destroyed. 76 | */ 77 | AWS_S3_API 78 | struct aws_input_stream *aws_checksum_stream_new( 79 | struct aws_allocator *allocator, 80 | struct aws_input_stream *existing_stream, 81 | enum aws_s3_checksum_algorithm algorithm, 82 | struct aws_byte_buf *checksum_output); 83 | 84 | /** 85 | * TODO: properly support chunked encoding. 86 | * 87 | * A stream that takes in a stream, encodes it to aws_chunked. Computes a running checksum as it is read and add the 88 | * checksum as trailer at the end of the stream. All of the added bytes will be counted to the length of the stream. 89 | * Note: seek this stream will immediately fail, as it would prevent an accurate calculation of the 90 | * checksum. 91 | * 92 | * @param allocator 93 | * @param existing_stream The data to be chunkified prepended by information on the stream length followed by a final 94 | * chunk and a trailing chunk containing a checksum of the existing stream. Destroying the 95 | * chunk stream will destroy the existing stream. 96 | * @param checksum_output Optional argument, if provided the buffer will be initialized to the appropriate size and 97 | * filled with the checksum result when calculated. Callers responsibility to cleanup. 98 | */ 99 | AWS_S3_API 100 | struct aws_input_stream *aws_chunk_stream_new( 101 | struct aws_allocator *allocator, 102 | struct aws_input_stream *existing_stream, 103 | enum aws_s3_checksum_algorithm algorithm, 104 | struct aws_byte_buf *checksum_output); 105 | 106 | /** 107 | * Get the size of the checksum output corresponding to the aws_s3_checksum_algorithm enum value. 
108 | */ 109 | AWS_S3_API 110 | size_t aws_get_digest_size_from_checksum_algorithm(enum aws_s3_checksum_algorithm algorithm); 111 | 112 | /** 113 | * Get header name to use for algorithm (e.g. "x-amz-checksum-crc32") 114 | */ 115 | AWS_S3_API 116 | struct aws_byte_cursor aws_get_http_header_name_from_checksum_algorithm(enum aws_s3_checksum_algorithm algorithm); 117 | 118 | /** 119 | * Get algorithm's name (e.g. "CRC32"), to be used as the value of headers like `x-amz-checksum-algorithm` 120 | */ 121 | AWS_S3_API 122 | struct aws_byte_cursor aws_get_checksum_algorithm_name(enum aws_s3_checksum_algorithm algorithm); 123 | 124 | /** 125 | * Get the name of checksum algorithm to be used as the details of the parts were uploaded. Referring to 126 | * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompletedPart.html#AmazonS3-Type-CompletedPart 127 | */ 128 | AWS_S3_API 129 | struct aws_byte_cursor aws_get_completed_part_name_from_checksum_algorithm(enum aws_s3_checksum_algorithm algorithm); 130 | 131 | /** 132 | * create a new aws_checksum corresponding to the aws_s3_checksum_algorithm enum value. 133 | */ 134 | AWS_S3_API 135 | struct aws_s3_checksum *aws_checksum_new(struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm); 136 | 137 | /** 138 | * Compute an aws_checksum corresponding to the provided enum, passing a function pointer around instead of using a 139 | * conditional would be faster, but would be a negligible improvement compared to the cost of processing data twice 140 | * which would be the only time this function would be used, and would be harder to follow. 141 | */ 142 | AWS_S3_API 143 | int aws_checksum_compute( 144 | struct aws_allocator *allocator, 145 | enum aws_s3_checksum_algorithm algorithm, 146 | const struct aws_byte_cursor *input, 147 | struct aws_byte_buf *output); 148 | 149 | /** 150 | * Cleans up and deallocates checksum. 
151 | */ 152 | AWS_S3_API 153 | void aws_checksum_destroy(struct aws_s3_checksum *checksum); 154 | 155 | /** 156 | * Updates the running checksum with to_checksum. This can be called multiple times. 157 | */ 158 | AWS_S3_API 159 | int aws_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum); 160 | 161 | /** 162 | * Completes the checksum computation and writes the final digest to output. 163 | * Allocation of output is the caller's responsibility. 164 | */ 165 | AWS_S3_API 166 | int aws_checksum_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output); 167 | 168 | AWS_S3_API 169 | int aws_checksum_config_storage_init( 170 | struct aws_allocator *allocator, 171 | struct checksum_config_storage *internal_config, 172 | const struct aws_s3_checksum_config *config, 173 | const struct aws_http_message *message, 174 | const void *log_id); 175 | 176 | AWS_S3_API 177 | void aws_checksum_config_storage_cleanup(struct checksum_config_storage *internal_config); 178 | 179 | #endif /* AWS_S3_CHECKSUMS_H */ 180 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_copy_object.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_COPY_OBJECT_H 2 | #define AWS_S3_COPY_OBJECT_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 
7 | */ 8 | 9 | #include "aws/s3/private/s3_meta_request_impl.h" 10 | #include 11 | 12 | enum aws_s3_copy_object_request_tag { 13 | AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE, 14 | AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS, 15 | AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD, 16 | AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY, 17 | AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD, 18 | AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD, 19 | 20 | AWS_S3_COPY_OBJECT_REQUEST_TAG_MAX, 21 | }; 22 | 23 | struct aws_s3_copy_object { 24 | struct aws_s3_meta_request base; 25 | 26 | /* Usable after the Create Multipart Upload request succeeds. */ 27 | struct aws_string *upload_id; 28 | 29 | /* (Optional) source_uri for the copy operation. */ 30 | struct aws_uri source_uri; 31 | 32 | /* Only meant for use in the update function, which is never called concurrently. */ 33 | struct { 34 | uint32_t next_part_number; 35 | } threaded_update_data; 36 | 37 | /* Members to only be used when the mutex in the base type is locked. */ 38 | struct { 39 | /* Array-list of `struct aws_s3_mpu_part_info *`. 40 | * If copying via multipart upload, we fill in this info as each part gets copied, 41 | * and it's used to generate the final CompleteMultipartUpload. 
*/ 42 | struct aws_array_list part_list; 43 | 44 | /* obtained through a HEAD request against the source object */ 45 | uint64_t content_length; 46 | size_t part_size; 47 | 48 | uint32_t total_num_parts; 49 | uint32_t num_parts_sent; 50 | uint32_t num_parts_completed; 51 | uint32_t num_parts_successful; 52 | uint32_t num_parts_failed; 53 | 54 | struct aws_http_headers *needed_response_headers; 55 | 56 | int create_multipart_upload_error_code; 57 | int complete_multipart_upload_error_code; 58 | int abort_multipart_upload_error_code; 59 | 60 | uint32_t head_object_sent : 1; 61 | uint32_t head_object_completed : 1; 62 | uint32_t copy_request_bypass_sent : 1; 63 | uint32_t copy_request_bypass_completed : 1; 64 | uint32_t create_multipart_upload_sent : 1; 65 | uint32_t create_multipart_upload_completed : 1; 66 | uint32_t complete_multipart_upload_sent : 1; 67 | uint32_t complete_multipart_upload_completed : 1; 68 | uint32_t abort_multipart_upload_sent : 1; 69 | uint32_t abort_multipart_upload_completed : 1; 70 | 71 | } synced_data; 72 | }; 73 | 74 | /* Creates a new CopyObject meta request. This will perform either 75 | * 1) A CopyObject S3 API call if the source object length is < 1 GB or 76 | * 2) a multipart copy in parallel otherwise. 77 | */ 78 | struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new( 79 | struct aws_allocator *allocator, 80 | struct aws_s3_client *client, 81 | const struct aws_s3_meta_request_options *options); 82 | 83 | #endif 84 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_default_buffer_pool.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_BUFFER_ALLOCATOR_H 2 | #define AWS_S3_BUFFER_ALLOCATOR_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | #include 11 | 12 | /* 13 | * S3 buffer pool. 
14 | * Buffer pool used for pooling part sized buffers for Put/Get operations. 15 | * Provides additional functionality for limiting overall memory used. 16 | * High-level buffer pool usage flow: 17 | * - Create buffer with overall memory limit and common buffer size, aka chunk 18 | * size (typically part size configured on client) 19 | * - For each request: 20 | * -- call reserve to acquire ticket for future buffer acquisition. this will 21 | * mark memory reserved, but would not allocate it. if reserve call hits 22 | * memory limit, it fails and reservation hold is put on the whole buffer 23 | * pool. (aws_s3_buffer_pool_remove_reservation_hold can be used to remove 24 | * reservation hold). 25 | * -- once request needs memory, it can exchange ticket for a buffer using 26 | * aws_s3_buffer_pool_acquire_buffer. this operation never fails, even if it 27 | * ends up going over memory limit. 28 | * -- buffer lifetime is tied to the ticket. so once request is done with the 29 | * buffer, ticket is released and buffer returns back to the pool. 30 | */ 31 | 32 | AWS_EXTERN_C_BEGIN 33 | 34 | struct aws_s3_default_buffer_pool; 35 | struct aws_s3_default_buffer_ticket; 36 | 37 | struct aws_s3_default_buffer_pool_usage_stats { 38 | /* Effective Max memory limit. Memory limit value provided during construction minus 39 | * buffer reserved for overhead of the pool */ 40 | size_t mem_limit; 41 | 42 | /* Max size of buffer to be allocated from primary. */ 43 | size_t primary_cutoff; 44 | 45 | /* Overall memory allocated for blocks. */ 46 | size_t primary_allocated; 47 | /* Number of blocks allocated in primary. */ 48 | size_t primary_num_blocks; 49 | /* Memory used in primary storage. 50 | * Does not account for wasted space if memory doesn't map perfectly into chunks. 51 | * This is always <= primary_allocated */ 52 | size_t primary_used; 53 | /* How much memory is reserved, but not yet used, in primary storage. 
54 | * Does not account for wasted space if memory doesn't map perfectly into chunks. */ 55 | size_t primary_reserved; 56 | 57 | /* Secondary memory used. Accurate, maps directly to base allocator. */ 58 | size_t secondary_used; 59 | /* Secondary memory reserved, but not yet used. Accurate, maps directly to base allocator. */ 60 | size_t secondary_reserved; 61 | 62 | /* Bytes used in "forced" buffers (created even if they exceed memory limits). 63 | * This is always <= primary_used + secondary_used */ 64 | size_t forced_used; 65 | }; 66 | 67 | /* 68 | * Create new buffer pool. 69 | * chunk_size - specifies the size of memory that will most commonly be acquired 70 | * from the pool (typically part size). 71 | * mem_limit - limit on how much mem buffer pool can use. once limit is hit, 72 | * buffers can no longer be reserved from (reservation hold is placed on the pool). 73 | * Returns buffer pool pointer on success and NULL on failure. 74 | */ 75 | AWS_S3_API struct aws_s3_buffer_pool *aws_s3_default_buffer_pool_new( 76 | struct aws_allocator *allocator, 77 | struct aws_s3_buffer_pool_config config); 78 | 79 | /* 80 | * Destroys buffer pool. 81 | * Does nothing if buffer_pool is NULL. 82 | */ 83 | AWS_S3_API void aws_s3_default_buffer_pool_destroy(struct aws_s3_buffer_pool *buffer_pool); 84 | 85 | /* 86 | * Reserves memory from the pool for later use. 87 | * Best effort and can potentially reserve memory slightly over the limit. 88 | * Reservation takes some memory out of the available pool, but does not 89 | * allocate it right away. 90 | * On success ticket will be returned. 91 | * On failure NULL is returned, error is raised and reservation hold is placed 92 | * on the buffer. Any further reservations while hold is active will fail. 93 | * Remove reservation hold to unblock reservations. 94 | * 95 | * If you MUST acquire a buffer now (waiting to reserve a ticket would risk deadlock), 96 | * use aws_s3_buffer_pool_acquire_forced_buffer() instead. 
97 | */ 98 | AWS_S3_API struct aws_future_s3_buffer_ticket *aws_s3_default_buffer_pool_reserve( 99 | struct aws_s3_buffer_pool *buffer_pool, 100 | struct aws_s3_buffer_pool_reserve_meta meta); 101 | 102 | /* 103 | * Trades in the ticket for a buffer. 104 | * Cannot fail and can over allocate above mem limit if reservation was not accurate. 105 | * Using the same ticket twice will return the same buffer. 106 | * Buffer is only valid until the ticket is released. 107 | */ 108 | AWS_S3_API struct aws_byte_buf aws_s3_default_buffer_pool_acquire_buffer( 109 | struct aws_s3_buffer_pool *buffer_pool, 110 | struct aws_s3_default_buffer_ticket *ticket); 111 | 112 | /* 113 | * Get pool memory usage stats. 114 | */ 115 | AWS_S3_API struct aws_s3_default_buffer_pool_usage_stats aws_s3_default_buffer_pool_get_usage( 116 | struct aws_s3_buffer_pool *buffer_pool); 117 | 118 | /* 119 | * Trims all unused mem from the pool. 120 | * Warning: fairly slow operation, do not use in critical path. 121 | * TODO: partial trimming? ex. only trim down to 50% of max? 122 | */ 123 | AWS_S3_API void aws_s3_default_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool); 124 | 125 | AWS_EXTERN_C_END 126 | 127 | #endif /* AWS_S3_BUFFER_ALLOCATOR_H */ 128 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_default_meta_request.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_DEFAULT_META_REQUEST_H 2 | #define AWS_S3_DEFAULT_META_REQUEST_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 
7 | */ 8 | 9 | #include "aws/s3/private/s3_meta_request_impl.h" 10 | 11 | struct aws_s3_client; 12 | 13 | struct aws_s3_meta_request_default { 14 | struct aws_s3_meta_request base; 15 | 16 | size_t content_length; 17 | 18 | /* Actual type for the single request (may be AWS_S3_REQUEST_TYPE_UNKNOWN) */ 19 | enum aws_s3_request_type request_type; 20 | 21 | /* S3 operation name for the single request */ 22 | struct aws_string *operation_name; 23 | 24 | /* Members to only be used when the mutex in the base type is locked. */ 25 | struct { 26 | int cached_response_status; 27 | int request_error_code; 28 | 29 | uint32_t request_sent : 1; 30 | uint32_t request_completed : 1; 31 | 32 | } synced_data; 33 | }; 34 | 35 | /* Creates a new default meta request. This will send the request as is and pass back the response. */ 36 | struct aws_s3_meta_request *aws_s3_meta_request_default_new( 37 | struct aws_allocator *allocator, 38 | struct aws_s3_client *client, 39 | enum aws_s3_request_type request_type, 40 | uint64_t content_length, 41 | bool should_compute_content_md5, 42 | const struct aws_s3_meta_request_options *options); 43 | 44 | #endif 45 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_endpoint_resolver.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H 2 | #define AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 
7 | */ 8 | 9 | extern const struct aws_byte_cursor aws_s3_endpoint_resolver_partitions; 10 | extern const struct aws_byte_cursor aws_s3_endpoint_rule_set; 11 | 12 | #endif /* AWS_S3_ENDPOINT_RESOLVER_PRIVATE_H */ 13 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_list_objects.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_LIST_OBJECTS_H 2 | #define AWS_S3_LIST_OBJECTS_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | 11 | #include 12 | #include 13 | 14 | #include 15 | 16 | /** Struct representing the file system relevant data for an object returned from a ListObjectsV2 API call. */ 17 | struct aws_s3_object_info { 18 | /** 19 | * When a delimiter is specified in the request, S3 groups the common prefixes that contain the delimiter. 20 | * This member is set to the prefix substring ending at the first occurrence of the specified delimiter, 21 | * analogous to a directory entry of a file system. 22 | */ 23 | struct aws_byte_cursor prefix; 24 | /** 25 | * Prefix is not included. This is the object name for use with prefix for a call to GetObject() 26 | */ 27 | struct aws_byte_cursor key; 28 | /** 29 | * Size of the object in bytes. 30 | */ 31 | uint64_t size; 32 | /** 33 | * Timestamp from S3 on the latest modification, if you have a reliable clock on your machine, you COULD use this 34 | * to implement caching. 35 | */ 36 | struct aws_date_time last_modified; 37 | /** 38 | * Etag for the object, usually an MD5 hash. you COULD also use this to implement caching. 39 | */ 40 | struct aws_byte_cursor e_tag; 41 | }; 42 | 43 | /** 44 | * Invoked when an object or prefix is encountered during a ListObjectsV2 API call. Return false, to immediately 45 | * terminate the list operation. 
Returning true will continue until at least the current page is iterated. 46 | */ 47 | typedef int(aws_s3_on_object_fn)(const struct aws_s3_object_info *info, void *user_data); 48 | 49 | /** 50 | * Invoked upon the complete fetch and parsing of a page. If error_code is AWS_OP_SUCCESS and 51 | * aws_s3_paginator_has_more_results() returns true, you may want to call, 52 | * aws_s3_paginator_continue() from here to fetch the rest of the bucket contents. 53 | */ 54 | typedef void(aws_s3_on_object_list_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data); 55 | 56 | /** 57 | * Parameters for calling aws_s3_initiate_list_objects(). All values are copied out or re-seated and reference counted. 58 | */ 59 | struct aws_s3_list_objects_params { 60 | /** 61 | * Must not be NULL. The internal call will increment the reference count on client. 62 | */ 63 | struct aws_s3_client *client; 64 | /** 65 | * Must not be empty. Name of the bucket to list. 66 | */ 67 | struct aws_byte_cursor bucket_name; 68 | /** 69 | * Optional. The prefix to list. By default, this will be the root of the bucket. If you would like to start the 70 | * list operation at a prefix (similar to a file system directory), specify that here. 71 | */ 72 | struct aws_byte_cursor prefix; 73 | /** 74 | * Optional. The prefix delimiter. By default, this is the '/' character. 75 | */ 76 | struct aws_byte_cursor delimiter; 77 | /** 78 | * Optional. The continuation token for fetching the next page for ListBucketV2. You likely shouldn't set this 79 | * unless you have a special use case. 80 | */ 81 | struct aws_byte_cursor continuation_token; 82 | /** 83 | * Must not be empty. The endpoint for the S3 bucket to hit. Can be virtual or path style. 84 | */ 85 | struct aws_byte_cursor endpoint; 86 | /** 87 | * Callback to invoke on each object that's listed. 88 | */ 89 | aws_s3_on_object_fn *on_object; 90 | /** 91 | * Callback to invoke when each page of the bucket listing completes. 
92 | */ 93 | aws_s3_on_object_list_finished_fn *on_list_finished; 94 | void *user_data; 95 | }; 96 | 97 | AWS_EXTERN_C_BEGIN 98 | 99 | /** 100 | * Initiates a list objects command (without executing it), and returns a paginator object to iterate the bucket with if 101 | * successful. 102 | * 103 | * Returns NULL on failure. Check aws_last_error() for details on the error that occurred. 104 | * 105 | * this is a reference counted object. It is returned with a reference count of 1. You must call 106 | * aws_s3_paginator_release() on this object when you are finished with it. 107 | * 108 | * This does not start the actual list operation. You need to call aws_s3_paginator_continue() to start 109 | * the operation. 110 | */ 111 | AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_objects( 112 | struct aws_allocator *allocator, 113 | const struct aws_s3_list_objects_params *params); 114 | 115 | AWS_EXTERN_C_END 116 | 117 | #endif /* AWS_S3_LIST_OBJECTS_H */ 118 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_list_parts.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_LIST_PARTS_H 2 | #define AWS_S3_LIST_PARTS_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | #include 11 | 12 | #include 13 | #include 14 | 15 | /** Struct representing part info as returned from ListParts call. */ 16 | struct aws_s3_part_info { 17 | /** 18 | * Size of the object in bytes. 19 | */ 20 | uint64_t size; 21 | /** 22 | * Part number of the given part. 23 | */ 24 | uint32_t part_number; 25 | /** 26 | * Timestamp from S3 on the latest modification, if you have a reliable clock on your machine, you COULD use this 27 | * to implement caching. 28 | */ 29 | struct aws_date_time last_modified; 30 | /** 31 | * Etag for the object, usually an MD5 hash. 
you COULD also use this to implement caching. 32 | */ 33 | struct aws_byte_cursor e_tag; 34 | 35 | /** 36 | * CRC32 checksum for the part. Optional. 37 | */ 38 | struct aws_byte_cursor checksumCRC32; 39 | 40 | /** 41 | * CRC32C checksum for the part. Optional. 42 | */ 43 | struct aws_byte_cursor checksumCRC32C; 44 | 45 | /** 46 | * SHA1 checksum for the part. Optional. 47 | */ 48 | struct aws_byte_cursor checksumSHA1; 49 | 50 | /** 51 | * SHA256 checksum for the part. Optional. 52 | */ 53 | struct aws_byte_cursor checksumSHA256; 54 | }; 55 | 56 | /** 57 | * Invoked when a part is encountered during ListParts call. 58 | * Return AWS_OP_ERR (after an error has been raised) to fail the list operation. 59 | * Return AWS_OP_SUCCESS to continue until at least the current page is iterated. 60 | */ 61 | typedef int(aws_s3_on_part_fn)(const struct aws_s3_part_info *info, void *user_data); 62 | 63 | /** 64 | * Parameters for calling aws_s3_list_parts_operation_new(). All values are copied out or re-seated and reference 65 | * counted. 66 | */ 67 | struct aws_s3_list_parts_params { 68 | /** 69 | * Must not be NULL. The internal call will increment the reference count on client. 70 | */ 71 | struct aws_s3_client *client; 72 | /** 73 | * Must not be empty. Name of the bucket to list. 74 | */ 75 | struct aws_byte_cursor bucket_name; 76 | /** 77 | * Must not be empty. Key with which multipart upload was initiated. 78 | */ 79 | struct aws_byte_cursor key; 80 | /** 81 | * Must not be empty. Id identifying multipart upload. 82 | */ 83 | struct aws_byte_cursor upload_id; 84 | /** 85 | * Must not be empty. The endpoint for the S3 bucket to hit. Can be virtual or path style. 86 | */ 87 | struct aws_byte_cursor endpoint; 88 | /** 89 | * Callback to invoke on each part that's listed. 90 | */ 91 | aws_s3_on_part_fn *on_part; 92 | /** 93 | * Associated user data. 
94 | */ 95 | void *user_data; 96 | }; 97 | 98 | AWS_EXTERN_C_BEGIN 99 | 100 | AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new( 101 | struct aws_allocator *allocator, 102 | const struct aws_s3_list_parts_params *params); 103 | 104 | AWS_EXTERN_C_END 105 | 106 | #endif /* AWS_S3_LIST_PARTS_H */ 107 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_paginator.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_PAGINATOR_H 2 | #define AWS_S3_PAGINATOR_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | #include 14 | #include 15 | 16 | /** 17 | * Wrapper for a generic paginated operation. 18 | * Provides implementations for how to construct next paginated and how to read the request. 19 | * Can be used with either paginator or plugged into request loop. 20 | */ 21 | struct aws_s3_paginated_operation; 22 | 23 | /** 24 | * Generic driver for paginated operations. 25 | * Provides functionality to send requests to iterate over pages of the operation. 26 | */ 27 | struct aws_s3_paginator; 28 | 29 | typedef int(aws_s3_next_http_message_fn)( 30 | struct aws_byte_cursor *continuation_token, 31 | void *user_data, 32 | struct aws_http_message **out_message); 33 | 34 | typedef int(aws_s3_on_result_node_encountered_fn)(struct aws_xml_node *node, void *user_data); 35 | 36 | typedef void(aws_s3_on_page_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data); 37 | 38 | typedef void(aws_s3_on_paginated_operation_cleanup_fn)(void *user_data); 39 | 40 | /** 41 | * Parameters for initiating paginator. All values are copied out or re-seated and reference counted. 42 | */ 43 | struct aws_s3_paginator_params { 44 | /** 45 | * Must not be NULL. 
The internal call will increment the reference count on client. 46 | */ 47 | struct aws_s3_client *client; 48 | 49 | /** 50 | * Underlying paginated operation. Must not be NULL. 51 | */ 52 | struct aws_s3_paginated_operation *operation; 53 | 54 | /** 55 | * Optional. The continuation token for fetching the next page. You likely shouldn't set this 56 | * unless you have a special use case. 57 | */ 58 | struct aws_byte_cursor continuation_token; 59 | 60 | /** 61 | * Must not be empty. Name of the bucket to list. 62 | */ 63 | struct aws_byte_cursor bucket_name; 64 | 65 | /** 66 | * Must not be empty. Key with which multipart upload was initiated. 67 | */ 68 | struct aws_byte_cursor endpoint; 69 | 70 | /** 71 | * Callback to invoke on each part that's listed. 72 | */ 73 | aws_s3_on_page_finished_fn *on_page_finished_fn; 74 | 75 | /** 76 | * User data passed back into callbacks. 77 | */ 78 | void *user_data; 79 | }; 80 | 81 | /** 82 | * Parameters for initiating paginated operation. All values are copied out or re-seated and reference counted. 83 | */ 84 | struct aws_s3_paginated_operation_params { 85 | /** 86 | * The S3 operation name (e.g. "ListParts"). Must not be empty. 87 | */ 88 | struct aws_byte_cursor operation_name; 89 | 90 | /** 91 | * Name of the top level result node. Must not be empty. 92 | */ 93 | struct aws_byte_cursor result_xml_node_name; 94 | 95 | /** 96 | * Name of the continuation token node. Must not be empty. 97 | */ 98 | struct aws_byte_cursor continuation_token_node_name; 99 | 100 | /** 101 | * Function to generate next message. 102 | */ 103 | aws_s3_next_http_message_fn *next_message; 104 | 105 | /** 106 | * Function to parse result node. 107 | */ 108 | aws_s3_on_result_node_encountered_fn *on_result_node_encountered_fn; 109 | 110 | /** 111 | * Callback for when operation is cleaned. 112 | */ 113 | aws_s3_on_paginated_operation_cleanup_fn *on_paginated_operation_cleanup; 114 | 115 | /** 116 | * Associated user data. 
117 | */ 118 | void *user_data; 119 | }; 120 | 121 | AWS_EXTERN_C_BEGIN 122 | 123 | AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_paginator( 124 | struct aws_allocator *allocator, 125 | const struct aws_s3_paginator_params *params); 126 | 127 | AWS_S3_API void aws_s3_paginator_acquire(struct aws_s3_paginator *paginator); 128 | AWS_S3_API void aws_s3_paginator_release(struct aws_s3_paginator *paginator); 129 | 130 | AWS_S3_API struct aws_s3_paginated_operation *aws_s3_paginated_operation_new( 131 | struct aws_allocator *allocator, 132 | const struct aws_s3_paginated_operation_params *params); 133 | 134 | AWS_S3_API void aws_s3_paginated_operation_acquire(struct aws_s3_paginated_operation *operation); 135 | AWS_S3_API void aws_s3_paginated_operation_release(struct aws_s3_paginated_operation *operation); 136 | 137 | /** 138 | * Start the paginated operation. If there are more results to fetch, it will begin that work. 139 | * 140 | * Signing_config contains information for SigV4 signing for the operation. It must not be NULL. It will be copied. 141 | * 142 | * Returns AWS_OP_SUCCESS on successful start of the operation, and AWS_OP_ERR otherwise. Check aws_last_error() for 143 | * more information on the error that occurred. 144 | */ 145 | AWS_S3_API int aws_s3_paginator_continue( 146 | struct aws_s3_paginator *paginator, 147 | const struct aws_signing_config_aws *signing_config); 148 | 149 | /** 150 | * If the paginator has more results to fetch, returns true. 151 | */ 152 | AWS_S3_API bool aws_s3_paginator_has_more_results(const struct aws_s3_paginator *paginator); 153 | 154 | /** 155 | * Construct next message for the given operation. 156 | */ 157 | AWS_S3_API int aws_s3_construct_next_paginated_request_http_message( 158 | struct aws_s3_paginated_operation *operation, 159 | struct aws_byte_cursor *continuation_token, 160 | struct aws_http_message **out_message); 161 | 162 | /** 163 | * Parse received response for operation. 
164 | */ 165 | AWS_S3_API int aws_s3_paginated_operation_on_response( 166 | struct aws_s3_paginated_operation *operation, 167 | struct aws_byte_cursor *response_body, 168 | struct aws_string **continuation_token_out, 169 | bool *has_more_results_out); 170 | 171 | AWS_EXTERN_C_END 172 | 173 | #endif /* AWS_S3_PAGINATOR_H */ 174 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_parallel_input_stream.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | 6 | #ifndef AWS_S3_PARALLEL_INPUT_STREAM_H 7 | #define AWS_S3_PARALLEL_INPUT_STREAM_H 8 | 9 | #include 10 | 11 | #include 12 | 13 | AWS_PUSH_SANE_WARNING_LEVEL 14 | 15 | struct aws_byte_buf; 16 | struct aws_future_bool; 17 | struct aws_input_stream; 18 | 19 | struct aws_event_loop_group; 20 | 21 | struct aws_parallel_input_stream { 22 | const struct aws_parallel_input_stream_vtable *vtable; 23 | struct aws_allocator *alloc; 24 | struct aws_ref_count ref_count; 25 | 26 | void *impl; 27 | }; 28 | 29 | struct aws_parallel_input_stream_vtable { 30 | /** 31 | * Destroy the stream, its refcount has reached 0. 32 | */ 33 | void (*destroy)(struct aws_parallel_input_stream *stream); 34 | 35 | /** 36 | * Read into the buffer in parallel. 
37 | * The implementation needs to support this to be invoked concurrently from multiple threads 38 | */ 39 | struct aws_future_bool *( 40 | *read)(struct aws_parallel_input_stream *stream, uint64_t offset, struct aws_byte_buf *dest); 41 | }; 42 | 43 | AWS_EXTERN_C_BEGIN 44 | 45 | /** 46 | * Initialize aws_parallel_input_stream "base class" 47 | */ 48 | AWS_S3_API 49 | void aws_parallel_input_stream_init_base( 50 | struct aws_parallel_input_stream *stream, 51 | struct aws_allocator *alloc, 52 | const struct aws_parallel_input_stream_vtable *vtable, 53 | void *impl); 54 | 55 | /** 56 | * Increment reference count. 57 | * You may pass in NULL (has no effect). 58 | * Returns whatever pointer was passed in. 59 | */ 60 | AWS_S3_API 61 | struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream); 62 | 63 | /** 64 | * Decrement reference count. 65 | * You may pass in NULL (has no effect). 66 | * Always returns NULL. 67 | */ 68 | AWS_S3_API 69 | struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream); 70 | 71 | /** 72 | * Read from the offset until fill the dest, or EOF reached. 73 | * It's thread safe to be called from multiple threads without waiting for other read to complete 74 | * 75 | * @param stream The stream to read from 76 | * @param offset The offset in the stream from beginning to start reading 77 | * @param dest The output buffer read to 78 | * @return a future, which will contain an error code if something went wrong, 79 | * or a result bool indicating whether EOF has been reached. 80 | */ 81 | AWS_S3_API 82 | struct aws_future_bool *aws_parallel_input_stream_read( 83 | struct aws_parallel_input_stream *stream, 84 | uint64_t offset, 85 | struct aws_byte_buf *dest); 86 | 87 | /** 88 | * Create a new file based parallel input stream. 89 | * 90 | * This implementation will open a file handler when the read happens, and seek to the offset to start reading. 
Close 91 | * the file handler as read finishes. 92 | * 93 | * @param allocator memory allocator 94 | * @param file_name The file path to read from 95 | * @return aws_parallel_input_stream 96 | */ 97 | AWS_S3_API 98 | struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( 99 | struct aws_allocator *allocator, 100 | struct aws_byte_cursor file_name); 101 | 102 | AWS_EXTERN_C_END 103 | AWS_POP_SANE_WARNING_LEVEL 104 | 105 | #endif /* AWS_S3_PARALLEL_INPUT_STREAM_H */ 106 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_platform_info.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_S3_PLATFORM_INFO_H 2 | #define AWS_S3_S3_PLATFORM_INFO_H 3 | /** 4 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 5 | * SPDX-License-Identifier: Apache-2.0. 6 | */ 7 | 8 | #include 9 | 10 | struct aws_s3_platform_info_loader; 11 | 12 | AWS_EXTERN_C_BEGIN 13 | 14 | /** 15 | * Initializes and returns a loader for querying the compute platform for information needed for making configuration 16 | * decisions. 17 | * This will never be NULL. 18 | */ 19 | AWS_S3_API 20 | struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_new(struct aws_allocator *allocator); 21 | 22 | AWS_S3_API 23 | struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_acquire(struct aws_s3_platform_info_loader *loader); 24 | 25 | AWS_S3_API 26 | struct aws_s3_platform_info_loader *aws_s3_platform_info_loader_release(struct aws_s3_platform_info_loader *loader); 27 | 28 | /** 29 | * Retrieves the pre-configured metadata for a given ec2 instance type. If no such pre-configuration exists, returns 30 | * NULL. 
31 | */ 32 | AWS_S3_API 33 | const struct aws_s3_platform_info *aws_s3_get_platform_info_for_instance_type( 34 | struct aws_s3_platform_info_loader *loader, 35 | struct aws_byte_cursor instance_type_name); 36 | 37 | /** 38 | * Retrieves the metadata for the current environment. If EC2 instance type is unknown, or it is not an EC2 instance at 39 | * all, this value will still include the information about the system that could be determined. This value will never 40 | * be NULL. 41 | * This API is not thread safe. 42 | */ 43 | AWS_S3_API 44 | const struct aws_s3_platform_info *aws_s3_get_platform_info_for_current_environment( 45 | struct aws_s3_platform_info_loader *loader); 46 | 47 | /* 48 | * Retrieves a list of EC2 instance types with recommended configuration. 49 | * Returns aws_array_list. The caller is responsible for cleaning up the array list. 50 | */ 51 | AWS_S3_API 52 | struct aws_array_list aws_s3_get_recommended_platforms(struct aws_s3_platform_info_loader *loader); 53 | 54 | /** 55 | * Returns true if the current process is running on an Amazon EC2 instance powered by Nitro. 56 | */ 57 | AWS_S3_API 58 | bool aws_s3_is_running_on_ec2_nitro(struct aws_s3_platform_info_loader *loader); 59 | 60 | /** 61 | * Returns an EC2 instance type assuming this executable is running on Amazon EC2 powered by nitro. 62 | * 63 | * First this function will check it's running on EC2 via. attempting to read DMI info to avoid making IMDS calls. 64 | * 65 | * If the function detects it's on EC2, and it was able to detect the instance type without a call to IMDS 66 | * it will return it. 67 | * 68 | * Finally, it will call IMDS and return the instance type from there. 69 | * 70 | * Note that in the case of the IMDS call, a new client stack is spun up using 1 background thread. The call is made 71 | * synchronously with a 1 second timeout: It's not cheap. 
To make this easier, the underlying result is cached 72 | * internally and will be freed when aws_s3_library_clean_up() is called. 73 | * @return byte_cursor containing the instance type. If this is empty, the instance type could not be determined. 74 | */ 75 | AWS_S3_API 76 | struct aws_byte_cursor aws_s3_get_ec2_instance_type(struct aws_s3_platform_info_loader *loader, bool cached_only); 77 | 78 | AWS_EXTERN_C_END 79 | 80 | #endif /* AWS_S3_S3_PLATFORM_INFO_H */ 81 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3_request_messages.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_REQUEST_MESSAGES_H 2 | #define AWS_S3_REQUEST_MESSAGES_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | #include "aws/s3/s3.h" 9 | #include "aws/s3/s3_client.h" 10 | #include 11 | #include 12 | #include 13 | 14 | struct aws_allocator; 15 | struct aws_http_message; 16 | struct aws_byte_buf; 17 | struct aws_byte_cursor; 18 | struct aws_string; 19 | struct aws_array_list; 20 | struct checksum_config_storage; 21 | 22 | AWS_EXTERN_C_BEGIN 23 | 24 | /* Copy message (but not the body) and retain all headers */ 25 | AWS_S3_API 26 | struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_all_headers( 27 | struct aws_allocator *allocator, 28 | struct aws_http_message *message); 29 | 30 | /* Copy message (but not the body) and exclude specific headers. 
31 | * exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta") are excluded.*/
*/ 75 | AWS_S3_API 76 | struct aws_http_message *aws_s3_create_multipart_upload_message_new( 77 | struct aws_allocator *allocator, 78 | struct aws_http_message *base_message, 79 | const struct checksum_config_storage *checksum_config); 80 | 81 | /* Create an HTTP request for an S3 Put Object request, using the original request as a basis. Creates and assigns a 82 | * body stream using the passed in buffer. If multipart is not needed, part number and upload_id can be 0 and NULL, 83 | * respectively. */ 84 | AWS_S3_API 85 | struct aws_http_message *aws_s3_upload_part_message_new( 86 | struct aws_allocator *allocator, 87 | struct aws_http_message *base_message, 88 | struct aws_byte_buf *buffer, 89 | uint32_t part_number, 90 | const struct aws_string *upload_id, 91 | bool should_compute_content_md5, 92 | const struct checksum_config_storage *checksum_config, 93 | struct aws_byte_buf *encoded_checksum_output); 94 | 95 | /* Create an HTTP request for an S3 UploadPartCopy request, using the original request as a basis. 96 | * If multipart is not needed, part number and upload_id can be 0 and NULL, 97 | * respectively. */ 98 | AWS_S3_API 99 | struct aws_http_message *aws_s3_upload_part_copy_message_new( 100 | struct aws_allocator *allocator, 101 | struct aws_http_message *base_message, 102 | struct aws_byte_buf *buffer, 103 | uint32_t part_number, 104 | uint64_t range_start, 105 | uint64_t range_end, 106 | const struct aws_string *upload_id, 107 | bool should_compute_content_md5); 108 | 109 | /* Create an HTTP request for an S3 Complete-Multipart-Upload request. Creates the necessary XML payload using the 110 | * passed in array list of `struct aws_s3_mpu_part_info *`. Buffer passed in will be used to store 111 | * said XML payload, which will be used as the body. 
*/ 112 | AWS_S3_API 113 | struct aws_http_message *aws_s3_complete_multipart_message_new( 114 | struct aws_allocator *allocator, 115 | struct aws_http_message *base_message, 116 | struct aws_byte_buf *body_buffer, 117 | const struct aws_string *upload_id, 118 | const struct aws_array_list *parts, 119 | const struct checksum_config_storage *checksum_config); 120 | 121 | AWS_S3_API 122 | struct aws_http_message *aws_s3_abort_multipart_upload_message_new( 123 | struct aws_allocator *allocator, 124 | struct aws_http_message *base_message, 125 | const struct aws_string *upload_id); 126 | 127 | /* Creates a HEAD GetObject request to get the size of the specified object. */ 128 | AWS_S3_API 129 | struct aws_http_message *aws_s3_get_object_size_message_new( 130 | struct aws_allocator *allocator, 131 | struct aws_http_message *base_message, 132 | struct aws_byte_cursor source_bucket, 133 | struct aws_byte_cursor source_key); 134 | 135 | /* Creates a HEAD GetObject sub-request to get the size of the source object of a Copy meta request. */ 136 | AWS_S3_API 137 | struct aws_http_message *aws_s3_get_source_object_size_message_new( 138 | struct aws_allocator *allocator, 139 | struct aws_http_message *base_message, 140 | struct aws_uri *source_uri); 141 | 142 | /* Add content-md5 header to the http message passed in. 
The MD5 will be computed from the input_buf */ 143 | AWS_S3_API 144 | int aws_s3_message_util_add_content_md5_header( 145 | struct aws_allocator *allocator, 146 | struct aws_byte_buf *input_buf, 147 | struct aws_http_message *message); 148 | 149 | AWS_S3_API 150 | extern const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[]; 151 | 152 | AWS_S3_API 153 | extern const size_t g_s3_create_multipart_upload_excluded_headers_count; 154 | 155 | AWS_S3_API 156 | extern const struct aws_byte_cursor g_s3_upload_part_excluded_headers[]; 157 | 158 | AWS_S3_API 159 | extern const size_t g_s3_upload_part_excluded_headers_count; 160 | 161 | AWS_S3_API 162 | extern const struct aws_byte_cursor g_s3_complete_multipart_upload_excluded_headers[]; 163 | 164 | AWS_S3_API 165 | extern const size_t g_s3_complete_multipart_upload_excluded_headers_count; 166 | 167 | AWS_S3_API 168 | extern const struct aws_byte_cursor g_s3_abort_multipart_upload_excluded_headers[]; 169 | 170 | AWS_S3_API 171 | extern const size_t g_s3_create_session_allowed_headers_count; 172 | 173 | AWS_S3_API 174 | extern const struct aws_byte_cursor g_s3_create_session_allowed_headers[]; 175 | 176 | AWS_S3_API 177 | extern const size_t g_s3_abort_multipart_upload_excluded_headers_count; 178 | 179 | AWS_S3_API 180 | extern const struct aws_byte_cursor g_s3_list_parts_excluded_headers[]; 181 | 182 | AWS_S3_API extern const size_t g_s3_list_parts_excluded_headers_count; 183 | 184 | AWS_S3_API 185 | extern const struct aws_byte_cursor g_s3_list_parts_with_checksum_excluded_headers[]; 186 | 187 | AWS_S3_API 188 | extern const size_t g_s3_list_parts_with_checksum_excluded_headers_count; 189 | 190 | AWS_EXTERN_C_END 191 | 192 | #endif /* AWS_S3_REQUEST_H */ 193 | -------------------------------------------------------------------------------- /include/aws/s3/private/s3express_credentials_provider_impl.h: -------------------------------------------------------------------------------- 1 | #ifndef 
AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H 2 | #define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H 3 | 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | 10 | struct aws_cache; 11 | 12 | /** 13 | * Everything in the session should ONLY be accessed with lock HELD 14 | */ 15 | struct aws_s3express_session { 16 | struct aws_allocator *allocator; 17 | /* The hash key for the table storing creator and session. */ 18 | struct aws_string *hash_key; 19 | 20 | /* The s3express credentials cached for the session */ 21 | struct aws_credentials *s3express_credentials; 22 | 23 | /* Pointer to the creator if the session is in process creating */ 24 | struct aws_s3express_session_creator *creator; 25 | 26 | /* The region and host of the session */ 27 | struct aws_string *region; 28 | struct aws_string *host; 29 | 30 | struct aws_http_headers *headers; 31 | bool inactive; 32 | 33 | /* Only used for mock tests */ 34 | struct aws_s3express_credentials_provider_impl *impl; 35 | }; 36 | 37 | struct aws_s3express_credentials_provider_impl { 38 | struct aws_s3_client *client; 39 | 40 | /* Internal Refcount to make sure the provider out lives all the context. */ 41 | struct aws_ref_count internal_ref; 42 | 43 | struct aws_task *bg_refresh_task; 44 | struct aws_event_loop *bg_event_loop; 45 | 46 | const struct aws_credentials *default_original_credentials; 47 | struct aws_credentials_provider *default_original_credentials_provider; 48 | 49 | struct { 50 | /* Protected by the impl lock */ 51 | struct aws_mutex lock; 52 | /** 53 | * Store the session creators in process. 54 | * `struct aws_string *` as Key. `struct aws_s3express_session_creator *` as Value 55 | */ 56 | struct aws_hash_table session_creator_table; 57 | /** 58 | * An LRU cache to store all the sessions. 59 | * `struct aws_string *` as Key. 
`struct aws_s3express_session *` as Value 60 | **/ 61 | struct aws_cache *cache; 62 | bool destroying; 63 | } synced_data; 64 | 65 | struct { 66 | /* Overrides for testing purpose. */ 67 | 68 | struct aws_uri *endpoint_override; 69 | uint64_t bg_refresh_secs_override; 70 | 71 | bool (*s3express_session_is_valid_override)(struct aws_s3express_session *session, uint64_t now_seconds); 72 | bool (*s3express_session_about_to_expire_override)(struct aws_s3express_session *session, uint64_t now_seconds); 73 | 74 | /* The callback to be invoked before the real meta request finished callback for provider */ 75 | aws_s3_meta_request_finish_fn *meta_request_finished_overhead; 76 | } mock_test; 77 | }; 78 | 79 | /** 80 | * Configuration options for the default S3 Express credentials provider 81 | */ 82 | struct aws_s3express_credentials_provider_default_options { 83 | /** 84 | * The S3 client to fetch credentials. 85 | * Note, the client is not owned by the provider, user should keep the s3 client outlive the provider. */ 86 | struct aws_s3_client *client; 87 | 88 | /* Optional callback for shutdown complete of the provider */ 89 | aws_simple_completion_callback *shutdown_complete_callback; 90 | void *shutdown_user_data; 91 | 92 | struct { 93 | uint64_t bg_refresh_secs_override; 94 | } mock_test; 95 | }; 96 | 97 | AWS_EXTERN_C_BEGIN 98 | /** 99 | * Create the default S3 Express credentials provider. 100 | * 101 | * @param allocator 102 | * @return 103 | */ 104 | AWS_S3_API 105 | struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_new_default( 106 | struct aws_allocator *allocator, 107 | const struct aws_s3express_credentials_provider_default_options *options); 108 | 109 | /** 110 | * Encodes the hash key in the format: [host_value][hash_of_credentials_and_headers] 111 | * 112 | * The hash_of_credentials_and_headers is calculated as follows: 113 | * 1. 
Concatenate: [access_key][secret_access_key][headers] 114 | * where headers = ",header_name1:header_value1,header_name2:header_value2..." 115 | * 2. Generates SHA256 hash of the concatenated string 116 | */ 117 | AWS_S3_API 118 | struct aws_string *aws_encode_s3express_hash_key_new( 119 | struct aws_allocator *allocator, 120 | const struct aws_credentials *original_credentials, 121 | struct aws_byte_cursor host_value, 122 | struct aws_http_headers *headers); 123 | 124 | AWS_EXTERN_C_END 125 | #endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_IMPL_H */ 126 | -------------------------------------------------------------------------------- /include/aws/s3/s3.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_H 2 | #define AWS_S3_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | AWS_PUSH_SANE_WARNING_LEVEL 14 | 15 | #define AWS_C_S3_PACKAGE_ID 14 16 | 17 | enum aws_s3_errors { 18 | AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID), 19 | AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER, 20 | AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER, 21 | AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER, 22 | AWS_ERROR_S3_MISSING_ETAG, 23 | AWS_ERROR_S3_INTERNAL_ERROR, 24 | AWS_ERROR_S3_SLOW_DOWN, 25 | AWS_ERROR_S3_INVALID_RESPONSE_STATUS, 26 | AWS_ERROR_S3_MISSING_UPLOAD_ID, 27 | AWS_ERROR_S3_PROXY_PARSE_FAILED, 28 | AWS_ERROR_S3_UNSUPPORTED_PROXY_SCHEME, 29 | AWS_ERROR_S3_CANCELED, 30 | AWS_ERROR_S3_INVALID_RANGE_HEADER, 31 | AWS_ERROR_S3_MULTIRANGE_HEADER_UNSUPPORTED, 32 | AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, 33 | AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED, 34 | AWS_ERROR_S3_PAUSED, 35 | AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, 36 | AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, 37 | AWS_ERROR_S3_RESUME_FAILED, 38 | AWS_ERROR_S3_OBJECT_MODIFIED, 39 | 
AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, 40 | AWS_ERROR_S3_METRIC_DATA_NOT_AVAILABLE, 41 | AWS_ERROR_S3_INCORRECT_CONTENT_LENGTH, 42 | AWS_ERROR_S3_REQUEST_TIME_TOO_SKEWED, 43 | AWS_ERROR_S3_FILE_MODIFIED, 44 | AWS_ERROR_S3_EXCEEDS_MEMORY_LIMIT, 45 | AWS_ERROR_S3_INVALID_MEMORY_LIMIT_CONFIG, 46 | AWS_ERROR_S3EXPRESS_CREATE_SESSION_FAILED, 47 | AWS_ERROR_S3_INTERNAL_PART_SIZE_MISMATCH_RETRYING_WITH_RANGE, 48 | AWS_ERROR_S3_REQUEST_HAS_COMPLETED, 49 | AWS_ERROR_S3_RECV_FILE_ALREADY_EXISTS, 50 | AWS_ERROR_S3_RECV_FILE_NOT_FOUND, 51 | AWS_ERROR_S3_REQUEST_TIMEOUT, 52 | AWS_ERROR_S3_BUFFER_ALLOCATION_FAILED, 53 | 54 | AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID) 55 | }; 56 | 57 | enum aws_s3_subject { 58 | AWS_LS_S3_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID), 59 | AWS_LS_S3_CLIENT, 60 | AWS_LS_S3_CLIENT_STATS, 61 | AWS_LS_S3_REQUEST, 62 | AWS_LS_S3_META_REQUEST, 63 | AWS_LS_S3_ENDPOINT, 64 | AWS_LS_S3_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_S3_PACKAGE_ID) 65 | }; 66 | 67 | struct aws_s3_platform_info; 68 | 69 | #ifdef _MSC_VER 70 | # pragma warning(push) 71 | # pragma warning(disable : 4626) /* assignment operator was implicitly defined as deleted */ 72 | # pragma warning(disable : 5027) /* move assignment operator was implicitly defined as deleted */ 73 | #endif 74 | 75 | struct aws_s3_platform_info { 76 | /* name of the instance-type: example c5n.18xlarge */ 77 | struct aws_byte_cursor instance_type; 78 | /* max throughput for this instance type, in gigabits per second */ 79 | double max_throughput_gbps; 80 | /* The current build of this library specifically knows an optimal configuration for this 81 | * platform */ 82 | bool has_recommended_configuration; 83 | }; 84 | 85 | #ifdef _MSC_VER 86 | # pragma warning(pop) 87 | #endif 88 | 89 | AWS_EXTERN_C_BEGIN 90 | 91 | /** 92 | * Initializes internal datastructures used by aws-c-s3. 93 | * Must be called before using any functionality in aws-c-s3. 
94 | */ 95 | AWS_S3_API 96 | void aws_s3_library_init(struct aws_allocator *allocator); 97 | 98 | /** 99 | * Shuts down the internal datastructures used by aws-c-s3. 100 | */ 101 | AWS_S3_API 102 | void aws_s3_library_clean_up(void); 103 | 104 | /* 105 | * Returns the aws_s3_platform_info for current platform 106 | * NOTE: THIS API IS EXPERIMENTAL AND UNSTABLE 107 | */ 108 | AWS_S3_API 109 | const struct aws_s3_platform_info *aws_s3_get_current_platform_info(void); 110 | 111 | /* 112 | * Returns the ec2 instance_type for current platform if possible 113 | * NOTE: THIS API IS EXPERIMENTAL AND UNSTABLE 114 | */ 115 | AWS_S3_API 116 | struct aws_byte_cursor aws_s3_get_current_platform_ec2_intance_type(bool cached_only); 117 | 118 | /* 119 | * Retrieves a list of EC2 instance types with recommended configuration. 120 | * Returns aws_array_list. The caller is responsible for cleaning up the array list. 121 | */ 122 | AWS_S3_API 123 | struct aws_array_list aws_s3_get_platforms_with_recommended_config(void); 124 | 125 | AWS_EXTERN_C_END 126 | AWS_POP_SANE_WARNING_LEVEL 127 | 128 | #endif /* AWS_S3_H */ 129 | -------------------------------------------------------------------------------- /include/aws/s3/s3_buffer_pool.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_BUFFER_POOL_H 2 | #define AWS_S3_BUFFER_POOL_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | /** 9 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 10 | * SPDX-License-Identifier: Apache-2.0. 11 | */ 12 | 13 | /** 14 | * Generic memory pool interface. 15 | * Allows consumers of aws-c-s3 to override how buffer allocation for part buffers is done. 16 | * Refer to docs/memory_aware_request_execution.md for details on how default implementation works. 17 | * WARNING: this is currently experimental feature and does not provide API stability guarantees so should be used with 18 | * caution. 
At highlevel the flow is as follows: 19 | * - crt scheduler queues up requests to be prepared 20 | * - requests being prepared will try to reserve mem (i.e. obtain a ticket) and wait until they get it before proceeding 21 | * - once mem is reserved requests will proceed with the pipeline 22 | * - request will acquire buffer from the ticket when needed 23 | * - ticket is released when request is done with the buffer 24 | * Note: in some cases pipeline can stall if new buffer cannot be allocated (ex. async writes flow). 25 | * In this case reserve request will indicate that not granting the ticket can block and buffer pool should try to 26 | * allocate ticket right away (or wait and call waker when mem is allocated for the case of async writes). 27 | * Note for custom pool implementations: Scheduler keeps track of all outstanding futures and will error them out when 28 | * request is paused or cancelled. Its still fine for memory pool implementation to deliver ticket (it will just be 29 | * released by future right away with no side effects) or just ignore the future if its already in error state. 30 | */ 31 | 32 | AWS_PUSH_SANE_WARNING_LEVEL 33 | AWS_EXTERN_C_BEGIN 34 | struct aws_s3_buffer_ticket; 35 | 36 | /** 37 | * aws_future 38 | * Buffer ticket future used for reservations. 39 | */ 40 | AWS_FUTURE_T_POINTER_WITH_RELEASE_DECLARATION(aws_future_s3_buffer_ticket, struct aws_s3_buffer_ticket, AWS_S3_API) 41 | 42 | /** 43 | * Meta information about ticket reservation request. 44 | */ 45 | struct aws_s3_buffer_pool_reserve_meta { 46 | /* client reserving the ticket. accounts for buffer pool being shared between clients. */ 47 | struct aws_s3_client *client; 48 | 49 | /* meta request ticket is being reserved for. */ 50 | struct aws_s3_meta_request *meta_request; 51 | 52 | /* size of the buffer to reserve. */ 53 | size_t size; 54 | 55 | /* whether not granting reservation can result in request pipeline being blocked. 
Note: blocking is currently a terminal condition that cannot be recovered from, 57 | * i.e. the meta request will be stuck and not make any progress.
Triggered by CRT when 101 | * client has been idle for some time. 102 | **/ 103 | void (*trim)(struct aws_s3_buffer_pool *pool); 104 | 105 | /* Implement below for custom ref count behavior. Alternatively set those to null and init the ref count. */ 106 | struct aws_s3_buffer_pool *(*acquire)(struct aws_s3_buffer_pool *pool); 107 | struct aws_s3_buffer_pool *(*release)(struct aws_s3_buffer_pool *pool); 108 | }; 109 | 110 | /** 111 | * Polymorphic buffer pool. 112 | */ 113 | struct aws_s3_buffer_pool { 114 | struct aws_s3_buffer_pool_vtable *vtable; 115 | struct aws_ref_count ref_count; 116 | void *impl; 117 | }; 118 | 119 | AWS_S3_API struct aws_future_s3_buffer_ticket *aws_s3_buffer_pool_reserve( 120 | struct aws_s3_buffer_pool *buffer_pool, 121 | struct aws_s3_buffer_pool_reserve_meta meta); 122 | AWS_S3_API void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool); 123 | 124 | AWS_S3_API struct aws_s3_buffer_pool *aws_s3_buffer_pool_acquire(struct aws_s3_buffer_pool *buffer_pool); 125 | AWS_S3_API struct aws_s3_buffer_pool *aws_s3_buffer_pool_release(struct aws_s3_buffer_pool *buffer_pool); 126 | 127 | /** 128 | * Buffer pool configuration options. 129 | */ 130 | struct aws_s3_buffer_pool_config { 131 | struct aws_s3_client *client; /* Client creating the pool. */ 132 | size_t part_size; /* Default part size of the client. */ 133 | size_t max_part_size; /* Max part size configured on the client. */ 134 | size_t memory_limit; /* Memory limit set on the client. */ 135 | }; 136 | 137 | /** 138 | * Factory to construct the pool for the given config. Passes along buffer related info configured on the client, which 139 | * factory may ignore when considering how to construct pool. 140 | * This implementation should fail if pool cannot be constructed for some reason (ex. if config params cannot be met), 141 | * by logging failure reason, returning null and raising aws_error. 
142 | */ 143 | typedef struct aws_s3_buffer_pool *(aws_s3_buffer_pool_factory_fn)(struct aws_allocator *allocator, 144 | struct aws_s3_buffer_pool_config config); 145 | 146 | AWS_EXTERN_C_END 147 | AWS_POP_SANE_WARNING_LEVEL 148 | 149 | #endif /* AWS_S3_BUFFER_POOL_H */ 150 | -------------------------------------------------------------------------------- /include/aws/s3/s3_endpoint_resolver.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3_ENDPOINT_RESOLVER_H 2 | #define AWS_S3_ENDPOINT_RESOLVER_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | AWS_PUSH_SANE_WARNING_LEVEL 11 | 12 | struct aws_endpoints_request_context; 13 | struct aws_endpoints_rule_engine; 14 | AWS_EXTERN_C_BEGIN 15 | 16 | /** 17 | * Creates a new S3 endpoint resolver. 18 | * Warning: Before using this header, you have to enable it by 19 | * setting cmake config AWS_ENABLE_S3_ENDPOINT_RESOLVER=ON 20 | */ 21 | AWS_S3_API 22 | struct aws_endpoints_rule_engine *aws_s3_endpoint_resolver_new(struct aws_allocator *allocator); 23 | 24 | AWS_EXTERN_C_END 25 | AWS_POP_SANE_WARNING_LEVEL 26 | #endif /* AWS_S3_ENDPOINT_RESOLVER_H */ 27 | -------------------------------------------------------------------------------- /include/aws/s3/s3express_credentials_provider.h: -------------------------------------------------------------------------------- 1 | #ifndef AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H 2 | #define AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 7 | */ 8 | 9 | #include 10 | #include 11 | #include 12 | 13 | AWS_PUSH_SANE_WARNING_LEVEL 14 | 15 | struct aws_s3_client; 16 | struct aws_s3express_credentials_provider; 17 | 18 | struct aws_credentials_properties_s3express { 19 | /** 20 | * Required. 
21 | * The host address of the s3 bucket for the request. 22 | */ 23 | struct aws_byte_cursor host; 24 | /** 25 | * Optional. 26 | * The region of the bucket. 27 | * If empty, the region of the S3 client will be used. 28 | */ 29 | struct aws_byte_cursor region; 30 | 31 | struct aws_http_headers *headers; 32 | }; 33 | 34 | struct aws_s3express_credentials_provider_vtable { 35 | /** 36 | * Implementation for S3 Express provider to get S3 Express credentials 37 | */ 38 | int (*get_credentials)( 39 | struct aws_s3express_credentials_provider *provider, 40 | const struct aws_credentials *original_credentials, 41 | const struct aws_credentials_properties_s3express *properties, 42 | aws_on_get_credentials_callback_fn callback, 43 | void *user_data); 44 | 45 | /** 46 | * Implementation to destroy the provider. 47 | */ 48 | void (*destroy)(struct aws_s3express_credentials_provider *provider); 49 | }; 50 | 51 | struct aws_s3express_credentials_provider { 52 | struct aws_s3express_credentials_provider_vtable *vtable; 53 | struct aws_allocator *allocator; 54 | /* Optional callback for shutdown complete of the provider */ 55 | aws_simple_completion_callback *shutdown_complete_callback; 56 | void *shutdown_user_data; 57 | void *impl; 58 | struct aws_ref_count ref_count; 59 | }; 60 | 61 | AWS_EXTERN_C_BEGIN 62 | 63 | AWS_S3_API 64 | struct aws_s3express_credentials_provider *aws_s3express_credentials_provider_release( 65 | struct aws_s3express_credentials_provider *provider); 66 | 67 | /** 68 | * To initialize the provider with basic vtable and refcount. And hook up the refcount with vtable functions. 
69 | * 70 | * @param provider 71 | * @param allocator 72 | * @param vtable 73 | * @param impl Optional, the impl for the provider 74 | * @return AWS_S3_API 75 | */ 76 | AWS_S3_API 77 | void aws_s3express_credentials_provider_init_base( 78 | struct aws_s3express_credentials_provider *provider, 79 | struct aws_allocator *allocator, 80 | struct aws_s3express_credentials_provider_vtable *vtable, 81 | void *impl); 82 | 83 | /** 84 | * Async function for retrieving specific credentials based on properties. 85 | * 86 | * @param provider aws_s3express_credentials_provider provider to source from 87 | * @param original_credentials The credentials used to derive the credentials for S3 Express. 88 | * @param properties Specific properties for credentials being fetched. 89 | * @param user_data user data to pass to the completion callback 90 | * 91 | * callback will only be invoked if-and-only-if the return value was AWS_OP_SUCCESS. 92 | * 93 | */ 94 | AWS_S3_API int aws_s3express_credentials_provider_get_credentials( 95 | struct aws_s3express_credentials_provider *provider, 96 | const struct aws_credentials *original_credentials, 97 | const struct aws_credentials_properties_s3express *properties, 98 | aws_on_get_credentials_callback_fn callback, 99 | void *user_data); 100 | 101 | AWS_EXTERN_C_END 102 | AWS_POP_SANE_WARNING_LEVEL 103 | 104 | #endif /* AWS_S3EXPRESS_CREDENTIALS_PROVIDER_H */ 105 | -------------------------------------------------------------------------------- /samples/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | 2 | add_subdirectory(s3) 3 | -------------------------------------------------------------------------------- /samples/s3/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | project(s3 C) 2 | 3 | file(GLOB S3_SRC 4 | "*.c" 5 | ) 6 | 7 | set(S3_PROJECT_NAME s3) 8 | add_executable(${S3_PROJECT_NAME} ${S3_SRC}) 9 | 
aws_set_common_properties(${S3_PROJECT_NAME}) 10 | 11 | 12 | target_include_directories(${S3_PROJECT_NAME} PUBLIC 13 | $ 14 | $) 15 | 16 | target_link_libraries(${S3_PROJECT_NAME} PRIVATE aws-c-s3) 17 | 18 | if (BUILD_SHARED_LIBS AND NOT WIN32) 19 | message(INFO " s3 will be built with shared libs, but you may need to set LD_LIBRARY_PATH=${CMAKE_INSTALL_PREFIX}/lib to run the application") 20 | endif() 21 | 22 | install(TARGETS ${S3_PROJECT_NAME} 23 | EXPORT ${S3_PROJECT_NAME}-targets 24 | COMPONENT Runtime 25 | RUNTIME 26 | DESTINATION ${CMAKE_INSTALL_BINDIR} 27 | COMPONENT Runtime) 28 | -------------------------------------------------------------------------------- /samples/s3/app_ctx.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | struct app_ctx { 12 | struct aws_allocator *allocator; 13 | struct aws_s3_client *client; 14 | struct aws_credentials_provider *credentials_provider; 15 | struct aws_client_bootstrap *client_bootstrap; 16 | struct aws_logger logger; 17 | struct aws_mutex mutex; 18 | struct aws_condition_variable c_var; 19 | bool execution_completed; 20 | struct aws_signing_config_aws signing_config; 21 | const char *region; 22 | enum aws_log_level log_level; 23 | bool help_requested; 24 | void *sub_command_data; 25 | }; 26 | -------------------------------------------------------------------------------- /samples/s3/cli_progress_bar.h: -------------------------------------------------------------------------------- 1 | #ifndef CLI_PROGRESS_BAR_H 2 | #define CLI_PROGRESS_BAR_H 3 | 4 | /** 5 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 6 | * SPDX-License-Identifier: Apache-2.0. 
7 | */ 8 | 9 | #include 10 | #include 11 | 12 | struct progress_listener_group; 13 | struct progress_listener; 14 | 15 | /** 16 | * Creates parent container for progress bars. It's rendered as a single block, and in order to work correctly 17 | * must be the last thing currently rendered on the terminal. It will render all progress bars at ~25 FPS 18 | * if you call progress_listener_group_run_background_render_thread(). Otherwise, you can always call 19 | * progress_listener_group_render() manually. 20 | */ 21 | struct progress_listener_group *progress_listener_group_new(struct aws_allocator *allocator); 22 | 23 | /** 24 | * Wait on any background thread resources to clean up, then delete the group. 25 | */ 26 | void progress_listener_group_delete(struct progress_listener_group *group); 27 | 28 | /** 29 | * Render the current state of the progress bars in this group. Please keep in mind: this works as long as this is the 30 | * last block of text currently rendered on the terminal (the cursor position should be immediately after the last line 31 | * of this group). 32 | */ 33 | void progress_listener_group_render(struct progress_listener_group *group); 34 | 35 | /** 36 | * Initiates a background thread to run progress_listener_group_render at ~25 FPS 37 | */ 38 | void progress_listener_group_run_background_render_thread(struct progress_listener_group *group); 39 | 40 | /** 41 | * Creates a new progress bar and returns a listener back for updating state, labels, and progress. 42 | * @param group group to render the progress bar into. 43 | * @param label label (what are you tracking progress for?) 44 | * @param state_name name of the state (In progress, success, failed etc...).
45 | * @param max_value The 100% value of the progress you're tracking 46 | */ 47 | struct progress_listener *progress_listener_new( 48 | struct progress_listener_group *group, 49 | struct aws_string *label, 50 | struct aws_string *state_name, 51 | uint64_t max_value); 52 | 53 | /** 54 | * Update the state of the progress bar. 55 | */ 56 | void progress_listener_update_state(struct progress_listener *listener, struct aws_string *state_name); 57 | 58 | /** 59 | * Update the progress of the progress bar. 60 | * @param progress_update amount to increment the progress by. 61 | */ 62 | void progress_listener_update_progress(struct progress_listener *listener, uint64_t progress_update); 63 | 64 | void progress_listener_reset_progress(struct progress_listener *listener); 65 | 66 | void progress_listener_update_max_value(struct progress_listener *listener, uint64_t max_value); 67 | 68 | /** 69 | * Update the label for the progress bar. 70 | */ 71 | void progress_listener_update_label(struct progress_listener *listener, struct aws_string *new_label); 72 | 73 | /** 74 | * Render just the bar. This will not render in place and you probably should rely on the group render 75 | * to handle this for you. 76 | */ 77 | void progress_listener_render(struct progress_listener *listener); 78 | 79 | #endif /* CLI_PROGRESS_BAR_H */ 80 | -------------------------------------------------------------------------------- /samples/s3/main.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include "app_ctx.h" 17 | 18 | #include 19 | 20 | int s3_ls_main(int argc, char *const argv[], const char *command_name, void *user_data); 21 | int s3_cp_main(int argc, char *const argv[], const char *command_name, void *user_data); 22 | int s3_compute_platform_info_main(int argc, char *const argv[], const char *command_name, void *user_data); 23 | 24 | static struct aws_cli_subcommand_dispatch s_dispatch_table[] = { 25 | { 26 | .command_name = "ls", 27 | .subcommand_fn = s3_ls_main, 28 | }, 29 | { 30 | .command_name = "cp", 31 | .subcommand_fn = s3_cp_main, 32 | }, 33 | { 34 | .command_name = "platform-info", 35 | .subcommand_fn = s3_compute_platform_info_main, 36 | }, 37 | }; 38 | 39 | static void s_usage(int exit_code) { 40 | 41 | FILE *output = exit_code == 0 ? stdout : stderr; 42 | fprintf(output, "usage: s3 \n"); 43 | fprintf(output, " available commands:\n"); 44 | 45 | for (size_t i = 0; i < AWS_ARRAY_SIZE(s_dispatch_table); ++i) { 46 | fprintf(output, " %s\n", s_dispatch_table[i].command_name); 47 | } 48 | 49 | fflush(output); 50 | exit(exit_code); 51 | } 52 | 53 | static void s_setup_logger(struct app_ctx *app_ctx) { 54 | struct aws_logger_standard_options logger_options = { 55 | .level = app_ctx->log_level, 56 | .file = stderr, 57 | }; 58 | 59 | aws_logger_init_standard(&app_ctx->logger, app_ctx->allocator, &logger_options); 60 | aws_logger_set(&app_ctx->logger); 61 | } 62 | 63 | static struct aws_cli_option s_long_options[] = { 64 | {"region", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'r'}, 65 | {"verbose", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'v'}, 66 | {"help", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'h'}, 67 | /* Per getopt(3) the last element of the array has to be filled with all zeros */ 68 | {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, 69 | }; 70 | 71 | static void s_parse_app_ctx(int argc, char *const 
argv[], struct app_ctx *app_ctx) { 72 | 73 | while (true) { 74 | int option_index = 0; 75 | int c = aws_cli_getopt_long(argc, argv, "r:v:h", s_long_options, &option_index); 76 | if (c == -1) { 77 | break; 78 | } 79 | 80 | switch (c) { 81 | case 0: 82 | /* getopt_long() returns 0 if an option.flag is non-null */ 83 | break; 84 | case 'r': 85 | app_ctx->region = aws_cli_optarg; 86 | break; 87 | case 'v': 88 | if (!strcmp(aws_cli_optarg, "TRACE")) { 89 | app_ctx->log_level = AWS_LL_TRACE; 90 | } else if (!strcmp(aws_cli_optarg, "INFO")) { 91 | app_ctx->log_level = AWS_LL_INFO; 92 | } else if (!strcmp(aws_cli_optarg, "DEBUG")) { 93 | app_ctx->log_level = AWS_LL_DEBUG; 94 | } else if (!strcmp(aws_cli_optarg, "ERROR")) { 95 | app_ctx->log_level = AWS_LL_ERROR; 96 | } else { 97 | fprintf(stderr, "unsupported log level %s.\n", aws_cli_optarg); 98 | s_usage(1); 99 | } 100 | break; 101 | case 'h': 102 | app_ctx->help_requested = true; 103 | break; 104 | default: 105 | break; 106 | } 107 | } 108 | 109 | if (!app_ctx->help_requested) { 110 | if (app_ctx->log_level != AWS_LOG_LEVEL_NONE) { 111 | s_setup_logger(app_ctx); 112 | } 113 | } 114 | 115 | /* reset for the next parser */ 116 | aws_cli_reset_state(); 117 | 118 | /* signing config */ 119 | aws_s3_init_default_signing_config( 120 | &app_ctx->signing_config, aws_byte_cursor_from_c_str(app_ctx->region), app_ctx->credentials_provider); 121 | app_ctx->signing_config.flags.use_double_uri_encode = false; 122 | 123 | /* s3 client */ 124 | struct aws_s3_client_config client_config; 125 | AWS_ZERO_STRUCT(client_config); 126 | client_config.client_bootstrap = app_ctx->client_bootstrap; 127 | client_config.region = aws_byte_cursor_from_c_str(app_ctx->region); 128 | client_config.signing_config = &app_ctx->signing_config; 129 | app_ctx->client = aws_s3_client_new(app_ctx->allocator, &client_config); 130 | } 131 | 132 | int main(int argc, char *argv[]) { 133 | 134 | struct aws_allocator *allocator = aws_default_allocator(); 135 | 
aws_s3_library_init(allocator); 136 | 137 | struct app_ctx app_ctx; 138 | AWS_ZERO_STRUCT(app_ctx); 139 | app_ctx.allocator = allocator; 140 | app_ctx.c_var = (struct aws_condition_variable)AWS_CONDITION_VARIABLE_INIT; 141 | aws_mutex_init(&app_ctx.mutex); 142 | 143 | /* event loop */ 144 | struct aws_event_loop_group *event_loop_group = aws_event_loop_group_new_default(allocator, 0, NULL); 145 | 146 | /* resolver */ 147 | struct aws_host_resolver_default_options resolver_options = { 148 | .el_group = event_loop_group, 149 | .max_entries = 8, 150 | }; 151 | struct aws_host_resolver *resolver = aws_host_resolver_new_default(allocator, &resolver_options); 152 | 153 | /* client bootstrap */ 154 | struct aws_client_bootstrap_options bootstrap_options = { 155 | .event_loop_group = event_loop_group, 156 | .host_resolver = resolver, 157 | }; 158 | app_ctx.client_bootstrap = aws_client_bootstrap_new(allocator, &bootstrap_options); 159 | if (app_ctx.client_bootstrap == NULL) { 160 | printf("ERROR initializing client bootstrap\n"); 161 | return -1; 162 | } 163 | 164 | /* credentials */ 165 | struct aws_credentials_provider_chain_default_options credentials_provider_options; 166 | AWS_ZERO_STRUCT(credentials_provider_options); 167 | credentials_provider_options.bootstrap = app_ctx.client_bootstrap; 168 | app_ctx.credentials_provider = aws_credentials_provider_new_chain_default(allocator, &credentials_provider_options); 169 | 170 | s_parse_app_ctx(argc, argv, &app_ctx); 171 | int dispatch_return_code = 172 | aws_cli_dispatch_on_subcommand(argc, argv, s_dispatch_table, AWS_ARRAY_SIZE(s_dispatch_table), &app_ctx); 173 | 174 | if (dispatch_return_code && 175 | (aws_last_error() == AWS_ERROR_INVALID_ARGUMENT || aws_last_error() == AWS_ERROR_UNIMPLEMENTED)) { 176 | s_usage(app_ctx.help_requested == true ? 
0 : 1); 177 | } 178 | 179 | /* release resources */ 180 | aws_s3_client_release(app_ctx.client); 181 | aws_credentials_provider_release(app_ctx.credentials_provider); 182 | aws_client_bootstrap_release(app_ctx.client_bootstrap); 183 | aws_host_resolver_release(resolver); 184 | aws_event_loop_group_release(event_loop_group); 185 | aws_mutex_clean_up(&app_ctx.mutex); 186 | aws_s3_library_clean_up(); 187 | 188 | return dispatch_return_code; 189 | } 190 | -------------------------------------------------------------------------------- /samples/s3/s3-ls.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | 13 | #include "app_ctx.h" 14 | 15 | #include 16 | #include 17 | 18 | struct s3_ls_app_data { 19 | struct aws_uri uri; 20 | struct app_ctx *app_ctx; 21 | struct aws_mutex mutex; 22 | struct aws_condition_variable cvar; 23 | bool execution_completed; 24 | bool long_format; 25 | }; 26 | 27 | static void s_usage(int exit_code) { 28 | FILE *output = exit_code == 0 ? 
stdout : stderr; 29 | fprintf(output, "usage: s3 ls [options] s3://{bucket}[/prefix]\n"); 30 | fprintf(output, " bucket: the S3 bucket to list objects\n"); 31 | fprintf(output, " prefix: the prefix to filter\n"); 32 | fprintf(output, " -l, List in long format\n"); 33 | fprintf(output, " -h, --help\n"); 34 | fprintf(output, " Display this message and quit.\n"); 35 | exit(exit_code); 36 | } 37 | 38 | static struct aws_cli_option s_long_options[] = { 39 | {"long-format", AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 'l'}, 40 | /* Per getopt(3) the last element of the array has to be filled with all zeros */ 41 | {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, 42 | }; 43 | 44 | static void s_parse_options(int argc, char **argv, struct s3_ls_app_data *ctx) { 45 | int option_index = 0; 46 | 47 | int opt_val = 0; 48 | bool uri_found = false; 49 | do { 50 | opt_val = aws_cli_getopt_long(argc, argv, "l", s_long_options, &option_index); 51 | /* START_OF_TEXT means our positional argument */ 52 | if (opt_val == 'l') { 53 | ctx->long_format = true; 54 | } 55 | if (opt_val == 0x02) { 56 | struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_c_str(aws_cli_positional_arg); 57 | 58 | if (aws_uri_init_parse(&ctx->uri, ctx->app_ctx->allocator, &uri_cursor)) { 59 | fprintf( 60 | stderr, 61 | "Failed to parse uri %s with error %s\n", 62 | (char *)uri_cursor.ptr, 63 | aws_error_debug_str(aws_last_error())); 64 | s_usage(1); 65 | } 66 | uri_found = true; 67 | } 68 | } while (opt_val != -1); 69 | 70 | if (!uri_found) { 71 | fprintf(stderr, "A URI for the request must be supplied.\n"); 72 | s_usage(1); 73 | } 74 | } 75 | 76 | /** 77 | * Predicate used to decide if the application is ready to exit. 78 | * The corresponding condition variable is set when the last 79 | * page of ListObjects is received. 
80 | */ 81 | static bool s_app_completion_predicate(void *arg) { 82 | struct s3_ls_app_data *app_ctx = arg; 83 | return app_ctx->execution_completed; 84 | } 85 | 86 | /** 87 | * Called once for each object returned in the ListObjectsV2 responses. 88 | */ 89 | int s_on_object(const struct aws_s3_object_info *info, void *user_data) { 90 | struct s3_ls_app_data *app_ctx = user_data; 91 | 92 | if (app_ctx->long_format) { 93 | printf("%-18" PRIu64 " ", info->size); 94 | } 95 | printf("%.*s\n", (int)info->key.len, info->key.ptr); 96 | return AWS_OP_SUCCESS; 97 | } 98 | 99 | /** 100 | * Called once for each ListObjectsV2 response received. 101 | * If the response contains a continuation token indicating there are more results to be fetched, 102 | * requests the next page using aws_s3_paginator_continue. 103 | */ 104 | void s_on_list_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) { 105 | struct s3_ls_app_data *app_ctx = user_data; 106 | 107 | if (error_code == 0) { 108 | bool has_more_results = aws_s3_paginator_has_more_results(paginator); 109 | if (has_more_results) { 110 | /* get next page */ 111 | int result = aws_s3_paginator_continue(paginator, &app_ctx->app_ctx->signing_config); 112 | if (result) { 113 | fprintf(stderr, "ERROR returned by aws_s3_paginator_continue from s_on_list_finished: %d\n", result); 114 | } 115 | return; 116 | } 117 | } else { 118 | fprintf( 119 | stderr, 120 | "Failure while listing objects. Please check if you have valid credentials and s3 path is correct. " 121 | "Error: " 122 | "%s\n", 123 | aws_error_debug_str(error_code)); 124 | } 125 | 126 | /* all pages received. triggers the condition variable to exit the application. 
*/ 127 | aws_mutex_lock(&app_ctx->mutex); 128 | app_ctx->execution_completed = true; 129 | aws_mutex_unlock(&app_ctx->mutex); 130 | aws_condition_variable_notify_one(&app_ctx->cvar); 131 | } 132 | 133 | int s3_ls_main(int argc, char *argv[], const char *command_name, void *user_data) { 134 | (void)command_name; 135 | struct app_ctx *app_ctx = user_data; 136 | 137 | if (app_ctx->help_requested) { 138 | s_usage(0); 139 | } 140 | 141 | if (!app_ctx->region) { 142 | fprintf(stderr, "region is a required argument\n"); 143 | s_usage(1); 144 | } 145 | 146 | struct s3_ls_app_data impl_data = { 147 | .app_ctx = app_ctx, 148 | .mutex = AWS_MUTEX_INIT, 149 | .cvar = AWS_CONDITION_VARIABLE_INIT, 150 | }; 151 | 152 | app_ctx->sub_command_data = &impl_data; 153 | 154 | s_parse_options(argc, argv, &impl_data); 155 | 156 | struct aws_byte_cursor bucket = impl_data.uri.host_name; 157 | struct aws_byte_cursor prefix; 158 | if (impl_data.uri.path.len == 0 || (impl_data.uri.path.len == 1 && impl_data.uri.path.ptr[0] == '/')) { 159 | prefix.len = 0; 160 | prefix.ptr = NULL; 161 | } else { 162 | /* skips the initial / in the path */ 163 | prefix.len = impl_data.uri.path.len - 1; 164 | prefix.ptr = impl_data.uri.path.ptr + 1; 165 | } 166 | 167 | /* listObjects */ 168 | struct aws_s3_list_objects_params params = {.client = app_ctx->client, .bucket_name = bucket, .prefix = prefix}; 169 | 170 | char endpoint[1024]; 171 | snprintf(endpoint, sizeof(endpoint), "s3.%s.amazonaws.com", app_ctx->region); 172 | params.endpoint = aws_byte_cursor_from_c_str(endpoint); 173 | params.user_data = &impl_data; 174 | params.on_object = &s_on_object; 175 | params.on_list_finished = &s_on_list_finished; 176 | 177 | struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(app_ctx->allocator, ¶ms); 178 | int paginator_result = aws_s3_paginator_continue(paginator, &app_ctx->signing_config); 179 | if (paginator_result) { 180 | printf("ERROR returned from initial call to aws_s3_paginator_continue: %d \n", 
paginator_result); 181 | } 182 | 183 | aws_s3_paginator_release(paginator); 184 | 185 | /* wait completion of last page */ 186 | aws_mutex_lock(&impl_data.mutex); 187 | aws_condition_variable_wait_pred(&impl_data.cvar, &impl_data.mutex, s_app_completion_predicate, &impl_data); 188 | aws_mutex_unlock(&impl_data.mutex); 189 | 190 | return 0; 191 | } 192 | -------------------------------------------------------------------------------- /samples/s3/s3-platform_info.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | 6 | #include 7 | 8 | #include "app_ctx.h" 9 | 10 | struct s3_compute_platform_ctx { 11 | struct app_ctx *app_ctx; 12 | struct aws_byte_cursor instance_type; 13 | }; 14 | 15 | static void s_usage(int exit_code) { 16 | FILE *output = exit_code == 0 ? stdout : stderr; 17 | fprintf(output, "usage: s3 platform-info [options]\n"); 18 | fprintf( 19 | output, 20 | " -instance-type, (optional) Instance type to look up configuration for, if not set it will be the current " 21 | "executing environment. 
\n"); 22 | fprintf(output, " -h, --help\n"); 23 | fprintf(output, " Display this message and quit.\n"); 24 | exit(exit_code); 25 | } 26 | 27 | static struct aws_cli_option s_long_options[] = { 28 | {"instance-type", AWS_CLI_OPTIONS_REQUIRED_ARGUMENT, NULL, 'i'}, 29 | /* Per getopt(3) the last element of the array has to be filled with all zeros */ 30 | {NULL, AWS_CLI_OPTIONS_NO_ARGUMENT, NULL, 0}, 31 | }; 32 | 33 | static void s_parse_options(int argc, char **argv, struct s3_compute_platform_ctx *ctx) { 34 | int option_index = 0; 35 | 36 | int opt_val = 0; 37 | do { 38 | opt_val = aws_cli_getopt_long(argc, argv, "i:", s_long_options, &option_index); 39 | /* START_OF_TEXT means our positional argument */ 40 | if (opt_val == 'i') { 41 | ctx->instance_type = aws_byte_cursor_from_c_str(aws_cli_optarg); 42 | } 43 | } while (opt_val != -1); 44 | } 45 | 46 | int s3_compute_platform_info_main(int argc, char *argv[], const char *command_name, void *user_data) { 47 | (void)command_name; 48 | 49 | struct app_ctx *app_ctx = user_data; 50 | 51 | if (app_ctx->help_requested) { 52 | s_usage(0); 53 | } 54 | 55 | struct s3_compute_platform_ctx compute_platform_app_ctx = { 56 | .app_ctx = app_ctx, 57 | }; 58 | app_ctx->sub_command_data = &compute_platform_app_ctx; 59 | 60 | s_parse_options(argc, argv, &compute_platform_app_ctx); 61 | 62 | const struct aws_s3_platform_info *platform_info = aws_s3_get_current_platform_info(); 63 | 64 | printf("{\n"); 65 | printf("\t'instance_type': '" PRInSTR "',\n", AWS_BYTE_CURSOR_PRI(platform_info->instance_type)); 66 | printf("\t'max_throughput_gbps': %d,\n", (int)platform_info->max_throughput_gbps); 67 | printf("\t'has_recommended_configuration': %s,\n", platform_info->has_recommended_configuration ? 
"true" : "false"); 68 | return 0; 69 | } 70 | -------------------------------------------------------------------------------- /scripts/update_s3_endpoint_resolver_artifacts.py: -------------------------------------------------------------------------------- 1 | # This script pulls latest 'partitions.json' and 's3-endpoint-rule-set.json' from Git. 2 | # You will need a secret in secrets manager which has the 'ruleset-url' and 'ruleset-token'. 3 | # It uses the latest files to generate 'source/s3_endpoint_resolver/aws_s3_endpoint_rule_set.c' and 4 | # 'source/s3_endpoint_resolver/aws_s3_endpoint_resolver_partition.c' 5 | 6 | import argparse 7 | import json 8 | import boto3 9 | import requests 10 | 11 | 12 | def escape_char(c): 13 | escape_dict = { 14 | '\\': '\\\\', 15 | '\'': '\\\'', 16 | '\0': '\\0', 17 | '\a': '\\a', 18 | '\b': '\\b', 19 | '\f': '\\f', 20 | '\n': '\\n', 21 | '\r': '\\r', 22 | '\t': '\\t', 23 | '\v': '\\v' 24 | } 25 | 26 | return escape_dict.get(c, c) 27 | 28 | 29 | def get_header(): 30 | return """\ 31 | /** 32 | * Copyright Amazon.com, Inc. or its affiliates. 33 | * All Rights Reserved. SPDX-License-Identifier: Apache-2.0. 34 | */ 35 | #include "aws/s3/private/s3_endpoint_resolver.h" 36 | #include 37 | 38 | /** 39 | * This file is generated using scripts/update_s3_endpoint_resolver_artifacts.py. 40 | * Do not modify directly. 
*/ 41 | /* clang-format off */ 42 | 43 | """ 44 | 45 | 46 | def generate_c_file_from_json(json_content, c_file_name, c_struct_name): 47 | num_chars_per_line = 20 48 | 49 | try: 50 | # Compact the json 51 | compact_json_str = json.dumps(json_content, separators=(',', ':')) 52 | compact_c = [] 53 | for i in range(0, len(compact_json_str), num_chars_per_line): 54 | compact_c.append( 55 | ', '.join("'{}'".format(escape_char(char)) for char in compact_json_str[i:i + num_chars_per_line])) 56 | 57 | # Write json to a C file 58 | with open(c_file_name, 'w') as f: 59 | f.write(get_header()) 60 | f.write(f"static const char s_generated_array[] = {{\n\t") 61 | f.write(",\n\t".join(compact_c)) 62 | f.write("};\n\n") 63 | 64 | f.write(f"const struct aws_byte_cursor {c_struct_name} = {{\n\t") 65 | f.write(f".len = {len(compact_json_str)},\n\t") 66 | f.write(f".ptr = (uint8_t *) s_generated_array\n}};\n") 67 | 68 | print(f"{c_file_name} has been created successfully.") 69 | 70 | except Exception as e: 71 | print(f"An error occurred: {e}") 72 | 73 | 74 | def get_secret_from_secrets_manager(secret_name, region_name): 75 | session = boto3.session.Session() 76 | client = session.client( 77 | service_name='secretsmanager', 78 | region_name=region_name 79 | ) 80 | 81 | try: 82 | get_secret_value_response = client.get_secret_value( 83 | SecretId=secret_name 84 | ) 85 | except Exception as e: 86 | raise e 87 | 88 | return json.loads(get_secret_value_response['SecretString']) 89 | 90 | 91 | def download_from_git(url, token=None): 92 | headers = {'Accept': 'application/vnd.github+json'} 93 | if token is not None: 94 | headers['Authorization'] = f"Bearer {token}" 95 | http_response = requests.get(url, headers=headers) 96 | if http_response.status_code != 200: 97 | raise Exception(f"HTTP Status code is {http_response.status_code}") 98 | 99 | return json.loads(http_response.content.decode()) 100 | 101 | 102 | if __name__ == '__main__': 103 | argument_parser = 
argparse.ArgumentParser(description="Endpoint Ruleset Updater") 104 | argument_parser.add_argument("--ruleset", metavar="", 105 | required=False, help="Path to endpoint ruleset json file") 106 | argument_parser.add_argument("--partitions", metavar="", 107 | required=False, help="Path to partitions json file") 108 | parsed_args = argument_parser.parse_args() 109 | 110 | git_secret = get_secret_from_secrets_manager("s3/endpoint/resolver/artifacts/git", "us-east-1") 111 | 112 | if (parsed_args.ruleset): 113 | with open(parsed_args.ruleset) as f: 114 | rule_set = json.load(f) 115 | else: 116 | rule_set = download_from_git(git_secret['ruleset-url'], git_secret['ruleset-token']) 117 | 118 | if (parsed_args.partitions): 119 | with open(parsed_args.partitions) as f: 120 | partition = json.load(f) 121 | else: 122 | partition = download_from_git('https://raw.githubusercontent.com/aws/aws-sdk-cpp/main/tools/code-generation/partitions/partitions.json') 123 | 124 | generate_c_file_from_json( 125 | rule_set, 126 | 'source/s3_endpoint_resolver/aws_s3_endpoint_rule_set.c', 127 | 'aws_s3_endpoint_rule_set') 128 | 129 | generate_c_file_from_json( 130 | partition, 131 | 'source/s3_endpoint_resolver/aws_s3_endpoint_resolver_partition.c', 132 | 'aws_s3_endpoint_resolver_partitions') 133 | -------------------------------------------------------------------------------- /source/s3_buffer_pool.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | 6 | #include "aws/s3/s3_buffer_pool.h" 7 | #include "aws/s3/private/s3_default_buffer_pool.h" 8 | 9 | AWS_FUTURE_T_POINTER_WITH_RELEASE_IMPLEMENTATION( 10 | aws_future_s3_buffer_ticket, 11 | struct aws_s3_buffer_ticket, 12 | aws_s3_buffer_ticket_release) 13 | 14 | struct aws_s3_buffer_pool *aws_s3_buffer_pool_acquire(struct aws_s3_buffer_pool *buffer_pool) { 15 | if (buffer_pool != NULL) { 16 | if (buffer_pool->vtable->acquire) { 17 | buffer_pool->vtable->acquire(buffer_pool); 18 | } else { 19 | aws_ref_count_acquire(&buffer_pool->ref_count); 20 | } 21 | } 22 | return buffer_pool; 23 | } 24 | 25 | struct aws_s3_buffer_pool *aws_s3_buffer_pool_release(struct aws_s3_buffer_pool *buffer_pool) { 26 | if (buffer_pool != NULL) { 27 | if (buffer_pool->vtable->release) { 28 | buffer_pool->vtable->release(buffer_pool); 29 | } else { 30 | aws_ref_count_release(&buffer_pool->ref_count); 31 | } 32 | } 33 | return NULL; 34 | } 35 | 36 | struct aws_future_s3_buffer_ticket *aws_s3_buffer_pool_reserve( 37 | struct aws_s3_buffer_pool *buffer_pool, 38 | struct aws_s3_buffer_pool_reserve_meta meta) { 39 | AWS_PRECONDITION(buffer_pool); 40 | 41 | return buffer_pool->vtable->reserve(buffer_pool, meta); 42 | } 43 | 44 | void aws_s3_buffer_pool_trim(struct aws_s3_buffer_pool *buffer_pool) { 45 | AWS_PRECONDITION(buffer_pool); 46 | 47 | buffer_pool->vtable->trim(buffer_pool); 48 | } 49 | 50 | struct aws_s3_buffer_ticket *aws_s3_buffer_ticket_acquire(struct aws_s3_buffer_ticket *ticket) { 51 | if (ticket != NULL) { 52 | if (ticket->vtable->acquire) { 53 | ticket->vtable->acquire(ticket); 54 | } else { 55 | aws_ref_count_acquire(&ticket->ref_count); 56 | } 57 | } 58 | return ticket; 59 | } 60 | 61 | struct aws_s3_buffer_ticket *aws_s3_buffer_ticket_release(struct aws_s3_buffer_ticket *ticket) { 62 | if (ticket != NULL) { 63 | if (ticket->vtable->release) { 64 | ticket->vtable->release(ticket); 65 | } else { 66 | aws_ref_count_release(&ticket->ref_count); 67 | } 68 | } 69 | return 
/* Hand out the ticket's buffer. Pure vtable dispatch. */
struct aws_byte_buf aws_s3_buffer_ticket_claim(struct aws_s3_buffer_ticket *ticket) {
    AWS_PRECONDITION(ticket);

    return ticket->vtable->claim(ticket);
}

/* Input-stream wrapper that folds every byte read from the wrapped stream
 * into a running checksum, and base64-encodes the final digest into a
 * caller-owned buffer once end-of-stream is reached (or at destroy time). */
struct aws_checksum_stream {
    struct aws_input_stream base;
    struct aws_allocator *allocator;

    /* Wrapped stream; owned (released when this stream is destroyed). */
    struct aws_input_stream *old_stream;
    /* Running checksum state. */
    struct aws_s3_checksum *checksum;
    /* Raw digest bytes produced by aws_checksum_finalize(). */
    struct aws_byte_buf checksum_result;
    /* base64 encoded checksum of the stream, updated at end of stream */
    struct aws_byte_buf *encoded_checksum_output;
    /* True once the digest has been finalized; finalization is one-shot. */
    bool checksum_finalized;
};

/* Finalize the running checksum and write its base64 encoding into
 * impl->encoded_checksum_output. Idempotent: once finalized (successfully or
 * not), later calls return success immediately. On finalize failure the raw
 * result buffer is cleared, the stream is still marked finalized, and
 * AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED is raised. */
static int s_finalize_checksum(struct aws_checksum_stream *impl) {
    if (impl->checksum_finalized) {
        return AWS_OP_SUCCESS;
    }

    if (aws_checksum_finalize(impl->checksum, &impl->checksum_result) != AWS_OP_SUCCESS) {
        AWS_LOGF_ERROR(
            AWS_LS_S3_CLIENT,
            "Failed to calculate checksum with error code %d (%s).",
            aws_last_error(),
            aws_error_str(aws_last_error()));
        aws_byte_buf_reset(&impl->checksum_result, true);
        impl->checksum_finalized = true;
        return aws_raise_error(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED);
    }
    struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&impl->checksum_result);
    /* Encoding is expected to succeed -- presumably the caller sizes
     * encoded_checksum_output for the base64 digest; TODO confirm. */
    AWS_FATAL_ASSERT(aws_base64_encode(&checksum_result_cursor, impl->encoded_checksum_output) == AWS_OP_SUCCESS);
    impl->checksum_finalized = true;
    return AWS_OP_SUCCESS;
}
/* Seeking is forbidden: rewinding would make the accumulated digest diverge
 * from the bytes actually delivered. Always raises UNSUPPORTED_OPERATION. */
static int s_aws_input_checksum_stream_seek(
    struct aws_input_stream *stream,
    int64_t offset,
    enum aws_stream_seek_basis basis) {
    (void)stream;
    (void)offset;
    (void)basis;
    AWS_LOGF_ERROR(
        AWS_LS_S3_CLIENT,
        "Cannot seek on checksum stream, as it will cause the checksum output to mismatch the checksum of the stream "
        "contents");
    AWS_ASSERT(false);
    return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
}

/* Read from the wrapped stream into `dest`, then fold exactly the newly
 * appended bytes into the running checksum. Finalizes the checksum as soon
 * as the wrapped stream reports end-of-stream. */
static int s_aws_input_checksum_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) {
    struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);

    /* Remember where the wrapped stream starts appending, so only the new
     * bytes are checksummed. */
    size_t original_len = dest->len;
    if (aws_input_stream_read(impl->old_stream, dest)) {
        /* If read failed, `aws_input_stream_read` will handle the error to restore the dest. No need to handle error
         * here. */
        return AWS_OP_ERR;
    }
    struct aws_byte_cursor to_sum = aws_byte_cursor_from_buf(dest);
    /* Move the cursor past the bytes that were already in dest before this read. */
    aws_byte_cursor_advance(&to_sum, original_len);
    if (aws_checksum_update(impl->checksum, &to_sum)) {
        return AWS_OP_ERR;
    }
    /* If we're at the end of the stream, compute and store the final checksum */
    struct aws_stream_status status;
    if (aws_input_stream_get_status(impl->old_stream, &status)) {
        return AWS_OP_ERR;
    }
    if (status.is_end_of_stream) {
        return s_finalize_checksum(impl);
    }
    return AWS_OP_SUCCESS;
}

/* Status is delegated straight to the wrapped stream. */
static int s_aws_input_checksum_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) {
    struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);
    return aws_input_stream_get_status(impl->old_stream, status);
}
/* Length is delegated straight to the wrapped stream. */
static int s_aws_input_checksum_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) {
    struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);
    return aws_input_stream_get_length(impl->old_stream, out_length);
}

/* We take ownership of the old input stream, and destroy it with this input stream. This is because we want to be able
 * to substitute in the chunk_stream for the cursor stream currently used in s_s3_meta_request_default_prepare_request
 * which returns the new stream. So in order to prevent the need of keeping track of two input streams we instead
 * consume the cursor stream and destroy it with this one */
static void s_aws_input_checksum_stream_destroy(struct aws_checksum_stream *impl) {
    if (!impl) {
        return;
    }

    /* Compute the checksum of whatever was read, if we didn't reach the end of the underlying stream.
     * (s_finalize_checksum is a no-op if it already ran.) */
    s_finalize_checksum(impl);

    aws_checksum_destroy(impl->checksum);
    aws_input_stream_release(impl->old_stream);
    aws_byte_buf_clean_up(&impl->checksum_result);
    aws_mem_release(impl->allocator, impl);
}

static struct aws_input_stream_vtable s_aws_input_checksum_stream_vtable = {
    .seek = s_aws_input_checksum_stream_seek,
    .read = s_aws_input_checksum_stream_read,
    .get_status = s_aws_input_checksum_stream_get_status,
    .get_length = s_aws_input_checksum_stream_get_length,
};

/* Wrap `existing_stream` in a checksumming stream.
 *
 * `checksum_output` is a caller-owned buffer that receives the base64-encoded
 * digest when the stream ends (or when the wrapper is destroyed). Takes a
 * reference on `existing_stream`; released when the wrapper's ref count hits
 * zero. Returns NULL if the checksum implementation for `algorithm` cannot
 * be created. */
struct aws_input_stream *aws_checksum_stream_new(
    struct aws_allocator *allocator,
    struct aws_input_stream *existing_stream,
    enum aws_s3_checksum_algorithm algorithm,
    struct aws_byte_buf *checksum_output) {
    AWS_PRECONDITION(existing_stream);

    struct aws_checksum_stream *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_checksum_stream));

    impl->allocator = allocator;
    impl->base.vtable = &s_aws_input_checksum_stream_vtable;

    impl->checksum = aws_checksum_new(allocator, algorithm);
    if (impl->checksum == NULL) {
        goto on_error;
    }
    /* Result buffer sized for the raw (pre-base64) digest. */
    aws_byte_buf_init(&impl->checksum_result, allocator, impl->checksum->digest_size);
    impl->old_stream = aws_input_stream_acquire(existing_stream);
    impl->encoded_checksum_output = checksum_output;
    aws_ref_count_init(
        &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_checksum_stream_destroy);

    return &impl->base;
on_error:
    /* Nothing but `impl` itself has been allocated on this path. */
    aws_mem_release(impl->allocator, impl);
    return NULL;
}
impl->checksum->digest_size); 136 | impl->old_stream = aws_input_stream_acquire(existing_stream); 137 | impl->encoded_checksum_output = checksum_output; 138 | aws_ref_count_init( 139 | &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_checksum_stream_destroy); 140 | 141 | return &impl->base; 142 | on_error: 143 | aws_mem_release(impl->allocator, impl); 144 | return NULL; 145 | } 146 | -------------------------------------------------------------------------------- /source/s3_endpoint_resolver/s3_endpoint_resolver.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | 6 | #include "aws/s3/private/s3_endpoint_resolver.h" 7 | #include 8 | #include 9 | #include 10 | 11 | struct aws_endpoints_rule_engine *aws_s3_endpoint_resolver_new(struct aws_allocator *allocator) { 12 | struct aws_endpoints_ruleset *ruleset = NULL; 13 | struct aws_partitions_config *partitions = NULL; 14 | struct aws_endpoints_rule_engine *rule_engine = NULL; 15 | 16 | ruleset = aws_endpoints_ruleset_new_from_string(allocator, aws_s3_endpoint_rule_set); 17 | if (!ruleset) { 18 | goto cleanup; 19 | } 20 | 21 | partitions = aws_partitions_config_new_from_string(allocator, aws_s3_endpoint_resolver_partitions); 22 | if (!partitions) { 23 | goto cleanup; 24 | } 25 | 26 | rule_engine = aws_endpoints_rule_engine_new(allocator, ruleset, partitions); 27 | 28 | cleanup: 29 | aws_endpoints_ruleset_release(ruleset); 30 | aws_partitions_config_release(partitions); 31 | return rule_engine; 32 | } 33 | -------------------------------------------------------------------------------- /source/s3_parallel_input_stream.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
/* Initialize the common base of a parallel-input-stream implementation:
 * zero the struct, wire up allocator/vtable/impl, and start the ref count
 * at 1 with vtable->destroy as the zero-refs callback. */
void aws_parallel_input_stream_init_base(
    struct aws_parallel_input_stream *stream,
    struct aws_allocator *alloc,
    const struct aws_parallel_input_stream_vtable *vtable,
    void *impl) {

    AWS_ZERO_STRUCT(*stream);
    stream->alloc = alloc;
    stream->vtable = vtable;
    stream->impl = impl;
    aws_ref_count_init(&stream->ref_count, stream, (aws_simple_completion_callback *)vtable->destroy);
}

/* Take a reference. NULL-safe; returns the same pointer. */
struct aws_parallel_input_stream *aws_parallel_input_stream_acquire(struct aws_parallel_input_stream *stream) {
    if (stream != NULL) {
        aws_ref_count_acquire(&stream->ref_count);
    }
    return stream;
}

/* Release a reference. NULL-safe; always returns NULL so callers can clear
 * their pointer in one statement. */
struct aws_parallel_input_stream *aws_parallel_input_stream_release(struct aws_parallel_input_stream *stream) {
    if (stream != NULL) {
        aws_ref_count_release(&stream->ref_count);
    }
    return NULL;
}

/* Kick off a read starting at `offset` that appends into `dest`.
 * Fails fast with AWS_ERROR_SHORT_BUFFER when `dest` is already full, since
 * implementations append into the remaining capacity. The future resolves
 * when the read completes; its bool result comes from the implementation. */
struct aws_future_bool *aws_parallel_input_stream_read(
    struct aws_parallel_input_stream *stream,
    uint64_t offset,
    struct aws_byte_buf *dest) {
    /* Ensure the buffer has space available */
    if (dest->len == dest->capacity) {
        struct aws_future_bool *future = aws_future_bool_new(stream->alloc);
        aws_future_bool_set_error(future, AWS_ERROR_SHORT_BUFFER);
        return future;
    }
    struct aws_future_bool *future = stream->vtable->read(stream, offset, dest);
    return future;
}

/* File-backed implementation: stores only the path. Each read opens its own
 * file stream, so concurrent reads at different offsets never share a file
 * cursor. */
struct aws_parallel_input_stream_from_file_impl {
    struct aws_parallel_input_stream base;

    /* Owned copy of the file path; freed in the destructor. */
    struct aws_string *file_path;
};

/* Zero-refs destructor for the file-backed implementation. */
static void s_para_from_file_destroy(struct aws_parallel_input_stream *stream) {
    struct aws_parallel_input_stream_from_file_impl *impl = stream->impl;

    aws_string_destroy(impl->file_path);

    aws_mem_release(stream->alloc, impl);
}
| struct aws_future_bool *s_para_from_file_read( 71 | struct aws_parallel_input_stream *stream, 72 | uint64_t offset, 73 | struct aws_byte_buf *dest) { 74 | 75 | struct aws_future_bool *future = aws_future_bool_new(stream->alloc); 76 | struct aws_parallel_input_stream_from_file_impl *impl = stream->impl; 77 | bool success = false; 78 | struct aws_input_stream *file_stream = NULL; 79 | struct aws_stream_status status = { 80 | .is_end_of_stream = false, 81 | .is_valid = true, 82 | }; 83 | 84 | file_stream = aws_input_stream_new_from_file(stream->alloc, aws_string_c_str(impl->file_path)); 85 | if (!file_stream) { 86 | goto done; 87 | } 88 | 89 | if (aws_input_stream_seek(file_stream, offset, AWS_SSB_BEGIN)) { 90 | goto done; 91 | } 92 | /* Keep reading until fill the buffer. 93 | * Note that we must read() after seek() to determine if we're EOF, the seek alone won't trigger it. */ 94 | while ((dest->len < dest->capacity) && !status.is_end_of_stream) { 95 | /* Read from stream */ 96 | if (aws_input_stream_read(file_stream, dest) != AWS_OP_SUCCESS) { 97 | goto done; 98 | } 99 | 100 | /* Check if stream is done */ 101 | if (aws_input_stream_get_status(file_stream, &status) != AWS_OP_SUCCESS) { 102 | goto done; 103 | } 104 | } 105 | success = true; 106 | done: 107 | if (success) { 108 | aws_future_bool_set_result(future, status.is_end_of_stream); 109 | } else { 110 | aws_future_bool_set_error(future, aws_last_error()); 111 | } 112 | 113 | aws_input_stream_release(file_stream); 114 | 115 | return future; 116 | } 117 | 118 | static struct aws_parallel_input_stream_vtable s_parallel_input_stream_from_file_vtable = { 119 | .destroy = s_para_from_file_destroy, 120 | .read = s_para_from_file_read, 121 | }; 122 | 123 | struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file( 124 | struct aws_allocator *allocator, 125 | struct aws_byte_cursor file_name) { 126 | 127 | struct aws_parallel_input_stream_from_file_impl *impl = 128 | aws_mem_calloc(allocator, 1, 
/* Create a file-backed parallel input stream for `file_name`.
 * Validates that the path exists up front so callers fail fast instead of
 * erroring on the first read. Returns NULL (with an error raised) when the
 * path is missing. The returned stream starts with one reference. */
struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file(
    struct aws_allocator *allocator,
    struct aws_byte_cursor file_name) {

    struct aws_parallel_input_stream_from_file_impl *impl =
        aws_mem_calloc(allocator, 1, sizeof(struct aws_parallel_input_stream_from_file_impl));
    aws_parallel_input_stream_init_base(&impl->base, allocator, &s_parallel_input_stream_from_file_vtable, impl);
    impl->file_path = aws_string_new_from_cursor(allocator, &file_name);
    if (!aws_path_exists(impl->file_path)) {
        /* If file path not exists, raise error from errno. */
        /* NOTE(review): assumes errno still holds the underlying failure set
         * inside aws_path_exists -- confirm that guarantee. */
        aws_translate_and_raise_io_error(errno);
        goto error;
    }
    return &impl->base;
error:
    s_para_from_file_destroy(&impl->base);
    return NULL;
}
Please try again.", 10 | "656c76696e6727732072657175657374", 11 | "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", 12 | "" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CompleteMultipartUpload/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "close"}, 4 | "body": [ 5 | "", 6 | "", 7 | "http://default.s3.us-west-2.amazonaws.com/default", 8 | "default", 9 | "default", 10 | "\"3858f62230ac3c915f300c664312c11f-9\"", 11 | "" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CompleteMultipartUpload/sse_kms.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "close", 4 | "x-amz-server-side-encryption": "aws:kms"}, 5 | "body": [ 6 | "", 7 | "", 8 | "http://default.s3.us-west-2.amazonaws.com/default", 9 | "default", 10 | "default", 11 | "\"3858f62230ac3c915f300c664312c11f-9\"", 12 | "" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CreateMultipartUpload/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"x-amz-request-id": "12345"}, 4 | "body": [ 5 | "", 6 | "", 7 | "default", 8 | "default", 9 | "defaultID", 10 | "" 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CreateMultipartUpload/request_time_too_skewed.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 403, 3 | "headers": {"x-amz-request-id": "12345"}, 4 | "body": [ 5 | "", 6 | "", 7 | "", 8 | "RequestTimeTooSkewed", 9 | "The difference between the request time and the current time is too 
large.", 10 | "20230725T161257Z", 11 | "2023-07-25T16:27:59Z", 12 | "900000", 13 | "" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CreateMultipartUpload/request_timeout.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "status": 400, 4 | "headers": {"x-amz-request-id": "12345"}, 5 | "body": [ 6 | "", 7 | "", 8 | "", 9 | "RequestTimeout", 10 | "Your socket connection to the server was not read from or written to within the timeout period. Idle connections will be closed.", 11 | "1234", 12 | "asdf", 13 | "" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CreateSession/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"x-amz-request-id": "12345"}, 4 | "body": [ 5 | "", 6 | "", 7 | "", 8 | "sessionToken", 9 | "secretKey", 10 | "accessKeyId", 11 | "2023-06-26T17:33:30Z", 12 | "", 13 | "" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/mock_s3_server/CreateSession/sse_kms.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": { 4 | "x-amz-request-id": "12345", 5 | "x-amz-server-side-encryption": "aws:kms" 6 | }, 7 | "request_headers": { 8 | "x-amz-server-side-encryption": "aws:kms" 9 | }, 10 | "body": [ 11 | "", 12 | "", 13 | "", 14 | "sessionToken", 15 | "secretKey", 16 | "accessKeyId", 17 | "2023-06-26T17:33:30Z", 18 | "", 19 | "" 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 404, 3 | "headers": {}, 4 | "body": [ 5 | ] 6 | } 7 | 
-------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_checksum_retry.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": { 4 | "ETag": "b54357faf0632cce46e942fa68356b38", 5 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 6 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 7 | "Accept-Ranges": "bytes", 8 | "Content-Range": "bytes 0-65535/65536", 9 | "Content-Type": "binary/octet-stream", 10 | "x-amz-checksum-crc32": "q1875w==" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_delay_60s.json: -------------------------------------------------------------------------------- 1 | { 2 | "delay": 60, 3 | "status": 200, 4 | "headers": { 5 | "ETag": "b54357faf0632cce46e942fa68356b38", 6 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 7 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 8 | "Accept-Ranges": "bytes", 9 | "Content-Range": "bytes 0-65535/65536", 10 | "Content-Type": "binary/octet-stream" 11 | }, 12 | "body": [ 13 | "" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_invalid_response_missing_content_range.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 206, 3 | "headers": { 4 | "ETag": "b54357faf0632cce46e942fa68356b38", 5 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 6 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 7 | "Accept-Ranges": "bytes", 8 | "Content-Type": "binary/octet-stream" 9 | }, 10 | "body": [ 11 | "" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_invalid_response_missing_etags.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"status": 206, 3 | "headers": { 4 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 5 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 6 | "Accept-Ranges": "bytes", 7 | "Content-Range": "bytes 0-65535/1048576", 8 | "Content-Type": "binary/octet-stream" 9 | }, 10 | "body": [ 11 | "" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_long_error.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 403, 3 | "headers": { 4 | "Content-Type": "application/xml" 5 | }, 6 | "body": [ 7 | "\nNoSuchKeyreally long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error really long error /mybucket/myfoto.jpg4442587FB7D0A2F9" 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_modified_failure.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 400, 3 | "headers": {}, 4 | "body": [ 5 | 
"bad_request" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_modified_first_part.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 206, 3 | "headers": { 4 | "ETag": "b54357faf0632cce46e942fa68356b38", 5 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 6 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 7 | "Accept-Ranges": "bytes", 8 | "Content-Range": "bytes 0-65535/1048576", 9 | "Content-Type": "binary/octet-stream" 10 | }, 11 | "body": [ 12 | "" 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_modified_success.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 412, 3 | "headers": { 4 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 5 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT" 6 | }, 7 | "body": [ 8 | "precondition failed" 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /tests/mock_s3_server/GetObject/get_object_unmatch_checksum_crc32.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": { 4 | "ETag": "b54357faf0632cce46e942fa68356b38", 5 | "Date": "Thu, 12 Jan 2023 00:04:21 GMT", 6 | "Last-Modified": "Tue, 10 Jan 2023 23:39:32 GMT", 7 | "Accept-Ranges": "bytes", 8 | "Content-Range": "bytes 0-65535/65536", 9 | "Content-Type": "binary/octet-stream", 10 | "x-amz-checksum-crc32": "q1875w==" 11 | }, 12 | "body": [ 13 | "" 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /tests/mock_s3_server/ListParts/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 404, 3 | "headers": {}, 4 | "body": [ 5 | ] 6 | } 7 | 
-------------------------------------------------------------------------------- /tests/mock_s3_server/ListParts/multiple_list_parts_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "keep-alive"}, 4 | "body": [ 5 | "", 6 | "", 7 | "example-bucket", 8 | "example-object", 9 | "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", 10 | "2", 11 | "true", 12 | "", 13 | "2", 14 | "KtQF9Q==", 15 | "2010-11-10T20:48:34.000Z", 16 | "\"7778aef83f66abc1fa1e8477f296d394\"", 17 | "8388608", 18 | "", 19 | "" 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /tests/mock_s3_server/ListParts/multiple_list_parts_2.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "keep-alive"}, 4 | "body": [ 5 | "", 6 | "", 7 | "example-bucket", 8 | "example-object", 9 | "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", 10 | "2", 11 | "false", 12 | "", 13 | "3", 14 | "yagJog==", 15 | "2010-11-10T20:48:33.000Z", 16 | "\"aaaa18db4cc2f85cedef654fccc4a4x8\"", 17 | "8388608", 18 | "", 19 | "" 20 | ] 21 | } 22 | -------------------------------------------------------------------------------- /tests/mock_s3_server/ListParts/resume_first_part_not_completed.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "keep-alive"}, 4 | "body": [ 5 | "", 6 | "", 7 | "example-bucket", 8 | "example-object", 9 | "XXBsb2FkIElEIGZvciBlbHZpbmcncyVcdS1tb3ZpZS5tMnRzEEEwbG9hZA", 10 | "false", 11 | "", 12 | "2", 13 | "2010-11-10T20:48:34.000Z", 14 | "\"7778aef83f66abc1fa1e8477f296d394\"", 15 | "8388608", 16 | "", 17 | "", 18 | "3", 19 | "2010-11-10T20:48:33.000Z", 20 | "\"aaaa18db4cc2f85cedef654fccc4a4x8\"", 21 | "8388608", 22 | "", 23 | "" 24 | ] 25 | } 26 | 
-------------------------------------------------------------------------------- /tests/mock_s3_server/README.md: --------------------------------------------------------------------------------

# Mock S3 server

A **NON-TLS** mock S3 server based on [python-hyper/h11](https://github.com/python-hyper/h11) and [trio](http://trio.readthedocs.io/en/latest/index.html). The server code implementation is based on the trio-server example from python-hyper/h11 [here](https://github.com/python-hyper/h11/blob/master/examples/trio-server.py). It only supports very basic mock responses for the requests it receives.

## How to run the server

Python 3.7+ required.

- Install the h11 and trio python modules: `python3 -m pip install h11 trio`
- Run the server: `python3 ./mock_s3_server.py`

### Supported Operations

- CreateMultipartUpload
- CompleteMultipartUpload
- UploadPart
- AbortMultipartUpload
- GetObject

### Defined response

The server will read from ./{OperationName}/{Key}.json. The json file is formatted as follows:

```json
{
    "status": 200,
    "headers": {"Connection": "close"},
    "request_headers": {"HeaderA": "ValueA"},
    "body": [
        "",
        "",
        "",
        "InternalError",
        "We encountered an internal error. Please try again.",
        "656c76696e6727732072657175657374",
        "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==",
        ""
    ]
}
```

Here you can define the expected response status, headers, and response body. If {Key}.json is not found on the file system, the server will load `default.json` instead.

The server validates that all headers specified in the "request_headers" field are present in the incoming request. If any required header is missing, the request will fail. These headers will not be part of the response headers.
If the "delay" field is present, the response will be delayed by the given number of seconds.
46 | 47 | ### GetObject Response 48 | 49 | By default, the GetObject response will read from ./{OperationName}/{Key}.json for the status and headers. But the body will be generated to match the range in the request. 50 | 51 | To proper handle ranged GetObject, you will need to modify the mock server code. Check function `handle_get_object` for details. 52 | -------------------------------------------------------------------------------- /tests/mock_s3_server/UploadPart/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"}, 4 | "body": [ 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /tests/mock_s3_server/UploadPart/missing_etag.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 200, 3 | "headers": {"Connection": "keep-alive"}, 4 | "body": [ 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /tests/mock_s3_server/UploadPart/throttle.json: -------------------------------------------------------------------------------- 1 | { 2 | "status": 503, 3 | "headers": {"ETag": "b54357faf0632cce46e942fa68356b38", "Connection": "keep-alive"}, 4 | "body": [ 5 | "", 6 | "", 7 | "", 8 | "SlowDown", 9 | "656c76696e6727732072657175657374", 10 | "Uuag1LuByRx9e6j5Onimru9pO4ZVKnJ2Qz7/C1NPcfTWAtRPfTaOFg==", 11 | "" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tests/s3_checksums_crc64nvme_tests.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
/* CRC64-NVME digests are 8 bytes. */
#define AWS_CRC64_LEN sizeof(uint64_t)

/* One-shot CRC64-NVME of "aaaaaaaaaa" must match the known reference digest. */
static int s_crc64nvme_nist_test_case_1_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("aaaaaaaaaa");
    uint8_t expected[] = {0x0C, 0x1A, 0x80, 0x03, 0x6D, 0x65, 0xC5, 0x55};
    struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected));

    return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC64NVME);
}

AWS_TEST_CASE(crc64nvme_nist_test_case_1, s_crc64nvme_nist_test_case_1_fn)

/* Empty input must produce the all-zero CRC64-NVME digest. */
static int s_crc64nvme_nist_test_case_2_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("");
    uint8_t expected[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected));

    return s_verify_checksum_test_case(allocator, &input, &expected_buf, aws_checksum_new, AWS_SCA_CRC64NVME);
}

AWS_TEST_CASE(crc64nvme_nist_test_case_2, s_crc64nvme_nist_test_case_2_fn)

/* Ten incremental single-byte updates of 'a' must equal the one-shot digest
 * of "aaaaaaaaaa" from case 1 (streaming == one-shot). */
static int s_crc64nvme_nist_test_case_3_fn(struct aws_allocator *allocator, void *ctx) {
    (void)ctx;

    aws_s3_library_init(allocator);

    struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC64NVME);
    ASSERT_NOT_NULL(checksum);
    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("a");

    for (size_t i = 0; i < 10; ++i) {
        ASSERT_SUCCESS(aws_checksum_update(checksum, &input));
    }

    uint8_t output[AWS_CRC64_LEN] = {0};
    struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output));
    output_buf.len = 0; /* finalize appends; start with an empty buffer */
    ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf));

    uint8_t expected[] = {0x0C, 0x1A, 0x80, 0x03, 0x6D, 0x65, 0xC5, 0x55};
    struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected));
    ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len);

    aws_checksum_destroy(checksum);

    aws_s3_library_clean_up();

    return AWS_OP_SUCCESS;
}

AWS_TEST_CASE(crc64nvme_nist_test_case_3, s_crc64nvme_nist_test_case_3_fn)
expected[] = {0x0C, 0x1A, 0x80, 0x03, 0x6D, 0x65, 0xC5, 0x55}; 55 | struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); 56 | ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); 57 | 58 | aws_checksum_destroy(checksum); 59 | 60 | aws_s3_library_clean_up(); 61 | 62 | return AWS_OP_SUCCESS; 63 | } 64 | 65 | AWS_TEST_CASE(crc64nvme_nist_test_case_3, s_crc64nvme_nist_test_case_3_fn) 66 | 67 | static int s_crc64nvme_nist_test_case_4_fn(struct aws_allocator *allocator, void *ctx) { 68 | (void)ctx; 69 | 70 | aws_s3_library_init(allocator); 71 | 72 | struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC64NVME); 73 | ASSERT_NOT_NULL(checksum); 74 | struct aws_byte_cursor input = aws_byte_cursor_from_c_str("aa"); 75 | 76 | for (size_t i = 0; i < 5; ++i) { 77 | ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); 78 | } 79 | 80 | uint8_t output[AWS_CRC64_LEN] = {0}; 81 | struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); 82 | output_buf.len = 0; 83 | ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf)); 84 | 85 | uint8_t expected[] = {0x0C, 0x1A, 0x80, 0x03, 0x6D, 0x65, 0xC5, 0x55}; 86 | 87 | struct aws_byte_cursor expected_buf = aws_byte_cursor_from_array(expected, sizeof(expected)); 88 | ASSERT_BIN_ARRAYS_EQUALS(expected_buf.ptr, expected_buf.len, output_buf.buffer, output_buf.len); 89 | 90 | aws_checksum_destroy(checksum); 91 | 92 | aws_s3_library_clean_up(); 93 | 94 | return AWS_OP_SUCCESS; 95 | } 96 | 97 | AWS_TEST_CASE(crc64nvme_nist_test_case_4, s_crc64nvme_nist_test_case_4_fn) 98 | 99 | static int s_crc64nvme_test_invalid_buffer_fn(struct aws_allocator *allocator, void *ctx) { 100 | (void)ctx; 101 | 102 | aws_s3_library_init(allocator); 103 | 104 | struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" 105 | "klmghijklmnhijklmnoijklmnopjklmnopqklm" 106 | 
"nopqrlmnopqrsmnopqrstnopqrstu"); 107 | uint8_t output[AWS_CRC64_LEN] = {0}; 108 | struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); 109 | output_buf.len = 1; 110 | 111 | ASSERT_ERROR(AWS_ERROR_SHORT_BUFFER, aws_checksum_compute(allocator, AWS_SCA_CRC64NVME, &input, &output_buf)); 112 | 113 | aws_s3_library_clean_up(); 114 | 115 | return AWS_OP_SUCCESS; 116 | } 117 | 118 | AWS_TEST_CASE(crc64nvme_test_invalid_buffer, s_crc64nvme_test_invalid_buffer_fn) 119 | 120 | static int s_crc64nvme_test_invalid_state_fn(struct aws_allocator *allocator, void *ctx) { 121 | (void)ctx; 122 | 123 | aws_s3_library_init(allocator); 124 | 125 | struct aws_byte_cursor input = aws_byte_cursor_from_c_str("abcdefghbcdefghicdefghijdefghijkefghijklfghij" 126 | "klmghijklmnhijklmnoijklmnopjklmnopqklm" 127 | "nopqrlmnopqrsmnopqrstnopqrstu"); 128 | 129 | struct aws_s3_checksum *checksum = aws_checksum_new(allocator, AWS_SCA_CRC64NVME); 130 | ASSERT_NOT_NULL(checksum); 131 | 132 | uint8_t output[AWS_CRC64_LEN] = {0}; 133 | struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, sizeof(output)); 134 | output_buf.len = 0; 135 | 136 | ASSERT_SUCCESS(aws_checksum_update(checksum, &input)); 137 | ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf)); 138 | ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_update(checksum, &input)); 139 | ASSERT_ERROR(AWS_ERROR_INVALID_STATE, aws_checksum_finalize(checksum, &output_buf)); 140 | 141 | aws_checksum_destroy(checksum); 142 | 143 | aws_s3_library_clean_up(); 144 | 145 | return AWS_OP_SUCCESS; 146 | } 147 | 148 | AWS_TEST_CASE(crc64nvme_test_invalid_state, s_crc64nvme_test_invalid_state_fn) 149 | -------------------------------------------------------------------------------- /tests/s3_checksums_test_case_helper.h: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | #include "aws/s3/private/s3_checksums.h" 6 | #include 7 | 8 | typedef struct aws_s3_checksum *aws_checksum_new_fn( 9 | struct aws_allocator *allocator, 10 | enum aws_s3_checksum_algorithm algorithm); 11 | 12 | static inline int s_verify_checksum_test_case( 13 | struct aws_allocator *allocator, 14 | struct aws_byte_cursor *input, 15 | struct aws_byte_cursor *expected, 16 | aws_checksum_new_fn *new_fn, 17 | enum aws_s3_checksum_algorithm algorithm) { 18 | 19 | aws_s3_library_init(allocator); 20 | 21 | /* test all possible segmentation lengths from 1 byte at a time to the entire 22 | * input. */ 23 | for (size_t i = 1; i < input->len; ++i) { 24 | uint8_t output[128] = {0}; 25 | struct aws_byte_buf output_buf = aws_byte_buf_from_array(output, expected->len); 26 | output_buf.len = 0; 27 | 28 | struct aws_s3_checksum *checksum = new_fn(allocator, algorithm); 29 | ASSERT_NOT_NULL(checksum); 30 | 31 | struct aws_byte_cursor input_cpy = *input; 32 | 33 | while (input_cpy.len) { 34 | size_t max_advance = input_cpy.len > i ? i : input_cpy.len; 35 | struct aws_byte_cursor segment = aws_byte_cursor_from_array(input_cpy.ptr, max_advance); 36 | ASSERT_SUCCESS(aws_checksum_update(checksum, &segment)); 37 | aws_byte_cursor_advance(&input_cpy, max_advance); 38 | } 39 | 40 | ASSERT_SUCCESS(aws_checksum_finalize(checksum, &output_buf)); 41 | ASSERT_BIN_ARRAYS_EQUALS(expected->ptr, expected->len, output_buf.buffer, output_buf.len); 42 | 43 | aws_checksum_destroy(checksum); 44 | } 45 | 46 | aws_s3_library_clean_up(); 47 | 48 | return AWS_OP_SUCCESS; 49 | } 50 | -------------------------------------------------------------------------------- /tests/s3_endpoint_resolver_tests.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | #include 6 | #include 7 | 8 | #ifdef AWS_ENABLE_S3_ENDPOINT_RESOLVER 9 | # include 10 | # include 11 | 12 | AWS_TEST_CASE(test_s3_endpoint_resolver_resolve_endpoint, s_test_s3_endpoint_resolver_resolve_endpoint) 13 | static int s_test_s3_endpoint_resolver_resolve_endpoint(struct aws_allocator *allocator, void *ctx) { 14 | (void)ctx; 15 | aws_s3_library_init(allocator); 16 | 17 | struct aws_endpoints_rule_engine *rule_engine = aws_s3_endpoint_resolver_new(allocator); 18 | ASSERT_NOT_NULL(rule_engine); 19 | struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); 20 | ASSERT_NOT_NULL(context); 21 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 22 | allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-west-2"))); 23 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 24 | allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); 25 | 26 | struct aws_endpoints_resolved_endpoint *resolved_endpoint; 27 | ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); 28 | 29 | ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); 30 | 31 | struct aws_byte_cursor url_cur; 32 | ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); 33 | 34 | ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3-bucket-test.s3.us-west-2.amazonaws.com"); 35 | 36 | aws_endpoints_resolved_endpoint_release(resolved_endpoint); 37 | aws_endpoints_request_context_release(context); 38 | aws_endpoints_rule_engine_release(rule_engine); 39 | 40 | aws_s3_library_clean_up(); 41 | return 0; 42 | } 43 | 44 | AWS_TEST_CASE(test_s3_endpoint_resolver_resolve_endpoint_fips, s_test_s3_endpoint_resolver_resolve_endpoint_fips) 45 | static int s_test_s3_endpoint_resolver_resolve_endpoint_fips(struct aws_allocator *allocator, void *ctx) { 46 | 
(void)ctx; 47 | aws_s3_library_init(allocator); 48 | 49 | struct aws_endpoints_rule_engine *rule_engine = aws_s3_endpoint_resolver_new(allocator); 50 | ASSERT_NOT_NULL(rule_engine); 51 | struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); 52 | ASSERT_NOT_NULL(context); 53 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 54 | allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"))); 55 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 56 | allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); 57 | ASSERT_SUCCESS( 58 | aws_endpoints_request_context_add_boolean(allocator, context, aws_byte_cursor_from_c_str("UseFIPS"), true)); 59 | struct aws_endpoints_resolved_endpoint *resolved_endpoint; 60 | ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); 61 | 62 | ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); 63 | 64 | struct aws_byte_cursor url_cur; 65 | ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); 66 | 67 | ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3-bucket-test.s3-fips.us-east-1.amazonaws.com"); 68 | 69 | aws_endpoints_resolved_endpoint_release(resolved_endpoint); 70 | aws_endpoints_request_context_release(context); 71 | aws_endpoints_rule_engine_release(rule_engine); 72 | 73 | aws_s3_library_clean_up(); 74 | return 0; 75 | } 76 | 77 | AWS_TEST_CASE( 78 | test_s3_endpoint_resolver_resolve_endpoint_force_path_style, 79 | s_test_s3_endpoint_resolver_resolve_endpoint_force_path_style) 80 | static int s_test_s3_endpoint_resolver_resolve_endpoint_force_path_style(struct aws_allocator *allocator, void *ctx) { 81 | (void)ctx; 82 | aws_s3_library_init(allocator); 83 | 84 | struct aws_endpoints_rule_engine *rule_engine = aws_s3_endpoint_resolver_new(allocator); 85 | 
ASSERT_NOT_NULL(rule_engine); 86 | struct aws_endpoints_request_context *context = aws_endpoints_request_context_new(allocator); 87 | ASSERT_NOT_NULL(context); 88 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 89 | allocator, context, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"))); 90 | ASSERT_SUCCESS(aws_endpoints_request_context_add_string( 91 | allocator, context, aws_byte_cursor_from_c_str("Bucket"), aws_byte_cursor_from_c_str("s3-bucket-test"))); 92 | ASSERT_SUCCESS(aws_endpoints_request_context_add_boolean( 93 | allocator, context, aws_byte_cursor_from_c_str("ForcePathStyle"), true)); 94 | struct aws_endpoints_resolved_endpoint *resolved_endpoint; 95 | ASSERT_SUCCESS(aws_endpoints_rule_engine_resolve(rule_engine, context, &resolved_endpoint)); 96 | 97 | ASSERT_INT_EQUALS(AWS_ENDPOINTS_RESOLVED_ENDPOINT, aws_endpoints_resolved_endpoint_get_type(resolved_endpoint)); 98 | 99 | struct aws_byte_cursor url_cur; 100 | ASSERT_SUCCESS(aws_endpoints_resolved_endpoint_get_url(resolved_endpoint, &url_cur)); 101 | 102 | ASSERT_CURSOR_VALUE_CSTRING_EQUALS(url_cur, "https://s3.us-east-1.amazonaws.com/s3-bucket-test"); 103 | 104 | aws_endpoints_resolved_endpoint_release(resolved_endpoint); 105 | aws_endpoints_request_context_release(context); 106 | aws_endpoints_rule_engine_release(rule_engine); 107 | 108 | aws_s3_library_clean_up(); 109 | return 0; 110 | } 111 | 112 | #endif 113 | -------------------------------------------------------------------------------- /tests/s3_endpoint_tests.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | 6 | #include "aws/s3/private/s3_client_impl.h" 7 | #include "s3_tester.h" 8 | #include 9 | #include 10 | 11 | AWS_TEST_CASE(test_s3_different_endpoints, s_test_s3_different_endpoints) 12 | static int s_test_s3_different_endpoints(struct aws_allocator *allocator, void *ctx) { 13 | (void)ctx; 14 | 15 | struct aws_s3_tester tester; 16 | ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); 17 | 18 | struct aws_s3_client *client = NULL; 19 | struct aws_s3_tester_client_options client_options; 20 | AWS_ZERO_STRUCT(client_options); 21 | 22 | ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); 23 | 24 | { 25 | struct aws_s3_meta_request_test_results meta_request_test_results; 26 | aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); 27 | 28 | struct aws_s3_tester_meta_request_options options = { 29 | .allocator = allocator, 30 | .client = client, 31 | .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, 32 | .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, 33 | .get_options = 34 | { 35 | .object_path = g_pre_existing_object_1MB, 36 | }, 37 | }; 38 | 39 | ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, &meta_request_test_results)); 40 | aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); 41 | } 42 | 43 | { 44 | struct aws_s3_meta_request_test_results meta_request_test_results; 45 | aws_s3_meta_request_test_results_init(&meta_request_test_results, allocator); 46 | 47 | struct aws_s3_tester_meta_request_options options = { 48 | .allocator = allocator, 49 | .client = client, 50 | .meta_request_type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT, 51 | .validate_type = AWS_S3_TESTER_VALIDATE_TYPE_EXPECT_SUCCESS, 52 | .bucket_name = &g_test_public_bucket_name, 53 | .get_options = 54 | { 55 | .object_path = g_pre_existing_object_1MB, 56 | }, 57 | }; 58 | 59 | ASSERT_SUCCESS(aws_s3_tester_send_meta_request_with_options(&tester, &options, 
&meta_request_test_results)); 60 | aws_s3_meta_request_test_results_clean_up(&meta_request_test_results); 61 | } 62 | 63 | aws_s3_client_release(client); 64 | 65 | aws_s3_tester_clean_up(&tester); 66 | 67 | return 0; 68 | } 69 | -------------------------------------------------------------------------------- /tests/s3_many_async_uploads_without_data_test.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 4 | */ 5 | #include "s3_tester.h" 6 | 7 | #include 8 | #include 9 | #include 10 | 11 | /** 12 | * Regression test for deadlock discovered by a user of Mountpoint (which wraps aws-c-s3 13 | * with a filesystem-like API). The user opened MANY files at once. 14 | * The user wrote data to some of the later files they opened, 15 | * and waited for those writes to complete. 16 | * But aws-c-s3 was waiting on data from the first few files. 17 | * Both sides were waiting on each other. It was a deadlock. 18 | * 19 | * This test starts N upload meta-requests. 20 | * Then, it only sends data to 1 meta-request at a time, starting with the last 21 | * meta-request it created, and working backwards to the first. 22 | * If the test times out, then we still suffer from the deadlock. 23 | */ 24 | 25 | /* Number of simultaneous upload meta-requests to create */ 26 | #define MANY_ASYNC_UPLOADS_COUNT 200 27 | 28 | /* Number of bytes each meta-request should upload (small so this this doesn't take forever) */ 29 | #define MANY_ASYNC_UPLOADS_OBJECT_SIZE 100 30 | 31 | /* Bytes per write */ 32 | #define MANY_ASYNC_UPLOADS_BYTES_PER_WRITE 10 33 | 34 | /* How long to spend doing nothing, before assuming we're deadlocked */ 35 | #define SEND_DATA_TIMEOUT_NANOS ((uint64_t)AWS_TIMESTAMP_NANOS * 10) /* 10secs */ 36 | 37 | /* See top of file for full description of what's going on in this test. 
*/ 38 | AWS_TEST_CASE(test_s3_many_async_uploads_without_data, s_test_s3_many_async_uploads_without_data) 39 | static int s_test_s3_many_async_uploads_without_data(struct aws_allocator *allocator, void *ctx) { 40 | (void)ctx; 41 | 42 | /* Set up */ 43 | struct aws_s3_tester tester; 44 | ASSERT_SUCCESS(aws_s3_tester_init(allocator, &tester)); 45 | 46 | struct aws_s3_client *client = NULL; 47 | struct aws_s3_tester_client_options client_options; 48 | AWS_ZERO_STRUCT(client_options); 49 | ASSERT_SUCCESS(aws_s3_tester_client_new(&tester, &client_options, &client)); 50 | 51 | struct aws_s3_meta_request *meta_requests[MANY_ASYNC_UPLOADS_COUNT]; 52 | struct aws_s3_meta_request_test_results meta_request_test_results[MANY_ASYNC_UPLOADS_COUNT]; 53 | 54 | /* Create N upload meta-requests, each with an async-input-stream that 55 | * won't provide data until later in this test... */ 56 | for (int i = 0; i < MANY_ASYNC_UPLOADS_COUNT; ++i) { 57 | aws_s3_meta_request_test_results_init(&meta_request_test_results[i], allocator); 58 | 59 | struct aws_string *host_name = 60 | aws_s3_tester_build_endpoint_string(allocator, &g_test_bucket_name, &g_test_s3_region); 61 | struct aws_byte_cursor host_name_cursor = aws_byte_cursor_from_string(host_name); 62 | 63 | char object_name[128] = {0}; 64 | snprintf(object_name, sizeof(object_name), "/many-async-uploads-%d.txt", i); 65 | struct aws_byte_buf object_path; 66 | ASSERT_SUCCESS( 67 | aws_s3_tester_upload_file_path_init(allocator, &object_path, aws_byte_cursor_from_c_str(object_name))); 68 | 69 | struct aws_http_message *message = aws_s3_test_put_object_request_new_without_body( 70 | allocator, 71 | &host_name_cursor, 72 | g_test_body_content_type, 73 | aws_byte_cursor_from_buf(&object_path), 74 | MANY_ASYNC_UPLOADS_OBJECT_SIZE, 75 | 0 /*flags*/); 76 | 77 | /* Erase content-length header, because Mountpoint always uploads with unknown content-length */ 78 | aws_http_headers_erase(aws_http_message_get_headers(message), 
g_content_length_header_name); 79 | 80 | struct aws_s3_meta_request_options options = { 81 | .type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT, 82 | .message = message, 83 | .send_using_async_writes = true, 84 | }; 85 | ASSERT_SUCCESS(aws_s3_tester_bind_meta_request(&tester, &options, &meta_request_test_results[i])); 86 | 87 | meta_requests[i] = aws_s3_client_make_meta_request(client, &options); 88 | 89 | /* Release stuff created in this loop */ 90 | aws_string_destroy(host_name); 91 | aws_byte_buf_clean_up(&object_path); 92 | aws_http_message_release(message); 93 | } 94 | 95 | /* Starting at the end, and working backwards, only provide data to one meta-request at a time. */ 96 | for (int i = MANY_ASYNC_UPLOADS_COUNT - 1; i >= 0; --i) { 97 | 98 | struct aws_s3_meta_request *meta_request_i = meta_requests[i]; 99 | 100 | /* Perform sequential writes to meta_request_i, until EOF */ 101 | size_t bytes_written = 0; 102 | bool eof = false; 103 | while (!eof) { 104 | size_t bytes_to_write = 105 | aws_min_size(MANY_ASYNC_UPLOADS_BYTES_PER_WRITE, MANY_ASYNC_UPLOADS_OBJECT_SIZE - bytes_written); 106 | 107 | eof = (bytes_written + bytes_to_write) == MANY_ASYNC_UPLOADS_OBJECT_SIZE; 108 | 109 | /* use freshly allocated buffer for each write, so that we're likely to get memory violations 110 | * if this data is used wrong internally. */ 111 | struct aws_byte_buf tmp_data; 112 | aws_byte_buf_init(&tmp_data, allocator, bytes_to_write); 113 | aws_byte_buf_write_u8_n(&tmp_data, 'z', bytes_to_write); 114 | 115 | struct aws_future_void *write_future = 116 | aws_s3_meta_request_write(meta_request_i, aws_byte_cursor_from_buf(&tmp_data), eof); 117 | 118 | ASSERT_TRUE( 119 | aws_future_void_wait(write_future, SEND_DATA_TIMEOUT_NANOS), 120 | "Timed out waiting to send data on upload %d/%d." 121 | " After writing %zu bytes, timed out on write(data=%zu, eof=%d)", 122 | i + 1, 123 | MANY_ASYNC_UPLOADS_COUNT, 124 | bytes_written, 125 | bytes_to_write, 126 | eof); 127 | 128 | /* write complete! 
*/ 129 | aws_byte_buf_clean_up(&tmp_data); 130 | 131 | ASSERT_INT_EQUALS(0, aws_future_void_get_error(write_future)); 132 | aws_future_void_release(write_future); 133 | 134 | bytes_written += bytes_to_write; 135 | } 136 | } 137 | 138 | /* Wait for everything to finish */ 139 | for (int i = 0; i < MANY_ASYNC_UPLOADS_COUNT; ++i) { 140 | meta_requests[i] = aws_s3_meta_request_release(meta_requests[i]); 141 | } 142 | 143 | aws_s3_tester_wait_for_meta_request_finish(&tester); 144 | aws_s3_tester_wait_for_meta_request_shutdown(&tester); 145 | 146 | for (int i = 0; i < MANY_ASYNC_UPLOADS_COUNT; ++i) { 147 | aws_s3_tester_validate_put_object_results(&meta_request_test_results[i], 0 /*flags*/); 148 | aws_s3_meta_request_test_results_clean_up(&meta_request_test_results[i]); 149 | } 150 | 151 | /* Cleanup */ 152 | aws_s3_client_release(client); 153 | aws_s3_tester_clean_up(&tester); 154 | 155 | return 0; 156 | } 157 | -------------------------------------------------------------------------------- /tests/s3_platform_info_test.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | static int s_test_get_existing_platform_info(struct aws_allocator *allocator, void *ctx) { 12 | (void)ctx; 13 | 14 | aws_s3_library_init(allocator); 15 | 16 | struct aws_byte_cursor instance_type = aws_byte_cursor_from_c_str("c5n.18xlarge"); 17 | struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); 18 | 19 | const struct aws_s3_platform_info *platform_info = 20 | aws_s3_get_platform_info_for_instance_type(loader, instance_type); 21 | ASSERT_NOT_NULL(platform_info); 22 | 23 | ASSERT_BIN_ARRAYS_EQUALS( 24 | instance_type.ptr, instance_type.len, platform_info->instance_type.ptr, platform_info->instance_type.len); 25 | ASSERT_UINT_EQUALS(100, (uintmax_t)platform_info->max_throughput_gbps); 26 | 27 | aws_s3_platform_info_loader_release(loader); 28 | aws_s3_library_clean_up(); 29 | return AWS_OP_SUCCESS; 30 | } 31 | 32 | AWS_TEST_CASE(test_get_existing_platform_info, s_test_get_existing_platform_info) 33 | 34 | static int s_test_get_nonexistent_platform_info(struct aws_allocator *allocator, void *ctx) { 35 | (void)ctx; 36 | 37 | aws_s3_library_init(allocator); 38 | 39 | struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); 40 | 41 | struct aws_byte_cursor instance_type = aws_byte_cursor_from_c_str("non-existent"); 42 | const struct aws_s3_platform_info *platform_info = 43 | aws_s3_get_platform_info_for_instance_type(loader, instance_type); 44 | ASSERT_NULL(platform_info); 45 | 46 | aws_s3_platform_info_loader_release(loader); 47 | aws_s3_library_clean_up(); 48 | return AWS_OP_SUCCESS; 49 | } 50 | 51 | AWS_TEST_CASE(test_get_nonexistent_platform_info, s_test_get_nonexistent_platform_info) 52 | 53 | static int s_load_platform_info_from_global_state_sanity_test(struct aws_allocator *allocator, void *arg) { 54 | (void)arg; 55 | aws_s3_library_init(allocator); 56 | 57 | const struct aws_s3_platform_info *platform_info = 
aws_s3_get_current_platform_info(); 58 | ASSERT_NOT_NULL(platform_info); 59 | 60 | if (platform_info->instance_type.len) { 61 | struct aws_s3_platform_info_loader *loader = aws_s3_platform_info_loader_new(allocator); 62 | const struct aws_s3_platform_info *by_name_info = 63 | aws_s3_get_platform_info_for_instance_type(loader, platform_info->instance_type); 64 | if (by_name_info) { 65 | ASSERT_BIN_ARRAYS_EQUALS( 66 | platform_info->instance_type.ptr, 67 | platform_info->instance_type.len, 68 | by_name_info->instance_type.ptr, 69 | by_name_info->instance_type.len); 70 | ASSERT_TRUE(platform_info->max_throughput_gbps == by_name_info->max_throughput_gbps); 71 | } 72 | 73 | aws_s3_platform_info_loader_release(loader); 74 | } 75 | 76 | aws_s3_library_clean_up(); 77 | return AWS_OP_SUCCESS; 78 | } 79 | 80 | AWS_TEST_CASE(load_platform_info_from_global_state_sanity_test, s_load_platform_info_from_global_state_sanity_test) 81 | 82 | static int s_test_get_platforms_with_recommended_config(struct aws_allocator *allocator, void *ctx) { 83 | (void)ctx; 84 | 85 | aws_s3_library_init(allocator); 86 | 87 | struct aws_array_list recommended_platform_list = aws_s3_get_platforms_with_recommended_config(); 88 | ASSERT_TRUE(aws_array_list_length(&recommended_platform_list) > 0); 89 | for (size_t i = 0; i < aws_array_list_length(&recommended_platform_list); ++i) { 90 | struct aws_byte_cursor cursor; 91 | aws_array_list_get_at(&recommended_platform_list, &cursor, i); 92 | ASSERT_TRUE(cursor.len > 0); 93 | } 94 | aws_array_list_clean_up(&recommended_platform_list); 95 | aws_s3_library_clean_up(); 96 | return AWS_OP_SUCCESS; 97 | } 98 | 99 | AWS_TEST_CASE(test_get_platforms_with_recommended_config, s_test_get_platforms_with_recommended_config) 100 | -------------------------------------------------------------------------------- /tests/s3_test_input_stream.c: -------------------------------------------------------------------------------- 1 | #include "s3_tester.h" 2 | #include 3 | 4 | 
struct aws_s3_test_input_stream_impl { 5 | struct aws_input_stream base; 6 | size_t position; 7 | size_t length; 8 | struct aws_allocator *allocator; 9 | }; 10 | 11 | static int s_aws_s3_test_input_stream_seek( 12 | struct aws_input_stream *stream, 13 | int64_t offset, 14 | enum aws_stream_seek_basis basis) { 15 | (void)stream; 16 | (void)offset; 17 | (void)basis; 18 | 19 | /* Stream should never be seeked; all reads should be sequential. */ 20 | aws_raise_error(AWS_ERROR_UNKNOWN); 21 | return AWS_OP_ERR; 22 | } 23 | 24 | static int s_aws_s3_test_input_stream_read( 25 | struct aws_input_stream *stream, 26 | struct aws_byte_buf *dest, 27 | struct aws_byte_cursor *test_string) { 28 | (void)stream; 29 | (void)dest; 30 | 31 | struct aws_s3_test_input_stream_impl *test_input_stream = 32 | AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); 33 | 34 | while (dest->len < dest->capacity && test_input_stream->position < test_input_stream->length) { 35 | size_t buffer_pos = test_input_stream->position % test_string->len; 36 | 37 | struct aws_byte_cursor source_byte_cursor = { 38 | .len = test_string->len - buffer_pos, 39 | .ptr = test_string->ptr + buffer_pos, 40 | }; 41 | 42 | size_t remaining_in_stream = test_input_stream->length - test_input_stream->position; 43 | if (remaining_in_stream < source_byte_cursor.len) { 44 | source_byte_cursor.len = remaining_in_stream; 45 | } 46 | 47 | size_t remaining_in_buffer = dest->capacity - dest->len; 48 | 49 | if (remaining_in_buffer < source_byte_cursor.len) { 50 | source_byte_cursor.len = remaining_in_buffer; 51 | } 52 | 53 | aws_byte_buf_append(dest, &source_byte_cursor); 54 | 55 | test_input_stream->position += source_byte_cursor.len; 56 | } 57 | 58 | return AWS_OP_SUCCESS; 59 | } 60 | 61 | static int s_aws_s3_test_input_stream_read_1(struct aws_input_stream *stream, struct aws_byte_buf *dest) { 62 | struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is an S3 test."); 63 | return 
s_aws_s3_test_input_stream_read(stream, dest, &test_string); 64 | } 65 | 66 | static int s_aws_s3_test_input_stream_read_2(struct aws_input_stream *stream, struct aws_byte_buf *dest) { 67 | struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Different S3 test value."); 68 | return s_aws_s3_test_input_stream_read(stream, dest, &test_string); 69 | } 70 | 71 | static int s_aws_s3_test_input_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) { 72 | (void)stream; 73 | (void)status; 74 | 75 | struct aws_s3_test_input_stream_impl *test_input_stream = 76 | AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); 77 | 78 | status->is_end_of_stream = test_input_stream->position == test_input_stream->length; 79 | status->is_valid = true; 80 | 81 | return AWS_OP_SUCCESS; 82 | } 83 | 84 | static int s_aws_s3_test_input_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) { 85 | AWS_ASSERT(stream != NULL); 86 | struct aws_s3_test_input_stream_impl *test_input_stream = 87 | AWS_CONTAINER_OF(stream, struct aws_s3_test_input_stream_impl, base); 88 | *out_length = (int64_t)test_input_stream->length; 89 | return AWS_OP_SUCCESS; 90 | } 91 | 92 | static void s_aws_s3_test_input_stream_destroy(struct aws_s3_test_input_stream_impl *test_input_stream) { 93 | aws_mem_release(test_input_stream->allocator, test_input_stream); 94 | } 95 | 96 | static struct aws_input_stream_vtable s_aws_s3_test_input_stream_vtable_1 = { 97 | .seek = s_aws_s3_test_input_stream_seek, 98 | .read = s_aws_s3_test_input_stream_read_1, 99 | .get_status = s_aws_s3_test_input_stream_get_status, 100 | .get_length = s_aws_s3_test_input_stream_get_length, 101 | }; 102 | 103 | static struct aws_input_stream_vtable s_aws_s3_test_input_stream_vtable_2 = { 104 | .seek = s_aws_s3_test_input_stream_seek, 105 | .read = s_aws_s3_test_input_stream_read_2, 106 | .get_status = s_aws_s3_test_input_stream_get_status, 107 | .get_length = 
s_aws_s3_test_input_stream_get_length, 108 | }; 109 | 110 | struct aws_input_stream *aws_s3_test_input_stream_new_with_value_type( 111 | struct aws_allocator *allocator, 112 | size_t stream_length, 113 | enum aws_s3_test_stream_value stream_value) { 114 | 115 | struct aws_s3_test_input_stream_impl *test_input_stream = 116 | aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_test_input_stream_impl)); 117 | 118 | test_input_stream->base.vtable = stream_value == TEST_STREAM_VALUE_1 ? &s_aws_s3_test_input_stream_vtable_1 119 | : &s_aws_s3_test_input_stream_vtable_2; 120 | 121 | aws_ref_count_init( 122 | &test_input_stream->base.ref_count, 123 | test_input_stream, 124 | (aws_simple_completion_callback *)s_aws_s3_test_input_stream_destroy); 125 | 126 | struct aws_input_stream *input_stream = &test_input_stream->base; 127 | 128 | test_input_stream->position = 0; 129 | test_input_stream->length = stream_length; 130 | test_input_stream->allocator = allocator; 131 | 132 | return input_stream; 133 | } 134 | 135 | struct aws_input_stream *aws_s3_test_input_stream_new(struct aws_allocator *allocator, size_t stream_length) { 136 | return aws_s3_test_input_stream_new_with_value_type(allocator, stream_length, TEST_STREAM_VALUE_1); 137 | } 138 | -------------------------------------------------------------------------------- /tests/s3_test_parallel_stream.c: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0. 
4 | */ 5 | 6 | #include "aws/s3/private/s3_parallel_input_stream.h" 7 | #include "s3_tester.h" 8 | #include 9 | 10 | struct aws_parallel_input_stream_from_file_failure_impl { 11 | struct aws_parallel_input_stream base; 12 | 13 | struct aws_atomic_var number_read; 14 | }; 15 | 16 | static void s_para_from_file_failure_destroy(struct aws_parallel_input_stream *stream) { 17 | struct aws_parallel_input_stream_from_file_failure_impl *impl = stream->impl; 18 | 19 | aws_mem_release(stream->alloc, impl); 20 | } 21 | 22 | struct aws_future_bool *s_para_from_file_failure_read( 23 | struct aws_parallel_input_stream *stream, 24 | uint64_t offset, 25 | struct aws_byte_buf *dest) { 26 | (void)offset; 27 | 28 | struct aws_future_bool *future = aws_future_bool_new(stream->alloc); 29 | struct aws_parallel_input_stream_from_file_failure_impl *impl = stream->impl; 30 | size_t previous_number_read = aws_atomic_fetch_add(&impl->number_read, 1); 31 | if (previous_number_read == 1) { 32 | /* TODO: make the failure configurable */ 33 | aws_future_bool_set_error(future, AWS_ERROR_UNIMPLEMENTED); 34 | } else { 35 | 36 | struct aws_byte_cursor test_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("This is an S3 test."); 37 | while (dest->len < dest->capacity) { 38 | size_t remaining_in_buffer = dest->capacity - dest->len; 39 | if (remaining_in_buffer < test_string.len) { 40 | test_string.len = remaining_in_buffer; 41 | } 42 | aws_byte_buf_append(dest, &test_string); 43 | } 44 | aws_future_bool_set_result(future, false); 45 | } 46 | return future; 47 | } 48 | 49 | static struct aws_parallel_input_stream_vtable s_parallel_input_stream_from_file_failure_vtable = { 50 | .destroy = s_para_from_file_failure_destroy, 51 | .read = s_para_from_file_failure_read, 52 | }; 53 | 54 | struct aws_parallel_input_stream *aws_parallel_input_stream_new_from_file_failure_tester( 55 | struct aws_allocator *allocator, 56 | struct aws_byte_cursor file_name) { 57 | (void)file_name; 58 | 59 | struct 
aws_parallel_input_stream_from_file_failure_impl *impl = 60 | aws_mem_calloc(allocator, 1, sizeof(struct aws_parallel_input_stream_from_file_failure_impl)); 61 | aws_parallel_input_stream_init_base( 62 | &impl->base, allocator, &s_parallel_input_stream_from_file_failure_vtable, impl); 63 | 64 | aws_atomic_init_int(&impl->number_read, 0); 65 | return &impl->base; 66 | } 67 | -------------------------------------------------------------------------------- /tests/test_helper/README.md: -------------------------------------------------------------------------------- 1 | # Helper script to setup your S3 structure to run the tests for aws-c-s3 2 | 3 | To use this script, you must have AWS credentials with permission to create and delete buckets. 4 | 5 | To create the S3 buckets and objects that tests will use: 6 | 7 | ```sh 8 | pip3 install boto3 9 | export CRT_S3_TEST_BUCKET_NAME= 10 | python3 test_helper.py init 11 | # change directory to the build/tests 12 | cd aws-c-s3/build/tests && ctest 13 | ``` 14 | 15 | To clean up the S3 buckets created 16 | 17 | ```sh 18 | export CRT_S3_TEST_BUCKET_NAME= 19 | python3 test_helper.py clean 20 | ``` 21 | 22 | ## Actions 23 | 24 | ### `init` action 25 | 26 | * Create `` in us-west-2. 27 | + Add the lifecycle to automatic clean up the `upload/` and clean up incomplete multipart uploads after one day. 
28 | + Upload files: 29 | - `pre-existing-10MB-aes256-c` [SSE-C](https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#sse-c-highlights) encrypted file 30 | - `pre-existing-10MB-aes256` [SSE-S3](https://docs.aws.amazon.com/AmazonS3/latest/userguide/specifying-s3-encryption.html) encrypted file 31 | - `pre-existing-10MB-kms` [SSE-KMS](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) encrypted file 32 | - `pre-existing-10MB` 33 | - `pre-existing-1MB` 34 | - `pre-existing-1MB-@` 35 | - `pre-existing-empty` 36 | - `pre-existing-error-xml` 37 | - with `--large_objects` enabled, several large objects will also be uploaded. Currently, only aws-c-s3's tests require these files; the aws-crt-*** repos do not: 38 | - `pre-existing-256MB` 39 | - `pre-existing-256MB-@` 40 | - `pre-existing-2GB` 41 | - `pre-existing-2GB-@` 42 | 43 | * with `--create_public_bucket` enabled, create `-public` in us-west-2 44 | + Upload files: 45 | - `pre-existing-1MB` 1MB file with public read access. 46 | 47 | * Create directory bucket `--usw2-az1--x-s3` in us-west-2 48 | + Add the lifecycle rule to automatically clean up the `upload/` prefix and clean up incomplete multipart uploads after one day. 49 | + Upload files: 50 | - `pre-existing-10MB` 10MB file. 51 | - with `--large_objects` enabled 52 | - `pre-existing-2GB` 53 | 54 | * Create directory bucket `--use1-az4--x-s3` in us-east-1 55 | + Add the lifecycle rule to automatically clean up the `upload/` prefix and clean up incomplete multipart uploads after one day. 56 | + Upload files: 57 | - `pre-existing-10MB` 10MB file. 58 | - with `--large_objects` enabled 59 | - `pre-existing-2GB` 60 | 61 | ### `clean` action 62 | 63 | * Delete the buckets created by the `init` action and every object inside them. 64 | 65 | ## BUCKET_NAME 66 | 67 | You can specify the bucket name to be created either by passing an argument to the script or by setting an environment variable; the `bucket_name` passed in takes precedence.
If neither of these options is chosen, the `init` action will create a random bucket name. In this case, you will need to set the `CRT_S3_TEST_BUCKET_NAME` environment variable to the printed-out bucket name before running the test. 68 | 69 | ## Notes 70 | 71 | * The MRAP tests are not included in this script, and they are disabled by default. To run those tests, you will need to create a MRAP access point using buckets that have `pre-existing-1MB` in them. Then update `g_test_mrap_endpoint` to the URI of the MRAP endpoint and build with `-DENABLE_MRAP_TESTS=true`. 72 | * To run the tests in tests/s3_mock_server_tests.c, initialize the mock S3 server first from [here](./../mock_s3_server/), and build your cmake project with `-DENABLE_MOCK_SERVER_TESTS=true` 73 | * Note: If you are not using the aws-common-runtime AWS team account, you must set the environment variable `CRT_S3_TEST_BUCKET_NAME` to the bucket created before running the test. 74 | * When you see an error with "Check your account level S3 settings, public access may be blocked.", check https://docs.aws.amazon.com/AmazonS3/latest/userguide/configuring-block-public-access-account.html to set `BlockPublicAcls` to false, which enables public read of the object with the `public-read` ACL in the bucket. 75 | 76 | ## TODO 77 | 78 | * Automate the MRAP creation 79 | * Instead of hard-coded paths and regions, make them configurable and picked up from the tests. 80 | --------------------------------------------------------------------------------