├── code_examples ├── dotnet_examples │ ├── .gitignore │ ├── S3Examples │ │ ├── App.config │ │ ├── packages.config │ │ ├── Properties │ │ │ └── AssemblyInfo.cs │ │ ├── AbortMPUUsingHighLevelAPITest.cs │ │ ├── GenPresignedURLTest.cs │ │ ├── DeleteObjectNonVersionedBucketTest1.cs │ │ ├── ManageingBucketACLTest.cs │ │ ├── CopyObjectTest.cs │ │ └── TransferAccelerationTest.cs │ └── S3Examples.sln ├── php_examples │ └── S3examples │ │ ├── s3-deleting-single-nonversioned-object.php │ │ ├── s3-list-multipart-upload.php │ │ ├── s3-check-sse.php │ │ ├── s3-abort-multipart-upload.php │ │ ├── s3-upload-sse.php │ │ ├── s3-downloading-object.php │ │ ├── s3-specify-sse.php │ │ ├── s3-copy-object-sse.php │ │ ├── s3-uploading-object.php │ │ ├── s3-request-aws.php │ │ ├── s3-multipart-upload-using-multipartuploader.php │ │ ├── s3-bucket-website-configuration.php │ │ ├── s3-deleting-multi-objects-nonversioned.php │ │ ├── s3-listing-object-keys.php │ │ ├── s3-request-with-temp-credentials.php │ │ ├── s3-copying-objects.php │ │ ├── s3-request-fed-user-with-temp-credentials.php │ │ ├── s3-deleting-multi-objects-versioned.php │ │ └── s3-multipart-upload-using-lowlevel-php-sdk-api.php └── java_examples │ └── S3Examples │ ├── DualStackEndpoints.java │ ├── DeleteObjectNonVersionedBucket.java │ ├── CopyObjectSingleOperation.java │ └── HighLevelAbortMultipartUpload.java ├── doc_source ├── glossary.md ├── Appendices.md ├── BucketAccess.md ├── restoring-objects-rest.md ├── UsingTheBotoAPI.md ├── configure-requester-pays-rest.md ├── UploadObjSingleOpREST.md ├── transitioning-object-versions.md ├── DeletingAnObjectsUsingREST.md ├── SelectObjectContentUsingOtherSDKs.md ├── SelectObjectContentUsingRestApi.md ├── tagging-managing.md ├── RetrieveObjSingleOpREST.md ├── UploadObjSingleOpCLI.md ├── ErrorMessage.md ├── tagging-manage-console.md ├── DeletingMultipleObjectsUsingREST.md ├── RetrievingObjectsfromVersioningSuspendedBuckets.md ├── restoring-objects-console.md ├── manage-lifecycle-using-console.md ├── UsingRouting.md ├── SOAPAPI3.md ├── ListingObjectKeysUsingREST.md ├── HandlingErrors.md ├── s3-glacier-select-sql-reference-sql-functions.md ├── ManageCorsUsingConsole.md ├── ShareObjectPreSignedURLVSExplorer.md ├── lifecycle-additional-considerations.md ├── manage-acls-using-console.md ├── manage-lifecycle-using-ruby.md ├── ErrorDetails.md ├── s3-glacier-select-sql-reference.md ├── ErrorResponse.md ├── qfacts.md ├── ConfigWebSiteREST.md ├── batch-ops-operations.md ├── CopyingObjctsMPUapi.md ├── ManageCorsUsing.md ├── manage-lifecycle-using-rest.md ├── SSEUsingConsole.md ├── ErrorCode.md ├── CopyingObjectsUsingAPIs.md ├── mpListPartsJavaAPI.md ├── EnableCorsUsingREST.md ├── storage_lens_view_metrics_export.md ├── hosting-websites-on-s3-examples.md ├── S3OutpostsJavaExamples.md ├── configure-requester-pays-console.md ├── using-mobile-sdks.md ├── usingLLmpuPHP.md ├── s3-glacier-select-sql-reference-aggregate.md ├── MakingAuthenticatedRequests.md ├── UploadInSingleOp.md ├── AuthUsingTempSessionToken.md ├── managing-acls.md ├── HowDoIWebsiteConfiguration.md ├── S3OutpostsExamples.md ├── VersionSuspendedBehavior.md ├── DeletingOneObject.md ├── enable-logging-console.md ├── vulnerability-analysis-and-management.md ├── replication-additional-configs.md ├── manage-objects-versioned-bucket.md ├── usingHLmpuDotNet.md ├── using-iam-policies.md ├── DNSConsiderations.md ├── using-aws-amplify.md ├── AuthUsingTempFederationToken.md ├── usingLLmpuDotNet.md ├── UsingEncryption.md ├── batch-ops-examples.md ├── BucketPayerValues.md ├── 
s3-glacier-select-sql-reference-conversion.md ├── UsingRESTAPImpUpload.md ├── deleting-log-files-lifecycle.md ├── UsingCLImpUpload.md ├── monitoring-overview.md ├── storage_lens_view_metrics.md ├── S3LensExamples.md ├── CopyingObjctsUsingRESTMPUapi.md ├── DeletingMultipleObjects.md ├── UsingRESTError.md ├── LLTrackProgressMPUNet.md ├── ManagingBucketWebsiteConfig.md ├── S3_ACLs_UsingACLs.md ├── LLlistMPuploadsPHP.md ├── replication-example-walkthroughs.md ├── UsingSOAPOperations.md ├── ObjectAndSubResource.md ├── RetMetaOfObjVersion.md ├── UsingMFADelete.md ├── UsingSOAPError.md ├── DeletingObjectsfromVersioningSuspendedBuckets.md ├── LLlistMPuploadsDotNet.md ├── lifecycle-expire-general-considerations.md ├── S3TorrentCharge.md ├── BucketBilling.md ├── LLAbortMPUphp.md ├── network-isolation.md ├── CopyingObjectUsingREST.md ├── LLAbortMPUnet.md ├── usingHLmpuJava.md ├── RestoringPreviousVersions.md ├── batch-ops-copy-object.md ├── ObjectOperations.md ├── UsingHTTPPOST.md ├── Welcome.md ├── AddingObjectstoVersionSuspendedBuckets.md ├── s3-glacier-select-sql-reference-operators.md ├── auth-request-sig-v2.md ├── uploadobjusingmpu-ruby-sdk.md ├── UploadObjSingleOpPHP.md ├── ObjectsinRequesterPaysBuckets.md ├── S3TorrentPublish.md ├── cors-troubleshooting.md ├── storage_lens_understanding_metrics_export_schema.md ├── HTTPPOSTFlash.md ├── access-points.md ├── S3Torrent.md ├── inter-network-traffic-privacy.md ├── CopyingObjectUsingJava.md ├── access-points-restrictions-limitations.md ├── LoggingWebsiteTraffic.md ├── RequesterPaysBucketConfiguration.md ├── ConfigWebSitePHP.md ├── uploadobjusingmpu.md ├── GettingObjectsUsingAPIs.md ├── RelatedResources012.md ├── security.md ├── batch-ops-put-object-tagging.md ├── s3-glacier-select-sql-reference-data-types.md ├── S3TorrentRetrieve.md ├── HLAbortMPUploadsJava.md └── CopyingObjectUsingRuby.md ├── .github └── PULL_REQUEST_TEMPLATE.md ├── LICENSE-SUMMARY ├── README.md └── LICENSE-SAMPLECODE /code_examples/dotnet_examples/.gitignore: -------------------------------------------------------------------------------- 1 | **/.vs/ 2 | **/packages/ 3 | **/bin/ 4 | **/obj/ 5 | -------------------------------------------------------------------------------- /doc_source/glossary.md: -------------------------------------------------------------------------------- 1 | # AWS glossary 2 | 3 | For the latest AWS terminology, see the [AWS glossary](https://docs.aws.amazon.com/general/latest/gr/glos-chap.html) in the *AWS General Reference*\. -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 7 | -------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples/App.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /doc_source/Appendices.md: -------------------------------------------------------------------------------- 1 | # Appendices 2 | 3 | This Amazon Simple Storage Service Developer Guide appendix includes the following sections\.
4 | 5 | **Topics** 6 | + [Appendix a: Using the SOAP API](SOAPAPI3.md) 7 | + [Appendix b: Authenticating requests \(AWS signature version 2\)](auth-request-sig-v2.md) -------------------------------------------------------------------------------- /doc_source/BucketAccess.md: -------------------------------------------------------------------------------- 1 | # Buckets and access control 2 | 3 | Each bucket has an associated access control policy\. This policy governs the creation, deletion, and enumeration of objects within the bucket\. For more information, see [Identity and access management in Amazon S3](s3-access-control.md)\. -------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples/packages.config: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /LICENSE-SUMMARY: -------------------------------------------------------------------------------- 1 | Copyright ${THIS_YEAR} Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. 4 | 5 | The sample code within this documentation is made available under a modified MIT license. See the LICENSE-SAMPLECODE file. 6 | -------------------------------------------------------------------------------- /doc_source/restoring-objects-rest.md: -------------------------------------------------------------------------------- 1 | # Restore an archived object using the REST API 2 | 3 | Amazon S3 provides an API for you to initiate an archive restoration\. For more information, see [POST Object restore](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOSTrestore.html) in the *Amazon Simple Storage Service API Reference*\. -------------------------------------------------------------------------------- /doc_source/UsingTheBotoAPI.md: -------------------------------------------------------------------------------- 1 | # Using the AWS SDK for Python \(Boto\) 2 | 3 | Boto is a Python package that provides interfaces to AWS, including Amazon S3\. For more information about Boto, go to the [AWS SDK for Python \(Boto\)](https://aws.amazon.com/sdk-for-python/)\. The getting started link on this page provides step\-by\-step instructions to get started\. -------------------------------------------------------------------------------- /doc_source/configure-requester-pays-rest.md: -------------------------------------------------------------------------------- 1 | # Configure Requester Pays with the REST API 2 | 3 | **Topics** 4 | + [Setting the requestPayment Bucket Configuration](RequesterPaysBucketConfiguration.md) 5 | + [Retrieving the requestPayment Configuration](BucketPayerValues.md) 6 | + [Downloading objects in Requester Pays buckets](ObjectsinRequesterPaysBuckets.md) -------------------------------------------------------------------------------- /doc_source/UploadObjSingleOpREST.md: -------------------------------------------------------------------------------- 1 | # Upload an object using the REST API 2 | 3 | You can use the AWS SDK to upload an object\. However, if your application requires it, you can send REST requests directly\. You can send a PUT request to upload data in a single operation\. For more information, see [PUT Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html)\.
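The following is a minimal sketch of that flow, assuming the AWS SDK for PHP v3 is available to compute the request signature and that the bucket and key names are placeholders: it presigns a `PutObject` request, and then sends the `PUT` itself as a plain HTTP call\.

```php
<?php

require 'vendor/autoload.php';

use Aws\S3\S3Client;

$s3 = new S3Client([
    'version' => 'latest',
    'region' => 'us-east-1'
]);

// Presign a PutObject request; the SDK computes the signature.
$cmd = $s3->getCommand('PutObject', [
    'Bucket' => 'my-example-bucket', // placeholder bucket name
    'Key' => 'example-object.txt'    // placeholder object key
]);
$url = (string) $s3->createPresignedRequest($cmd, '+10 minutes')->getUri();

// The upload itself is an ordinary HTTP PUT against that URL.
$context = stream_context_create([
    'http' => ['method' => 'PUT', 'content' => 'Hello, world!']
]);
file_get_contents($url, false, $context);
```

Any HTTP client can send the final `PUT`; the SDK is used here only for request signing\.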
-------------------------------------------------------------------------------- /doc_source/transitioning-object-versions.md: -------------------------------------------------------------------------------- 1 | # Transitioning object versions 2 | 3 | You can define lifecycle configuration rules for objects that have a well\-defined lifecycle to transition object versions to the `S3 Glacier` storage class at a specific time in the object's lifetime\. For more information, see [Object lifecycle management](object-lifecycle-mgmt.md)\. -------------------------------------------------------------------------------- /doc_source/DeletingAnObjectsUsingREST.md: -------------------------------------------------------------------------------- 1 | # Deleting an object using the REST API 2 | 3 | You can use the AWS SDKs to delete an object\. However, if your application requires it, you can send REST requests directly\. For more information, go to [DELETE Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html) in the *Amazon Simple Storage Service API Reference*\. -------------------------------------------------------------------------------- /doc_source/SelectObjectContentUsingOtherSDKs.md: -------------------------------------------------------------------------------- 1 | # Selecting content from objects using other SDKs 2 | 3 | You can select the contents of an object with Amazon S3 Select by using other SDKs\. For more information, see the following: 4 | + Python: [Using the AWS SDK for Python \(Boto\)](UsingTheBotoAPI.md)\. 5 | + Ruby: [Using the AWS SDK for Ruby \- Version 3](UsingTheMPRubyAPI.md)\. -------------------------------------------------------------------------------- /doc_source/SelectObjectContentUsingRestApi.md: -------------------------------------------------------------------------------- 1 | # Selecting content from objects using the REST API 2 | 3 | You can use the AWS SDK to select content from objects\. However, if your application requires it, you can send REST requests directly\. For more information about the request and response format, see [SELECT Object Content](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html)\. -------------------------------------------------------------------------------- /doc_source/tagging-managing.md: -------------------------------------------------------------------------------- 1 | # Managing object tags 2 | 3 | This section explains how you can add object tags programmatically using the AWS SDK for Java, the AWS SDK for \.NET, or the Amazon S3 console\. 4 | 5 | **Topics** 6 | + [Managing object tags using the console](tagging-manage-console.md) 7 | + [Managing tags using the AWS SDK for Java](tagging-manage-javasdk.md) 8 | + [Managing tags using the AWS SDK for \.NET](tagging-manage-dotnet.md) -------------------------------------------------------------------------------- /doc_source/RetrieveObjSingleOpREST.md: -------------------------------------------------------------------------------- 1 | # Get an object using the REST API 2 | 3 | You can use the AWS SDK to retrieve objects from a bucket\. However, if your application requires it, you can send REST requests directly\. You can send a GET request to retrieve an object\. For more information about the request and response format, go to [Get Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html)\.
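As a minimal sketch \(assuming the AWS SDK for PHP v3 and placeholder bucket and key names\), the same pattern works for retrieval: presign a `GetObject` request, then perform the `GET` as a plain HTTP call\.

```php
<?php

require 'vendor/autoload.php';

use Aws\S3\S3Client;

$s3 = new S3Client([
    'version' => 'latest',
    'region' => 'us-east-1'
]);

// Presign a GetObject request, then fetch it over plain HTTP.
$cmd = $s3->getCommand('GetObject', [
    'Bucket' => 'my-example-bucket', // placeholder bucket name
    'Key' => 'example-object.txt'    // placeholder object key
]);
$url = (string) $s3->createPresignedRequest($cmd, '+10 minutes')->getUri();

echo file_get_contents($url);
```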
-------------------------------------------------------------------------------- /doc_source/UploadObjSingleOpCLI.md: -------------------------------------------------------------------------------- 1 | # Upload an object using the CLI 2 | 3 | You can use the AWS SDKs to upload an object\. However, if your application requires it, you can use the AWS Command Line Interface \(AWS CLI\) directly\. You can use the `put-object` command to upload data in a single operation\. For more information, see [put\-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) in the *AWS CLI Command Reference*\. -------------------------------------------------------------------------------- /doc_source/ErrorMessage.md: -------------------------------------------------------------------------------- 1 | # Error message 2 | 3 | The error message contains a generic description of the error condition in English\. It is intended for a human audience\. Simple programs display the message directly to the end user if they encounter an error condition that they don't know how to handle or don't care to handle\. Sophisticated programs with more exhaustive error handling and proper internationalization are more likely to ignore the error message\. -------------------------------------------------------------------------------- /doc_source/tagging-manage-console.md: -------------------------------------------------------------------------------- 1 | # Managing object tags using the console 2 | 3 | You can use the Amazon S3 console to add tags to new objects when you upload them, or you can add them to existing objects\. For instructions on how to add tags to objects using the Amazon S3 console, see [Adding Object Tags](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/add-object-tags.html) in the Amazon Simple Storage Service Console User Guide\. -------------------------------------------------------------------------------- /doc_source/DeletingMultipleObjectsUsingREST.md: -------------------------------------------------------------------------------- 1 | # Deleting multiple objects using the REST API 2 | 3 | You can use the AWS SDKs to delete multiple objects using the Multi\-Object Delete API\. However, if your application requires it, you can send REST requests directly\. For more information, go to [Delete Multiple Objects](https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html) in the *Amazon Simple Storage Service API Reference*\. -------------------------------------------------------------------------------- /doc_source/RetrievingObjectsfromVersioningSuspendedBuckets.md: -------------------------------------------------------------------------------- 1 | # Retrieving objects from versioning\-suspended buckets 2 | 3 | A `GET Object` request returns the current version of an object whether you've enabled versioning on a bucket or not\. The following figure shows how a simple `GET` returns the current version of an object\. 4 | 5 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_GET_suspended.png) -------------------------------------------------------------------------------- /doc_source/restoring-objects-console.md: -------------------------------------------------------------------------------- 1 | # Restore an archived object using the Amazon S3 console 2 | 3 | You can use the Amazon S3 console to restore a copy of an object that has been archived\.
For instructions on how to restore an archive using the AWS Management Console, see [How do I restore an S3 object that has been archived?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/restore-archived-objects.html) in the *Amazon Simple Storage Service Console User Guide*\. -------------------------------------------------------------------------------- /doc_source/manage-lifecycle-using-console.md: -------------------------------------------------------------------------------- 1 | # Manage an object's lifecycle using the Amazon S3 console 2 | 3 | You can specify S3 Lifecycle rules on a bucket using the Amazon S3 console\. 4 | 5 | For instructions on how to set up S3 Lifecycle rules using the AWS Management Console, see [How Do I Create a Lifecycle Policy for an S3 Bucket?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html) in the *Amazon Simple Storage Service Console User Guide*\. -------------------------------------------------------------------------------- /doc_source/UsingRouting.md: -------------------------------------------------------------------------------- 1 | # Request routing 2 | 3 | **Topics** 4 | + [Request redirection and the REST API](Redirects.md) 5 | + [DNS considerations](DNSConsiderations.md) 6 | 7 | Programs that make requests against buckets created using the `CreateBucketConfiguration` API must support redirects\. Additionally, some clients that do not respect DNS TTLs might encounter issues\. 8 | 9 | This section describes routing and DNS issues to consider when designing your service or application for use with Amazon S3\. -------------------------------------------------------------------------------- /doc_source/SOAPAPI3.md: -------------------------------------------------------------------------------- 1 | # Appendix a: Using the SOAP API 2 | 3 | **Note** 4 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. 5 | 6 | This section contains information specific to the Amazon S3 SOAP API\. 7 | 8 | **Note** 9 | SOAP requests, both authenticated and anonymous, must be sent to Amazon S3 using SSL\. Amazon S3 returns an error when you send a SOAP request over HTTP\. -------------------------------------------------------------------------------- /doc_source/ListingObjectKeysUsingREST.md: -------------------------------------------------------------------------------- 1 | # Listing Keys Using the REST API 2 | 3 | You can use the AWS SDK to list the object keys in a bucket\. However, if your application requires it, you can send REST requests directly\. You can send a GET request to return some or all of the objects in a bucket or you can use selection criteria to return a subset of the objects in a bucket\. For more information, go to [GET Bucket \(List Objects\) Version 2](https://docs.aws.amazon.com/AmazonS3/latest/API/v2-RESTBucketGET.html)\. -------------------------------------------------------------------------------- /doc_source/HandlingErrors.md: -------------------------------------------------------------------------------- 1 | # Handling REST and SOAP errors 2 | 3 | **Topics** 4 | + [The REST error response](UsingRESTError.md) 5 | + [The SOAP error response](UsingSOAPError.md) 6 | + [Amazon S3 error best practices](ErrorBestPractices.md) 7 | 8 | This section describes REST and SOAP errors and how to handle them\.
9 | 10 | **Note** 11 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. -------------------------------------------------------------------------------- /doc_source/s3-glacier-select-sql-reference-sql-functions.md: -------------------------------------------------------------------------------- 1 | # SQL Functions 2 | 3 | Amazon S3 Select and S3 Glacier Select support several SQL functions\. 4 | 5 | **Topics** 6 | + [Aggregate Functions \(Amazon S3 Select only\)](s3-glacier-select-sql-reference-aggregate.md) 7 | + [Conditional Functions](s3-glacier-select-sql-reference-conditional.md) 8 | + [Conversion Functions](s3-glacier-select-sql-reference-conversion.md) 9 | + [Date Functions](s3-glacier-select-sql-reference-date.md) 10 | + [String Functions](s3-glacier-select-sql-reference-string.md) -------------------------------------------------------------------------------- /doc_source/ManageCorsUsingConsole.md: -------------------------------------------------------------------------------- 1 | # Enabling cross\-origin resource sharing \(CORS\) using the AWS Management Console 2 | 3 | You can use the AWS Management Console to set a CORS configuration on your bucket\. For instructions, see [How do I add cross\-domain resource sharing with CORS?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/add-cors-configuration.html) in the *Amazon Simple Storage Service Console User Guide*\. 4 | 5 | **Important** 6 | In the new S3 console, the CORS configuration must be JSON\. For JSON examples, see [Cross\-origin resource sharing \(CORS\)](cors.md)\. -------------------------------------------------------------------------------- /doc_source/ShareObjectPreSignedURLVSExplorer.md: -------------------------------------------------------------------------------- 1 | # Generate a presigned object URL using AWS Explorer for Visual Studio 2 | 3 | If you are using Visual Studio, you can generate a presigned URL for an object without writing any code by using AWS Explorer for Visual Studio\. Anyone with this URL can download the object\. For more information, go to [Using Amazon S3 from AWS Explorer](https://docs.aws.amazon.com/AWSToolkitVS/latest/UserGuide/using-s3.html)\. 4 | 5 | For instructions about how to install the AWS Explorer, see [Using the AWS SDKs, CLI, and Explorers](UsingAWSSDK.md)\. -------------------------------------------------------------------------------- /doc_source/lifecycle-additional-considerations.md: -------------------------------------------------------------------------------- 1 | # Additional considerations for lifecycle configuration 2 | 3 | When configuring the lifecycle of objects, you need to understand the following guidelines for transitioning objects, setting expiration dates, and other object configurations\.
4 | 5 | **Topics** 6 | + [Transitioning objects using Amazon S3 Lifecycle](lifecycle-transition-general-considerations.md) 7 | + [Understanding object expiration](lifecycle-expire-general-considerations.md) 8 | + [Lifecycle and other bucket configurations](lifecycle-and-other-bucket-config.md) -------------------------------------------------------------------------------- /doc_source/manage-acls-using-console.md: -------------------------------------------------------------------------------- 1 | # Managing ACLs in the AWS Management Console 2 | 3 | The AWS Management Console provides a UI for you to grant ACL\-based access permissions to your buckets and objects\. For information on setting ACL\-based access permissions in the console, see [How Do I Set ACL Bucket Permissions?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-bucket-permissions.html) and [How Do I Set Permissions on an Object?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/set-object-permissions.html) in the *Amazon Simple Storage Service Console User Guide*\. -------------------------------------------------------------------------------- /doc_source/manage-lifecycle-using-ruby.md: -------------------------------------------------------------------------------- 1 | # Manage an object's lifecycle using the AWS SDK for Ruby 2 | 3 | You can use the AWS SDK for Ruby to manage S3 Lifecycle configuration on a bucket by using the class [Aws::S3::BucketLifecycle](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/BucketLifecycle.html)\. For more information about using the AWS SDK for Ruby with Amazon S3, see [Using the AWS SDK for Ruby \- Version 3](UsingTheMPRubyAPI.md)\. For more information about managing lifecycle configuration, see [Object lifecycle management](object-lifecycle-mgmt.md)\. -------------------------------------------------------------------------------- /doc_source/ErrorDetails.md: -------------------------------------------------------------------------------- 1 | # Further details 2 | 3 | Many error responses contain additional structured data meant to be read and understood by a developer diagnosing programming errors\. For example, if you send a Content\-MD5 header with a REST PUT request that doesn't match the digest calculated on the server, you receive a BadDigest error\. The error response also includes as detail elements the digest we calculated and the digest you told us to expect\. During development, you can use this information to diagnose the error\. In production, a well\-behaved program might include this information in its error log\. -------------------------------------------------------------------------------- /doc_source/s3-glacier-select-sql-reference.md: -------------------------------------------------------------------------------- 1 | # SQL Reference for Amazon S3 Select and S3 Glacier Select 2 | 3 | This reference contains a description of the structured query language \(SQL\) elements that are supported by Amazon S3 Select and S3 Glacier Select\.
4 | 5 | **Topics** 6 | + [SELECT Command](s3-glacier-select-sql-reference-select.md) 7 | + [Data Types](s3-glacier-select-sql-reference-data-types.md) 8 | + [Operators](s3-glacier-select-sql-reference-operators.md) 9 | + [Reserved Keywords](s3-glacier-select-sql-reference-keyword-list.md) 10 | + [SQL Functions](s3-glacier-select-sql-reference-sql-functions.md) -------------------------------------------------------------------------------- /doc_source/ErrorResponse.md: -------------------------------------------------------------------------------- 1 | # Error response 2 | 3 | **Topics** 4 | + [Error code](ErrorCode.md) 5 | + [Error message](ErrorMessage.md) 6 | + [Further details](ErrorDetails.md) 7 | 8 | When an Amazon S3 request results in an error, the client receives an error response\. The exact format of the error response is API\-specific\. For example, the REST error response differs from the SOAP error response\. However, all error responses have common elements\. 9 | 10 | **Note** 11 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. -------------------------------------------------------------------------------- /doc_source/qfacts.md: -------------------------------------------------------------------------------- 1 | # Amazon S3 multipart upload limits 2 | 3 | The following table provides multipart upload core specifications\. For more information, see [Multipart upload overview](mpuoverview.md)\. 4 | 5 | 6 | | Item | Specification | 7 | | --- | --- | 8 | | Maximum object size | 5 TB | 9 | | Maximum number of parts per upload | 10,000 | 10 | | Part numbers | 1 to 10,000 \(inclusive\) | 11 | | Part size | 5 MB to 5 GB\. There is no size limit on the last part of your multipart upload\. | 12 | | Maximum number of parts returned for a list parts request | 1000 | 13 | | Maximum number of multipart uploads returned in a list multipart uploads request | 1000 | -------------------------------------------------------------------------------- /doc_source/ConfigWebSiteREST.md: -------------------------------------------------------------------------------- 1 | # Managing websites with the REST API 2 | 3 | **Topics** 4 | 5 | You can use the AWS Management Console or the AWS SDK to configure a bucket as a website\. However, if your application requires it, you can send REST requests directly\. For more information, see the following sections in the Amazon Simple Storage Service API Reference\. 6 | + [PUT Bucket website](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTwebsite.html) 7 | + [GET Bucket website](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETwebsite.html) 8 | + [DELETE Bucket website](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEwebsite.html) -------------------------------------------------------------------------------- /doc_source/batch-ops-operations.md: -------------------------------------------------------------------------------- 1 | # Operations 2 | 3 | S3 Batch Operations supports several different operations\. The topics in this section describe each of these operations\.
4 | 5 | **Topics** 6 | + [Put object copy](batch-ops-copy-object.md) 7 | + [Initiate restore object](batch-ops-initiate-restore-object.md) 8 | + [Invoking a Lambda function from Amazon S3 batch operations](batch-ops-invoke-lambda.md) 9 | + [Put object ACL](batch-ops-put-object-acl.md) 10 | + [Put object tagging](batch-ops-put-object-tagging.md) 11 | + [Managing S3 Object Lock retention dates](batch-ops-retention-date.md) 12 | + [Managing S3 Object Lock legal hold](batch-ops-legal-hold.md) -------------------------------------------------------------------------------- /doc_source/CopyingObjctsMPUapi.md: -------------------------------------------------------------------------------- 1 | # Copying objects using the multipart upload API 2 | 3 | The examples in this section show you how to copy objects greater than 5 GB using the multipart upload API\. You can copy objects less than 5 GB in a single operation\. For more information, see [Copying Objects in a Single Operation](CopyingObjectsUsingAPIs.md)\. 4 | 5 | **Topics** 6 | + [Copy an object using the AWS SDK for Java multipart upload API](CopyingObjctsUsingLLJavaMPUapi.md) 7 | + [Copy an Amazon S3 object using the AWS SDK for \.NET multipart upload API](CopyingObjctsUsingLLNetMPUapi.md) 8 | + [Copy object using the REST multipart upload API](CopyingObjctsUsingRESTMPUapi.md) -------------------------------------------------------------------------------- /doc_source/ManageCorsUsing.md: -------------------------------------------------------------------------------- 1 | # Enabling cross\-origin resource sharing \(CORS\) 2 | 3 | Enable cross\-origin resource sharing by setting a CORS configuration on your bucket using the AWS Management Console, the REST API, or the AWS SDKs\. 4 | 5 | **Topics** 6 | + [Enabling cross\-origin resource sharing \(CORS\) using the AWS Management Console](ManageCorsUsingConsole.md) 7 | + [Enabling cross\-origin resource sharing \(CORS\) using the AWS SDK for Java](ManageCorsUsingJava.md) 8 | + [Enabling cross\-origin resource sharing \(CORS\) using the AWS SDK for \.NET](ManageCorsUsingDotNet.md) 9 | + [Enabling cross\-origin resource sharing \(CORS\) using the REST API](EnableCorsUsingREST.md) -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-deleting-single-nonversioned-object.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | $keyname = '*** Your Object Key ***'; 9 | 10 | // Instantiate an Amazon S3 client. 11 | $s3 = new S3Client([ 12 | 'version' => 'latest', 13 | 'region' => 'us-east-1' 14 | ]); 15 | 16 | // Delete an object from the bucket. 17 | $s3->deleteObject([ 18 | 'Bucket' => $bucket, 19 | 'Key' => $keyname 20 | ]); 21 | -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-list-multipart-upload.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | 9 | // Instantiate an Amazon S3 client. 10 | $s3 = new S3Client([ 11 | 'version' => 'latest', 12 | 'region' => 'us-east-1' 13 | ]); 14 | 15 | // Retrieve a list of the current multipart uploads. 16 | $result = $s3->listMultipartUploads([ 17 | 'Bucket' => $bucket 18 | ]); 19 | 20 | // Write the list of uploads to the page. 21 | print_r($result->toArray()); 22 | -------------------------------------------------------------------------------- /doc_source/manage-lifecycle-using-rest.md: -------------------------------------------------------------------------------- 1 | # Manage an object's lifecycle using the REST API 2 | 3 | You can use the AWS Management Console to set the S3 Lifecycle configuration on your bucket\.
If your application requires it, you can also send REST requests directly\. The following sections in the *Amazon Simple Storage Service API Reference* describe the REST API related to the S3 Lifecycle configuration\. 4 | + [PUT Bucket lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) 5 | + [GET Bucket lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlifecycle.html) 6 | + [DELETE Bucket lifecycle](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETElifecycle.html) -------------------------------------------------------------------------------- /doc_source/SSEUsingConsole.md: -------------------------------------------------------------------------------- 1 | # Specifying Server\-Side Encryption Using the AWS Management Console 2 | 3 | When uploading an object using the AWS Management Console, you can specify server\-side encryption\. For an example of how to upload an object, see [Uploading S3 Objects](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/upload-objects.html)\. 4 | 5 | When you copy an object using the AWS Management Console, the console copies the object as is\. That is, if the copy source is encrypted, the target object is encrypted\. The console also allows you to add encryption to an object\. For more information, see [How Do I Add Encryption to an S3 Object?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/add-object-encryption.html)\. -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-check-sse.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | $keyname = '*** Your Object Key ***'; 9 | 10 | // Instantiate an Amazon S3 client. 11 | $s3 = new S3Client([ 12 | 'version' => 'latest', 13 | 'region' => 'us-east-1' 14 | ]); 15 | 16 | // Check which server-side encryption algorithm is used. 17 | $result = $s3->headObject([ 18 | 'Bucket' => $bucket, 19 | 'Key' => $keyname, 20 | ]); 21 | echo $result['ServerSideEncryption']; 22 | -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-abort-multipart-upload.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | $keyname = '*** Your Object Key ***'; 9 | $uploadId = '*** Upload ID of the upload to abort ***'; 10 | 11 | // Instantiate an Amazon S3 client. 12 | $s3 = new S3Client([ 13 | 'version' => 'latest', 14 | 'region' => 'us-east-1' 15 | ]); 16 | 17 | // Abort the multipart upload. 18 | $s3->abortMultipartUpload([ 19 | 'Bucket' => $bucket, 20 | 'Key' => $keyname, 21 | 'UploadId' => $uploadId, 22 | ]); 23 | -------------------------------------------------------------------------------- /doc_source/ErrorCode.md: -------------------------------------------------------------------------------- 1 | # Error code 2 | 3 | The error code is a string that uniquely identifies an error condition\. It is meant to be read and understood by programs that detect and handle errors by type\. Many error codes are common across SOAP and REST APIs, but some are API\-specific\. For example, NoSuchKey is universal, but UnexpectedContent can occur only in response to an invalid REST request\. In all cases, SOAP fault codes carry a prefix as indicated in the table of error codes, so that a NoSuchKey error is actually returned in SOAP as Client\.NoSuchKey\. 4 | 5 | **Note** 6 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\.
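To make the distinction concrete, the following is a short sketch \(assuming the AWS SDK for PHP v3 and hypothetical bucket and key names\) of a program that branches on the machine\-readable error code rather than on the error message\.

```php
<?php

require 'vendor/autoload.php';

use Aws\S3\S3Client;
use Aws\S3\Exception\S3Exception;

$s3 = new S3Client([
    'version' => 'latest',
    'region' => 'us-east-1'
]);

try {
    $s3->getObject([
        'Bucket' => 'my-example-bucket', // hypothetical bucket name
        'Key' => 'missing-object.txt'    // hypothetical object key
    ]);
} catch (S3Exception $e) {
    // Branch on the error code; the message is for humans only.
    switch ($e->getAwsErrorCode()) {
        case 'NoSuchKey':
            echo 'The requested key does not exist.' . PHP_EOL;
            break;
        case 'NoSuchBucket':
            echo 'The requested bucket does not exist.' . PHP_EOL;
            break;
        default:
            error_log($e->getAwsErrorMessage());
    }
}
```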
-------------------------------------------------------------------------------- /doc_source/CopyingObjectsUsingAPIs.md: -------------------------------------------------------------------------------- 1 | # Copying Objects in a Single Operation 2 | 3 | The examples in this section show how to copy objects up to 5 GB in a single operation\. For copying objects greater than 5 GB, you must use the multipart upload API\. For more information, see [Copying objects using the multipart upload API](CopyingObjctsMPUapi.md)\. 4 | 5 | **Topics** 6 | + [Copy an Object Using the AWS SDK for Java](CopyingObjectUsingJava.md) 7 | + [Copy an Amazon S3 Object in a Single Operation Using the AWS SDK for \.NET](CopyingObjectUsingNetSDK.md) 8 | + [Copy an Object Using the AWS SDK for PHP](CopyingObjectUsingPHP.md) 9 | + [Copy an Object Using the AWS SDK for Ruby](CopyingObjectUsingRuby.md) 10 | + [Copy an Object Using the REST API](CopyingObjectUsingREST.md) -------------------------------------------------------------------------------- /doc_source/mpListPartsJavaAPI.md: -------------------------------------------------------------------------------- 1 | # Using the AWS Java SDK for a multipart upload \(low\-level API\) 2 | 3 | **Topics** 4 | + [Upload a file](llJavaUploadFile.md) 5 | + [List multipart uploads](LLlistMPuploadsJava.md) 6 | + [Abort a multipart upload](LLAbortMPUJava.md) 7 | 8 | The AWS SDK for Java exposes a low\-level API that closely resembles the Amazon S3 REST API for multipart uploads \(see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\)\. Use the low\-level API when you need to pause and resume multipart uploads, vary part sizes during the upload, or do not know the size of the upload data in advance\. When you don't have these requirements, use the high\-level API \(see [Using the AWS Java SDK for multipart upload \(high\-level API\)](usingHLmpuJava.md)\)\. -------------------------------------------------------------------------------- /doc_source/EnableCorsUsingREST.md: -------------------------------------------------------------------------------- 1 | # Enabling cross\-origin resource sharing \(CORS\) using the REST API 2 | 3 | To set a CORS configuration on your bucket, you can use the AWS Management Console\. If your application requires it, you can also send REST requests directly\. The following sections in the *Amazon Simple Storage Service API Reference* describe the REST API actions related to the CORS configuration: 4 | + [PutBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTcors.html) 5 | + [GetBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETcors.html) 6 | + [DeleteBucketCors](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketDELETEcors.html) 7 | + [OPTIONS object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## This guide has been archived 2 | This guide has been archived. Please see https://github.com/awsdocs/amazon-s3-userguide which combines information from the three retired Amazon S3 guides: Amazon S3 Developer Guide, Console User Guide, and Getting Started Guide. 3 | 4 | ## Amazon S3 Developer Guide 5 | 6 | The open source version of the Amazon S3 Developer Guide.
You can submit feedback & requests for changes by submitting issues in this repo or by making proposed changes & submitting a pull request. 7 | 8 | ## License Summary 9 | 10 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file. 11 | 12 | The sample code within this documentation is made available under a modified MIT license. See the LICENSE-SAMPLECODE file. 13 | -------------------------------------------------------------------------------- /doc_source/storage_lens_view_metrics_export.md: -------------------------------------------------------------------------------- 1 | # Viewing Amazon S3 Storage Lens metrics using a data export 2 | 3 | Amazon S3 Storage Lens metrics are generated daily in CSV or Apache Parquet\-formatted metrics export files and placed in an S3 bucket in your account\. From there, you can ingest the metrics export into the analytics tools of your choice, such as Amazon QuickSight and Amazon Athena, where you can analyze storage usage and activity trends\. 4 | 5 | **Topics** 6 | + [Using an AWS KMS CMK to encrypt your metrics exports](storage_lens_encrypt_permissions.md) 7 | + [What is an S3 Storage Lens export manifest?](storage_lens_whatis_metrics_export_manifest.md) 8 | + [Understanding the Amazon S3 Storage Lens export schema](storage_lens_understanding_metrics_export_schema.md) -------------------------------------------------------------------------------- /doc_source/hosting-websites-on-s3-examples.md: -------------------------------------------------------------------------------- 1 | # Example walkthroughs \- Hosting websites on Amazon S3 2 | 3 | This section provides two examples\. In the first example, you configure a bucket for website hosting, upload a sample index document, and test the website using the Amazon S3 website endpoint for the bucket\. The second example shows how you can use your own domain, such as `example.com`, instead of the Amazon S3 bucket website endpoint, and serve content from an Amazon S3 bucket configured as a website\. The example also shows how Amazon S3 offers root domain support\. 4 | 5 | **Topics** 6 | + [Configuring a static website](HostingWebsiteOnS3Setup.md) 7 | + [Configuring a static website using a custom domain registered with Route 53](website-hosting-custom-domain-walkthrough.md) -------------------------------------------------------------------------------- /doc_source/S3OutpostsJavaExamples.md: -------------------------------------------------------------------------------- 1 | # Amazon S3 on Outposts examples using the SDK for Java 2 | 3 | With Amazon S3 on Outposts, you can create S3 buckets on your AWS Outposts and easily store and retrieve objects on\-premises for applications that require local data access, local data processing, and data residency\. You can use S3 on Outposts through the AWS Management Console, the AWS SDK for Java, the other AWS SDKs, or the REST API\. For more information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)\. 4 | 5 | The following examples show how you can use S3 on Outposts with the AWS SDK for Java\.
6 | 7 | **Topics** 8 | + [Creating and managing Amazon S3 on Outposts bucket](S3OutpostsBucketJava.md) 9 | + [Working with objects using Amazon S3 on Outposts](S3OutpostsObjectJava.md) -------------------------------------------------------------------------------- /doc_source/configure-requester-pays-console.md: -------------------------------------------------------------------------------- 1 | # Configure Requester Pays by using the Amazon S3 console 2 | 3 | You can configure a bucket for Requester Pays by using the Amazon S3 console\. 4 | 5 | **To enable Requester Pays for an S3 bucket** 6 | 7 | 1. Sign in to the AWS Management Console and open the Amazon S3 console at [https://console\.aws\.amazon\.com/s3/](https://console.aws.amazon.com/s3/)\. 8 | 9 | 1. In the **Buckets** list, choose the name of the bucket that you want to enable Requester Pays for\. 10 | 11 | 1. Choose **Properties**\. 12 | 13 | 1. Under **Requester pays**, choose **Edit**\. 14 | 15 | 1. Choose **Enable**, and choose **Save changes**\. 16 | 17 | Amazon S3 enables Requester Pays for your bucket and displays your **Bucket overview**\. Under **Requester pays**, you see **Enabled**\. -------------------------------------------------------------------------------- /doc_source/using-mobile-sdks.md: -------------------------------------------------------------------------------- 1 | # Using the AWS Mobile SDKs for iOS and Android 2 | 3 | You can use the AWS Mobile SDKs for [Android](https://aws-amplify.github.io/docs/android/storage) and [iOS](https://aws-amplify.github.io/docs/ios/storage) to quickly and easily integrate robust cloud backends into your existing mobile apps\. You can configure and use features like user sign\-in, databases, push notifications, and more, without being an AWS expert\. 4 | 5 | The AWS Mobile SDKs provide easy access to Amazon S3 and many other AWS services\. To get started using the AWS Mobile SDKs, see [Getting Started with the AWS Mobile SDKs](https://docs.aws.amazon.com/aws-mobile/latest/developerguide/getting-started.html)\. 6 | 7 | ## More Info 8 | 9 | [Using the AWS Amplify JavaScript Library](using-aws-amplify.md) -------------------------------------------------------------------------------- /doc_source/usingLLmpuPHP.md: -------------------------------------------------------------------------------- 1 | # Using the AWS PHP SDK for multipart upload \(low\-level API\) 2 | 3 | **Topics** 4 | + [Upload a file in multiple parts using the PHP SDK low\-level API](LLuploadFilePHP.md) 5 | + [List multipart uploads using the low\-level AWS SDK for PHP API](LLlistMPuploadsPHP.md) 6 | + [Abort a multipart upload](LLAbortMPUphp.md) 7 | 8 | The AWS SDK for PHP exposes a low\-level API that closely resembles the Amazon S3 REST API for multipart upload \(see [Using the REST API for multipart upload](UsingRESTAPImpUpload.md)\)\. Use the low\-level API when you need to pause and resume multipart uploads, vary part sizes during the upload, or if you do not know the size of the data in advance\. Use the AWS SDK for PHP high\-level abstractions \(see [Using the AWS PHP SDK for multipart upload](usingHLmpuPHP.md)\) whenever you don't have these requirements\. -------------------------------------------------------------------------------- /doc_source/s3-glacier-select-sql-reference-aggregate.md: -------------------------------------------------------------------------------- 1 | # Aggregate Functions \(Amazon S3 Select only\) 2 | 3 | Amazon S3 Select supports the following aggregate functions\.
4 | 5 | **Note** 6 | S3 Glacier Select does not support aggregate functions\. 7 | 8 | 9 | | Function | Argument Type | Return Type | 10 | | --- | --- | --- | 11 | | AVG\(expression\) | INT, FLOAT, DECIMAL | DECIMAL for an INT argument, FLOAT for a floating\-point argument; otherwise the same as the argument data type\. | 12 | | COUNT | \- | INT | 13 | | MAX\(expression\) | INT, DECIMAL | Same as the argument type\. | 14 | | MIN\(expression\) | INT, DECIMAL | Same as the argument type\. | 15 | | SUM\(expression\) | INT, FLOAT, DOUBLE, DECIMAL | INT for INT argument, FLOAT for a floating\-point argument; otherwise, the same as the argument data type\. | -------------------------------------------------------------------------------- /doc_source/MakingAuthenticatedRequests.md: -------------------------------------------------------------------------------- 1 | # Making requests using the AWS SDKs 2 | 3 | **Topics** 4 | + [Making requests using AWS account or IAM user credentials](AuthUsingAcctOrUserCredentials.md) 5 | + [Making requests using IAM user temporary credentials](AuthUsingTempSessionToken.md) 6 | + [Making requests using federated user temporary credentials](AuthUsingTempFederationToken.md) 7 | 8 | You can send authenticated requests to Amazon S3 either by using the AWS SDKs or by making the REST API calls directly in your application\. The AWS SDK API uses the credentials that you provide to compute the signature for authentication\. If you use the REST API directly in your applications, you must write the necessary code to compute the signature for authenticating your request\. For a list of available AWS SDKs, see [Sample Code and Libraries](https://aws.amazon.com/code/)\. -------------------------------------------------------------------------------- /doc_source/UploadInSingleOp.md: -------------------------------------------------------------------------------- 1 | # Uploading an object in a single operation 2 | 3 | **Topics** 4 | + [Upload an object Using the AWS SDK for Java](UploadObjSingleOpJava.md) 5 | + [Upload an object using the AWS SDK for \.NET](UploadObjSingleOpNET.md) 6 | + [Upload an object using the AWS SDK for C\+\+](UploadObjSingleCpp.md) 7 | + [Upload an object using the AWS SDK for PHP](UploadObjSingleOpPHP.md) 8 | + [Upload an object using the AWS SDK for Ruby](UploadObjSingleOpRuby.md) 9 | + [Upload an object using the REST API](UploadObjSingleOpREST.md) 10 | + [Upload an object using the CLI](UploadObjSingleOpCLI.md) 11 | 12 | You can use the AWS SDK to upload objects\. The SDK provides wrapper libraries for you to upload data easily\. However, if your application requires it, you can use the REST API directly in your application\. You can also use the AWS Command Line Interface\.
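As a minimal sketch of the SDK path \(a compact version of the `s3-uploading-object.php` sample included in this repository, with placeholder names\), a single\-operation upload is one `putObject` call\.

```php
<?php

require 'vendor/autoload.php';

use Aws\S3\S3Client;

$s3 = new S3Client([
    'version' => 'latest',
    'region' => 'us-east-1'
]);

// One call uploads the entire object in a single operation.
$s3->putObject([
    'Bucket' => 'my-example-bucket', // placeholder bucket name
    'Key' => 'example-object.txt',   // placeholder object key
    'Body' => 'Hello, world!'
]);
```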
-------------------------------------------------------------------------------- /doc_source/AuthUsingTempSessionToken.md: -------------------------------------------------------------------------------- 1 | # Making requests using IAM user temporary credentials 2 | 3 | **Topics** 4 | + [Making requests using IAM user temporary credentials \- AWS SDK for Java](AuthUsingTempSessionTokenJava.md) 5 | + [Making requests using IAM user temporary credentials \- AWS SDK for \.NET](AuthUsingTempSessionTokenDotNet.md) 6 | + [Making requests using AWS account or IAM user temporary credentials \- AWS SDK for PHP](AuthUsingTempSessionTokenPHP.md) 7 | + [Making requests using IAM user temporary credentials \- AWS SDK for Ruby](AuthUsingTempSessionTokenRuby.md) 8 | 9 | An AWS account or an IAM user can request temporary security credentials and use them to send authenticated requests to Amazon S3\. This section provides examples of how to use the AWS SDKs for Java, \.NET, PHP, and Ruby to obtain temporary security credentials and use them to authenticate your requests to Amazon S3\. -------------------------------------------------------------------------------- /doc_source/managing-acls.md: -------------------------------------------------------------------------------- 1 | # Managing ACLs 2 | 3 | There are several ways you can add grants to your resource ACL\. You can use the AWS Management Console, which provides a UI to manage permissions without writing any code\. You can also use the AWS Command Line Interface \(CLI\), REST API, or one of the AWS SDKs\. These libraries further simplify your programming tasks\. 4 | 5 | **Topics** 6 | + [Managing ACLs in the AWS Management Console](manage-acls-using-console.md) 7 | + [Managing ACLs Using the AWS SDK for Java](acl-using-java-sdk.md) 8 | + [Managing ACLs Using the AWS SDK for \.NET](acl-using-dot-net-sdk.md) 9 | + [Managing ACLs Using the REST API](acl-using-rest-api.md) 10 | 11 | For more information about managing ACLs using the AWS CLI, see [put\-bucket\-acl](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-acl.html) in the *AWS CLI Command Reference*\. -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-upload-sse.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | $keyname = '*** Your Object Key ***'; 9 | $filepath = '*** Path to the File to Upload ***'; 10 | 11 | // Instantiate an Amazon S3 client. 12 | $s3 = new S3Client([ 13 | 'version' => 'latest', 14 | 'region' => 'us-east-1' 15 | ]); 16 | 17 | // Upload a file with server-side encryption. 18 | $result = $s3->putObject([ 19 | 'Bucket' => $bucket, 20 | 'Key' => $keyname, 21 | 'SourceFile' => $filepath, 22 | 'ServerSideEncryption' => 'AES256', 23 | ]); 24 | -------------------------------------------------------------------------------- /doc_source/HowDoIWebsiteConfiguration.md: -------------------------------------------------------------------------------- 1 | # Configuring a bucket as a static website using the AWS Management Console 2 | 3 | Using the AWS Management Console, you can configure your Amazon S3 bucket as a static website without writing any code\. Depending on your website requirements, you can also use some optional configurations, including redirects, web traffic logging, and custom error documents\.
4 | 5 | **Required configurations:** 6 | + [Enabling website hosting](EnableWebsiteHosting.md) 7 | + [Configuring an index document](IndexDocumentSupport.md) 8 | + [Setting permissions for website access](WebsiteAccessPermissionsReqd.md) 9 | 10 | **Optional configurations:** 11 | + [\(Optional\) Configuring a custom error document](CustomErrorDocSupport.md) 12 | + [\(Optional\) Configuring a webpage redirect](how-to-page-redirect.md) 13 | + [\(Optional\) Logging web traffic](LoggingWebsiteTraffic.md) -------------------------------------------------------------------------------- /doc_source/S3OutpostsExamples.md: -------------------------------------------------------------------------------- 1 | # Amazon S3 on Outposts examples 2 | 3 | With Amazon S3 on Outposts, you can create S3 buckets on your AWS Outposts and easily store and retrieve objects on\-premises for applications that require local data access, local data processing, and data residency\. You can use S3 on Outposts through the AWS Management Console, AWS CLI, AWS SDKs, or REST API\. For more information, see [Using Amazon S3 on Outposts](https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)\. 4 | 5 | This section contains the following examples of creating and managing Outposts buckets and of performing object operations with S3 on Outposts\. In the examples, replace any variable values with those that suit your needs\. 6 | 7 | **Topics** 8 | + [Amazon S3 on Outposts examples using the AWS CLI](S3OutpostsCLIExamples.md) 9 | + [Amazon S3 on Outposts examples using the SDK for Java](S3OutpostsJavaExamples.md) -------------------------------------------------------------------------------- /doc_source/VersionSuspendedBehavior.md: -------------------------------------------------------------------------------- 1 | # Managing objects in a versioning\-suspended bucket 2 | 3 | **Topics** 4 | + [Adding objects to versioning\-suspended buckets](AddingObjectstoVersionSuspendedBuckets.md) 5 | + [Retrieving objects from versioning\-suspended buckets](RetrievingObjectsfromVersioningSuspendedBuckets.md) 6 | + [Deleting objects from versioning\-suspended buckets](DeletingObjectsfromVersioningSuspendedBuckets.md) 7 | 8 | You suspend versioning to stop accruing new versions of the same object in a bucket\. You might do this because you only want a single version of an object in a bucket, or you might not want to accrue charges for multiple versions\. 9 | 10 | When you suspend versioning, existing objects in your bucket do not change\. What changes is how Amazon S3 handles objects in future requests\. The topics in this section explain various object operations in a versioning\-suspended bucket\.
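As a brief sketch \(assuming the AWS SDK for PHP v3 and a placeholder bucket name\), suspending versioning is a single `putBucketVersioning` call\.

```php
<?php

require 'vendor/autoload.php';

use Aws\S3\S3Client;

$s3 = new S3Client([
    'version' => 'latest',
    'region' => 'us-east-1'
]);

// Suspend versioning. Existing object versions are retained;
// future PUTs of a key overwrite its "null" version instead of
// creating a new version.
$s3->putBucketVersioning([
    'Bucket' => 'my-example-bucket', // placeholder bucket name
    'VersioningConfiguration' => [
        'Status' => 'Suspended'
    ]
]);
```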
-------------------------------------------------------------------------------- /doc_source/DeletingOneObject.md: -------------------------------------------------------------------------------- 1 | # Deleting one object per request 2 | 3 | **Topics** 4 | + [Deleting an object using the AWS SDK for Java](DeletingOneObjectUsingJava.md) 5 | + [Deleting an object using the AWS SDK for \.NET](DeletingOneObjectUsingNetSDK.md) 6 | + [Deleting an object using the AWS SDK for PHP](DeletingOneObjectUsingPHPSDK.md) 7 | + [Deleting an object using the REST API](DeletingAnObjectsUsingREST.md) 8 | + [Deleting an Object Using the AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) 9 | 10 | To delete one object per request, use the `DELETE` API \(see [DELETE Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html)\)\. To learn more about object deletion, see [Deleting objects](DeletingObjects.md)\. 11 | 12 | You can use either the REST API directly or the wrapper libraries provided by the AWS SDKs that simplify application development\. -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-downloading-object.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | use Aws\S3\Exception\S3Exception; 7 | 8 | $bucket = '*** Your Bucket Name ***'; 9 | $keyname = '*** Your Object Key ***'; 10 | 11 | // Instantiate an Amazon S3 client. 12 | $s3 = new S3Client([ 13 | 'version' => 'latest', 14 | 'region' => 'us-east-1' 15 | ]); 16 | 17 | try { 18 | // Get the object. 19 | $result = $s3->getObject([ 20 | 'Bucket' => $bucket, 21 | 'Key' => $keyname 22 | ]); 23 | 24 | // Display the object in the browser. 25 | header("Content-Type: {$result['ContentType']}"); 26 | echo $result['Body']; 27 | } catch (S3Exception $e) { 28 | echo $e->getMessage() . PHP_EOL; 29 | } 30 | -------------------------------------------------------------------------------- /doc_source/enable-logging-console.md: -------------------------------------------------------------------------------- 1 | # Enabling logging using the console 2 | 3 | For information about enabling [Amazon S3 server access logging](ServerLogs.md) in the [AWS Management Console](https://console.aws.amazon.com/s3/), see [How Do I Enable Server Access Logging for an S3 Bucket?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/server-access-logging.html) in the *Amazon Simple Storage Service Console User Guide*\. 4 | 5 | When you enable logging on a bucket, the console both enables logging on the source bucket and adds a grant in the target bucket's access control list \(ACL\) granting write permission to the Log Delivery group\. 6 | 7 | For information about how to enable logging programmatically, see [Enabling logging programmatically](enable-logging-programming.md)\. 8 | 9 | For information about the log record format, including the list of fields and their descriptions, see [Amazon S3 Server Access Log Format](LogFormat.md)\. -------------------------------------------------------------------------------- /doc_source/vulnerability-analysis-and-management.md: -------------------------------------------------------------------------------- 1 | # Configuration and vulnerability analysis in Amazon S3 2 | 3 | AWS handles basic security tasks like guest operating system \(OS\) and database patching, firewall configuration, and disaster recovery\. These procedures have been reviewed and certified by the appropriate third parties\.
For more details, see the following resources: 4 | + [Compliance Validation for Amazon S3](s3-compliance.md) 5 | + [Shared Responsibility Model](https://aws.amazon.com/compliance/shared-responsibility-model/) 6 | + [Amazon Web Services: Overview of Security Processes](https://d0.awsstatic.com/whitepapers/Security/AWS_Security_Whitepaper.pdf) 7 | 8 | The following security best practices also address configuration and vulnerability analysis in Amazon S3: 9 | + [Identify and audit all your Amazon S3 buckets](security-best-practices.md#audit) 10 | + [Enable AWS Config](security-best-practices.md#config)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-specify-sse.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $sourceBucket = '*** Your Source Bucket Name ***'; 8 | $sourceKeyname = '*** Your Source Object Key ***'; 9 | $targetBucket = '*** Your Target Bucket Name ***'; 10 | $targetKeyname = '*** Your Target Key ***'; 11 | 12 | $s3 = new S3Client([ 13 |     'version' => 'latest', 14 |     'region' => 'us-east-1' 15 | ]); 16 | 17 | // Copy an object and add server-side encryption. 18 | $s3->copyObject([ 19 |     'Bucket' => $targetBucket, 20 |     'Key' => $targetKeyname, 21 |     'CopySource' => "{$sourceBucket}/{$sourceKeyname}", 22 |     'ServerSideEncryption' => 'AES256', 23 | ]); 24 |
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-copy-object-sse.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $sourceBucket = '*** Your Source Bucket Name ***'; 8 | $sourceKeyname = '*** Your Source Object Key ***'; 9 | $targetBucket = '*** Your Target Bucket Name ***'; 10 | $targetKeyname = '*** Your Target Key ***'; 11 | 12 | $s3 = new S3Client([ 13 |     'version' => 'latest', 14 |     'region' => 'us-east-1' 15 | ]); 16 | 17 | // Copy an object and add server-side encryption. 18 | $s3->copyObject([ 19 |     'Bucket' => $targetBucket, 20 |     'Key' => $targetKeyname, 21 |     'CopySource' => "{$sourceBucket}/{$sourceKeyname}", 22 |     'ServerSideEncryption' => 'AES256', 23 | ]); 24 |
-------------------------------------------------------------------------------- /LICENSE-SAMPLECODE: -------------------------------------------------------------------------------- 1 | Copyright ${THIS_YEAR} Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this 4 | software and associated documentation files (the "Software"), to deal in the Software 5 | without restriction, including without limitation the rights to use, copy, modify, 6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 7 | permit persons to whom the Software is furnished to do so. 8 | 9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 15 |
-------------------------------------------------------------------------------- /doc_source/replication-additional-configs.md: -------------------------------------------------------------------------------- 1 | # Additional replication configurations 2 | 3 | This section describes additional replication configuration options that are available in Amazon S3\. For information about core replication configuration, see [Overview of setting up replication](replication-how-setup.md)\.
4 | 5 | **Topics** 6 | + [Monitoring progress with replication metrics and Amazon S3 event notifications](replication-metrics.md) 7 | + [Meeting compliance requirements using S3 Replication Time Control \(S3 RTC\)](replication-time-control.md) 8 | + [Replicating delete markers between buckets](delete-marker-replication.md) 9 | + [Replicating metadata changes with Amazon S3 replica modification sync](replication-for-metadata-changes.md) 10 | + [Changing the replica owner](replication-change-owner.md) 11 | + [Replicating objects created with server\-side encryption \(SSE\) using encryption keys stored in AWS KMS](replication-config-for-kms-objects.md)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-uploading-object.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | use Aws\S3\Exception\S3Exception; 7 | 8 | $bucket = '*** Your Bucket Name ***'; 9 | $keyname = '*** Your Object Key ***'; 10 | 11 | $s3 = new S3Client([ 12 |     'version' => 'latest', 13 |     'region' => 'us-east-1' 14 | ]); 15 | 16 | try { 17 |     // Upload data. 18 |     $result = $s3->putObject([ 19 |         'Bucket' => $bucket, 20 |         'Key' => $keyname, 21 |         'Body' => 'Hello, world!', 22 |         'ACL' => 'public-read' 23 |     ]); 24 | 25 |     // Print the URL to the object. 26 |     echo $result['ObjectURL'] . PHP_EOL; 27 | } catch (S3Exception $e) { 28 |     echo $e->getMessage() . PHP_EOL; 29 | } 30 |
-------------------------------------------------------------------------------- /doc_source/manage-objects-versioned-bucket.md: -------------------------------------------------------------------------------- 1 | # Managing objects in a versioning\-enabled bucket 2 | 3 | **Topics** 4 | + [Adding objects to versioning\-enabled buckets](AddingObjectstoVersioningEnabledBuckets.md) 5 | + [Listing objects in a versioning\-enabled bucket](list-obj-version-enabled-bucket.md) 6 | + [Retrieving object versions](RetrievingObjectVersions.md) 7 | + [Deleting object versions](DeletingObjectVersions.md) 8 | + [Transitioning object versions](transitioning-object-versions.md) 9 | + [Restoring previous versions](RestoringPreviousVersions.md) 10 | + [Versioned object permissions](VersionedObjectPermissionsandACLs.md) 11 | 12 | Objects that are stored in your bucket before you set the versioning state have a version ID of `null`\. When you enable versioning, existing objects in your bucket do not change\. What changes is how Amazon S3 handles the objects in future requests\. The topics in this section explain various object operations in a versioning\-enabled bucket\.
-------------------------------------------------------------------------------- /doc_source/usingHLmpuDotNet.md: -------------------------------------------------------------------------------- 1 | # Using the AWS SDK for \.NET for multipart upload \(high\-level API\) 2 | 3 | **Topics** 4 | + [Upload a file to an S3 bucket using the AWS SDK for \.NET \(high\-level API\)](HLuploadFileDotNet.md) 5 | + [Upload a directory](HLuploadDirDotNet.md) 6 | + [Stop multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(high\-level API\)](HLAbortDotNet.md) 7 | + [Track the progress of a multipart upload to an S3 Bucket using the AWS SDK for \.NET \(high\-level API\)](HLTrackProgressMPUDotNet.md) 8 | 9 | The AWS SDK for \.NET exposes a high\-level API that simplifies multipart uploads \(see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\)\. You can upload data from a file, a directory, or a stream\. For more information about Amazon S3 multipart uploads, see [Multipart upload overview](mpuoverview.md)\.
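Before the class details, here is a minimal sketch of a high-level upload, added for illustration; the file path and bucket name are placeholders, and it assumes the AWSSDK\.S3 package with credentials configured in the environment\.

```
using System.Threading.Tasks;
using Amazon;
using Amazon.S3;
using Amazon.S3.Transfer;

class HighLevelUploadSketch
{
    static async Task Main()
    {
        var client = new AmazonS3Client(RegionEndpoint.USEast1);
        var transferUtility = new TransferUtility(client);

        // TransferUtility decides internally whether to use a single
        // PUT or a multipart upload, based on the size of the file.
        await transferUtility.UploadAsync(
            filePath: @"C:\path\to\local\file.dat",  // placeholder path
            bucketName: "*** bucket name ***");      // placeholder bucket
    }
}
```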
10 | 11 | The `TransferUtility` class provides methods for uploading files and directories, tracking upload progress, and stopping multipart uploads\.
-------------------------------------------------------------------------------- /doc_source/using-iam-policies.md: -------------------------------------------------------------------------------- 1 | # Using Bucket Policies and User Policies 2 | 3 | Bucket policies and user policies are two access policy options available for granting permission to your Amazon S3 resources\. Both use JSON\-based access policy language\. 4 | 5 | The topics in this section describe the key policy language elements, with emphasis on Amazon S3–specific details, and provide example bucket and user policies\. 6 | 7 | **Important** 8 | Bucket policies are limited to 20 KB in size\. 9 | We recommend that you first review the introductory topics that explain the basic concepts and options available for you to manage access to your Amazon S3 resources\. For more information, see [Introduction to managing access to Amazon S3 resources](s3-access-control.md#intro-managing-access-s3-resources)\. 10 | 11 | **Topics** 12 | + [Policies and Permissions in Amazon S3](access-policy-language-overview.md) 13 | + [Bucket Policy Examples](example-bucket-policies.md) 14 | + [User Policy Examples](example-policies-s3.md)
-------------------------------------------------------------------------------- /doc_source/DNSConsiderations.md: -------------------------------------------------------------------------------- 1 | # DNS considerations 2 | 3 | One of the design requirements of Amazon S3 is extremely high availability\. One of the ways we meet this requirement is by updating the IP addresses associated with the Amazon S3 endpoint in DNS as needed\. These changes are automatically reflected in short\-lived clients, but not in some long\-lived clients\. Long\-lived clients will need to take special action to re\-resolve the Amazon S3 endpoint periodically to benefit from these changes\. For more information about virtual machines \(VMs\), refer to the following: 4 | + For Java, Sun's JVM caches DNS lookups forever by default; go to the "InetAddress Caching" section of [the InetAddress documentation](https://docs.oracle.com/javase/9/docs/api/java/net/InetAddress.html) for information on how to change this behavior\. 5 | + For PHP, the persistent PHP VM that runs in the most popular deployment configurations caches DNS lookups until the VM is restarted\. Go to [the getHostByName PHP docs](http://us2.php.net/manual/en/function.gethostbyname.php)\.
-------------------------------------------------------------------------------- /doc_source/using-aws-amplify.md: -------------------------------------------------------------------------------- 1 | # Using the AWS Amplify JavaScript Library 2 | 3 | AWS Amplify is an open source JavaScript library for web and mobile developers who build cloud\-enabled applications\. AWS Amplify provides customizable UI components and a declarative interface to work with an S3 bucket, along with other high\-level categories for AWS services\.
4 | 5 | To get started using the AWS Amplify JavaScript library, choose one of the following links: 6 | + [Getting Started with the AWS Amplify Library for the Web](https://docs.aws.amazon.com/aws-mobile/latest/developerguide/web-getting-started.html) 7 | + [Getting Started with the AWS Amplify Library for React Native](https://docs.aws.amazon.com/aws-mobile/latest/developerguide/react-native-getting-started.html) 8 | 9 | For more information about AWS Amplify, see [AWS Amplify](https://github.com/aws/aws-amplify) on [GitHub](https://github.com/about)\. 10 | 11 | ## More Info 12 | 13 | [Using the AWS Mobile SDKs for iOS and Android](using-mobile-sdks.md)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-request-aws.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | use Aws\S3\Exception\S3Exception; 7 | 8 | $bucket = '*** Your Bucket Name ***'; 9 | 10 | $s3 = new S3Client([ 11 |     'region' => 'us-east-1', 12 |     'version' => 'latest', 13 | ]); 14 | 15 | // Retrieve the list of buckets. 16 | $result = $s3->listBuckets(); 17 | 18 | try { 19 |     // Retrieve a paginator for listing objects. 20 |     $objects = $s3->getPaginator('ListObjects', [ 21 |         'Bucket' => $bucket 22 |     ]); 23 | 24 |     echo "Keys retrieved!" . PHP_EOL; 25 | 26 |     // Print the list of objects to the page. 27 |     foreach ($objects as $object) { 28 |         echo $object['Key'] . PHP_EOL; 29 |     } 30 | } catch (S3Exception $e) { 31 |     echo $e->getMessage() . PHP_EOL; 32 | } 33 |
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-multipart-upload-using-multipartuploader.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\Exception\MultipartUploadException; 6 | use Aws\S3\MultipartUploader; 7 | use Aws\S3\S3Client; 8 | 9 | $bucket = '*** Your Bucket Name ***'; 10 | $keyname = '*** Your Object Key ***'; 11 | 12 | $s3 = new S3Client([ 13 |     'version' => 'latest', 14 |     'region' => 'us-east-1' 15 | ]); 16 | 17 | // Prepare the upload parameters. 18 | $uploader = new MultipartUploader($s3, '/path/to/large/file.zip', [ 19 |     'bucket' => $bucket, 20 |     'key' => $keyname 21 | ]); 22 | 23 | // Perform the upload. 24 | try { 25 |     $result = $uploader->upload(); 26 |     echo "Upload complete: {$result['ObjectURL']}" . PHP_EOL; 27 | } catch (MultipartUploadException $e) { 28 |     echo $e->getMessage() . PHP_EOL; 29 | } 30 |
-------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples.sln: -------------------------------------------------------------------------------- 1 | 2 | Microsoft Visual Studio Solution File, Format Version 12.00 3 | # Visual Studio 14 4 | VisualStudioVersion = 14.0.25420.1 5 | MinimumVisualStudioVersion = 10.0.40219.1 6 | Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "S3Examples", "S3Examples\S3Examples.csproj", "{6272109B-98DC-4FC0-B821-BADB3B51496B}" 7 | EndProject 8 | Global 9 | GlobalSection(SolutionConfigurationPlatforms) = preSolution 10 | Debug|Any CPU = Debug|Any CPU 11 | Release|Any CPU = Release|Any CPU 12 | EndGlobalSection 13 | GlobalSection(ProjectConfigurationPlatforms) = postSolution 14 | {6272109B-98DC-4FC0-B821-BADB3B51496B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU 15 | {6272109B-98DC-4FC0-B821-BADB3B51496B}.Debug|Any CPU.Build.0 = Debug|Any CPU 16 | {6272109B-98DC-4FC0-B821-BADB3B51496B}.Release|Any CPU.ActiveCfg = Release|Any CPU 17 | {6272109B-98DC-4FC0-B821-BADB3B51496B}.Release|Any CPU.Build.0 = Release|Any CPU 18 | EndGlobalSection 19 | GlobalSection(SolutionProperties) = preSolution 20 | HideSolutionNode = FALSE 21 | EndGlobalSection 22 | EndGlobal 23 |
-------------------------------------------------------------------------------- /doc_source/AuthUsingTempFederationToken.md: -------------------------------------------------------------------------------- 1 | # Making requests using federated user temporary credentials 2 | 3 | You can request temporary security credentials and provide them to your federated users or applications who need to access your AWS resources\. This section provides examples of how you can use the AWS SDK to obtain temporary security credentials for your federated users or applications and send authenticated requests to Amazon S3 using those credentials\. For a list of available AWS SDKs, see [Sample Code and Libraries](https://aws.amazon.com/code/)\. 4 | 5 | **Note** 6 | Both the AWS account and an IAM user can request temporary security credentials for federated users\. However, for added security, only an IAM user with the necessary permissions should request these temporary credentials to ensure that the federated user gets at most the permissions of the requesting IAM user\. In some applications, you might find it suitable to create an IAM user with specific permissions for the sole purpose of granting temporary security credentials to your federated users and applications\.
-------------------------------------------------------------------------------- /doc_source/usingLLmpuDotNet.md: -------------------------------------------------------------------------------- 1 | # Using the AWS SDK for \.NET for multipart upload \(low\-level API\) 2 | 3 | The AWS SDK for \.NET exposes a low\-level API that closely resembles the Amazon S3 REST API for multipart upload \(see [Using the REST API for multipart upload](UsingRESTAPImpUpload.md)\)\. Use the low\-level API when you need to pause and resume multipart uploads, vary part sizes during the upload, or when you do not know the size of the data in advance\. Use the high\-level API \(see [Using the AWS SDK for \.NET for multipart upload \(high\-level API\)](usingHLmpuDotNet.md)\) whenever you don't have these requirements\.
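To show the shape of the low-level flow end to end, here is a minimal sketch that initiates an upload, sends one part, and completes the upload\. It is an illustration added for context, not one of the guide's samples; the bucket, key, and file path are placeholders, and it assumes the AWSSDK\.S3 package with credentials configured in the environment\.

```
using System.Collections.Generic;
using System.Threading.Tasks;
using Amazon;
using Amazon.S3;
using Amazon.S3.Model;

class LowLevelMpuSketch
{
    static async Task Main()
    {
        var client = new AmazonS3Client(RegionEndpoint.USEast1);
        const string bucket = "*** bucket name ***";     // placeholder
        const string key = "*** object key ***";         // placeholder
        const string filePath = @"C:\path\to\file.dat";  // placeholder

        // 1. Initiate the upload and remember the upload ID.
        InitiateMultipartUploadResponse init =
            await client.InitiateMultipartUploadAsync(bucket, key);

        // 2. Upload one 5 MB part from the file. Real code would loop
        //    over the file, incrementing PartNumber and FilePosition.
        UploadPartResponse part = await client.UploadPartAsync(new UploadPartRequest
        {
            BucketName = bucket,
            Key = key,
            UploadId = init.UploadId,
            PartNumber = 1,
            PartSize = 5 * 1024 * 1024,
            FilePath = filePath,
            FilePosition = 0
        });

        // 3. Complete the upload by sending the collected part ETags.
        var complete = new CompleteMultipartUploadRequest
        {
            BucketName = bucket,
            Key = key,
            UploadId = init.UploadId
        };
        complete.AddPartETags(new List<UploadPartResponse> { part });
        await client.CompleteMultipartUploadAsync(complete);
    }
}
```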
4 | 5 | **Topics** 6 | + [Upload a file to an S3 Bucket using the AWS SDK for \.NET \(low\-level API\)](LLuploadFileDotNet.md) 7 | + [List multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(low\-level\)](LLlistMPuploadsDotNet.md) 8 | + [Track the progress of a multipart upload to an S3 Bucket using the AWS SDK for \.NET \(low\-level\)](LLTrackProgressMPUNet.md) 9 | + [Stop multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(low\-level\)](LLAbortMPUnet.md)
-------------------------------------------------------------------------------- /doc_source/UsingEncryption.md: -------------------------------------------------------------------------------- 1 | # Protecting data using encryption 2 | 3 | Data protection refers to protecting data while in\-transit \(as it travels to and from Amazon S3\) and at rest \(while it is stored on disks in Amazon S3 data centers\)\. You can protect data in transit using Secure Socket Layer/Transport Layer Security \(SSL/TLS\) or client\-side encryption\. You have the following options for protecting data at rest in Amazon S3: 4 | + **Server\-Side Encryption** – Request Amazon S3 to encrypt your object before saving it on disks in its data centers, and then decrypt it when you download it\. 5 | + **Client\-Side Encryption** – Encrypt data client\-side and upload the encrypted data to Amazon S3\. In this case, you manage the encryption process, the encryption keys, and related tools\. 6 | 7 | For more information about server\-side encryption and client\-side encryption, review the topics listed below\. 8 | 9 | **Topics** 10 | + [Protecting data using server\-side encryption](serv-side-encryption.md) 11 | + [Protecting data using client\-side encryption](UsingClientSideEncryption.md)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-bucket-website-configuration.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | 9 | $s3 = new S3Client([ 10 |     'version' => 'latest', 11 |     'region' => 'us-east-1' 12 | ]); 13 | 14 | // Add the website configuration. 15 | $s3->putBucketWebsite([ 16 |     'Bucket' => $bucket, 17 |     'WebsiteConfiguration' => [ 18 |         'IndexDocument' => ['Suffix' => 'index.html'], 19 |         'ErrorDocument' => ['Key' => 'error.html'] 20 |     ] 21 | ]); 22 | 23 | // Retrieve the website configuration. 24 | $result = $s3->getBucketWebsite([ 25 |     'Bucket' => $bucket 26 | ]); 27 | echo $result->getPath('IndexDocument/Suffix'); 28 | 29 | // Delete the website configuration. 30 | $s3->deleteBucketWebsite([ 31 |     'Bucket' => $bucket 32 | ]); 33 |
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-deleting-multi-objects-nonversioned.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | 7 | $bucket = '*** Your Bucket Name ***'; 8 | 9 | $s3 = new S3Client([ 10 |     'version' => 'latest', 11 |     'region' => 'us-east-1' 12 | ]); 13 | 14 | // 1. Create a few objects. 15 | for ($i = 1; $i <= 3; $i++) { 16 |     $s3->putObject([ 17 |         'Bucket' => $bucket, 18 |         'Key' => "key{$i}", 19 |         'Body' => "content {$i}", 20 |     ]); 21 | } 22 | 23 | // 2. List the objects and get the keys. 24 | $keys = $s3->listObjects([ 25 |     'Bucket' => $bucket 26 | ])->getPath('Contents/*/Key'); 27 | 28 | // 3. Delete the objects. 29 | $s3->deleteObjects([ 30 |     'Bucket' => $bucket, 31 |     'Delete' => [ 32 |         'Objects' => array_map(function ($key) { 33 |             return ['Key' => $key]; 34 |         }, $keys) 35 |     ], 36 | ]); 37 |
-------------------------------------------------------------------------------- /doc_source/batch-ops-examples.md: -------------------------------------------------------------------------------- 1 | # S3 Batch Operations examples 2 | 3 | You can use S3 Batch Operations to perform large\-scale batch operations on billions of S3 objects containing exabytes of data\. You can use the AWS Management Console, AWS Command Line Interface \(AWS CLI\), AWS SDKs, or REST API to manage your Batch Operations jobs\. 4 | 5 | This section contains the following examples of creating and managing Batch Operations jobs in Amazon S3\. In the examples, replace any variable values with those that suit your needs\. 6 | 7 | **Topics** 8 | + [S3 Batch Operations examples using the AWS CLI](batch-ops-examples-cli.md) 9 | + [S3 Batch Operations examples using the AWS SDK for Java](batch-ops-examples-java.md) 10 | + [Example: Using job tags to control permissions for S3 Batch Operations](batch-ops-job-tags-examples.md) 11 | + [Example: Requesting S3 Batch Operations completion reports](batch-ops-examples-reports.md) 12 | + [Example: Copying objects across AWS accounts using S3 Batch Operations](batch-ops-examples-xcopy.md) 13 | + [Example: Tracking an S3 Batch Operations job in Amazon EventBridge through AWS CloudTrail](batch-ops-examples-event-bridge-cloud-trail.md)
-------------------------------------------------------------------------------- /doc_source/BucketPayerValues.md: -------------------------------------------------------------------------------- 1 | # Retrieving the requestPayment Configuration 2 | 3 | You can determine the `Payer` value that is set on a bucket by requesting the resource `requestPayment`\. 4 | 5 | **To return the requestPayment resource** 6 | + Use a GET request to obtain the `requestPayment` resource, as shown in the following request\. 7 | 8 | ``` 9 | 1. GET ?requestPayment HTTP/1.1 10 | 2. Host: [BucketName].s3.amazonaws.com 11 | 3. Date: Wed, 01 Mar 2009 12:00:00 GMT 12 | 4. Authorization: AWS [Signature] 13 | ``` 14 | 15 | If the request succeeds, Amazon S3 returns a response similar to the following\. 16 | 17 | ``` 18 | 1. HTTP/1.1 200 OK 19 | 2. x-amz-id-2: [id] 20 | 3. x-amz-request-id: [request_id] 21 | 4. Date: Wed, 01 Mar 2009 12:00:00 GMT 22 | 5. Content-Type: [type] 23 | 6. Content-Length: [length] 24 | 7. Connection: close 25 | 8. Server: AmazonS3 26 | 9. 27 | 10. <?xml version="1.0" encoding="UTF-8"?> 28 | 11. <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"> 29 | 12.   <Payer>Requester</Payer> 30 | 13. </RequestPaymentConfiguration> 31 | ``` 32 | 33 | This response shows that the `payer` value is set to `Requester`\.
-------------------------------------------------------------------------------- /doc_source/s3-glacier-select-sql-reference-conversion.md: -------------------------------------------------------------------------------- 1 | # Conversion Functions 2 | 3 | Amazon S3 Select and S3 Glacier Select support the following conversion functions\. 4 | 5 | **Topics** 6 | + [CAST](#s3-glacier-select-sql-reference-cast) 7 | 8 | ## CAST 9 | 10 | The `CAST` function converts an entity, such as an expression that evaluates to a single value, from one type to another\. 11 | 12 | ### Syntax 13 | 14 | ``` 15 | CAST ( expression AS data_type ) 16 | ``` 17 | 18 | ### Parameters 19 | 20 | *expression* 21 | A combination of one or more values, operators, and SQL functions that evaluate to a value\.
22 | 23 | *data\_type* 24 | The target data type, such as `INT`, to cast the expression to\. For a list of supported data types, see [Data Types](s3-glacier-select-sql-reference-data-types.md)\. 25 | 26 | ### Examples 27 | 28 | ``` 29 | CAST('2007-04-05T14:30Z' AS TIMESTAMP) 30 | CAST(0.456 AS FLOAT) 31 | ```
-------------------------------------------------------------------------------- /doc_source/UsingRESTAPImpUpload.md: -------------------------------------------------------------------------------- 1 | # Using the REST API for multipart upload 2 | 3 | The following sections in the *Amazon Simple Storage Service API Reference* describe the REST API for multipart upload\. 4 | + [Initiate Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadInitiate.html) 5 | + [Upload Part](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html) 6 | + [Complete Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html) 7 | + [Stop Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html) 8 | + [List Parts](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html) 9 | + [List Multipart Uploads](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListMPUpload.html) 10 | 11 | You can use these APIs to make your own REST requests, use the AWS Command Line Interface, or use one of the SDKs we provide\. For more information about using Multipart Upload with the AWS CLI, see [Using the AWS Command Line Interface for multipart upload](UsingCLImpUpload.md)\. For more information about the SDKs, see [API support for multipart upload](sdksupportformpu.md)\.
-------------------------------------------------------------------------------- /doc_source/deleting-log-files-lifecycle.md: -------------------------------------------------------------------------------- 1 | # Deleting Amazon S3 log files 2 | 3 | An S3 bucket with server access logging enabled can accumulate many server log objects over time\. Your application might need these access logs for a specific period after creation, and after that, you might want to delete them\. You can use Amazon S3 lifecycle configuration to set rules so that Amazon S3 automatically queues these objects for deletion at the end of their life\. 4 | 5 | You can define a lifecycle configuration for a subset of objects in your S3 bucket by using a shared prefix \(that is, objects that have names that begin with a common string\)\. If you specified a prefix in your server access logging configuration, you can set a lifecycle configuration rule to delete log objects that have that prefix\. For example, if your log objects have the prefix `logs/`, you can set a lifecycle configuration rule to delete all objects in the bucket that have the prefix `logs/` after a specified period of time\. For more information about lifecycle configuration, see [Object lifecycle management](object-lifecycle-mgmt.md)\. 6 | 7 | ## Related resources 8 | 9 | [Amazon S3 server access logging](ServerLogs.md)
-------------------------------------------------------------------------------- /doc_source/UsingCLImpUpload.md: -------------------------------------------------------------------------------- 1 | # Using the AWS Command Line Interface for multipart upload 2 | 3 | The following sections in the *AWS CLI Command Reference* describe the operations for multipart upload\.
4 | + [Initiate Multipart Upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) 5 | + [Upload Part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) 6 | + [Upload Part \(Copy\)](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) 7 | + [Complete Multipart Upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) 8 | + [Abort Multipart Upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) 9 | + [List Parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) 10 | + [List Multipart Uploads](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-multipart-uploads.html) 11 | 12 | You can also use the REST API to make your own REST requests, or you can use one of the SDKs we provide\. For more information about the REST API, see [Using the REST API for multipart upload](UsingRESTAPImpUpload.md)\. For more information about the SDKs, see [API support for multipart upload](sdksupportformpu.md)\.
-------------------------------------------------------------------------------- /doc_source/monitoring-overview.md: -------------------------------------------------------------------------------- 1 | # Monitoring Amazon S3 2 | 3 | Monitoring is an important part of maintaining the reliability, availability, and performance of Amazon S3 and your AWS solutions\. You should collect monitoring data from all of the parts of your AWS solution so that you can more easily debug a multipoint failure if one occurs\. But before you start monitoring Amazon S3, you should create a monitoring plan that includes answers to the following questions: 4 | + What are your monitoring goals? 5 | + What resources will you monitor? 6 | + How often will you monitor these resources? 7 | + What monitoring tools will you use? 8 | + Who will perform the monitoring tasks? 9 | + Who should be notified when something goes wrong? 10 | 11 | **Topics** 12 | + [Monitoring tools](monitoring-automated-manual.md) 13 | + [Monitoring metrics with Amazon CloudWatch](cloudwatch-monitoring.md) 14 | + [Metrics configurations for buckets](metrics-configurations.md) 15 | + [Logging with Amazon S3](logging-with-S3.md) 16 | + [Logging Amazon S3 API calls using AWS CloudTrail](cloudtrail-logging.md) 17 | + [Using AWS CloudTrail to identify Amazon S3 requests](cloudtrail-request-identification.md) 18 | + [Tracing Amazon S3 requests using AWS X\-Ray](tracing_requests_using_xray.md)
-------------------------------------------------------------------------------- /doc_source/storage_lens_view_metrics.md: -------------------------------------------------------------------------------- 1 | # Viewing storage usage and activity metrics with Amazon S3 Storage Lens 2 | 3 | By default, all dashboards are configured with **free metrics**, which include [usage metrics](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_basics_metrics_recommendations.html#storage_lens_basics_metrics_types) aggregated down to the bucket level with a 14\-day data retention\. This means that you can see all the usage metrics that S3 Storage Lens aggregates, and your data will be available for 14 days from the day it was aggregated\. 4 | 5 | **Advanced metrics and recommendations** include usage and activity metrics that can be aggregated by prefix\.
[Activity metrics](https://docs.aws.amazon.com/AmazonS3/latest/dev/storage_lens_basics_metrics_recommendations.html#storage_lens_basics_metrics_types) can be aggregated by bucket with a 15\-month data retention policy\. There are additional charges for using S3 Storage Lens with advanced metrics\. For more information, see [Amazon S3 pricing](http://aws.amazon.com/s3/pricing)\. 6 | 7 | **Topics** 8 | + [Viewing S3 Storage Lens metrics on the dashboards](storage_lens_view_metrics_dashboard.md) 9 | + [Viewing Amazon S3 Storage Lens metrics using a data export](storage_lens_view_metrics_export.md)
-------------------------------------------------------------------------------- /doc_source/S3LensExamples.md: -------------------------------------------------------------------------------- 1 | # Amazon S3 Storage Lens examples and console walk\-through 2 | 3 | Amazon S3 Storage Lens aggregates your usage and activity metrics and displays the information in an interactive dashboard on the Amazon S3 console or through a metrics data export that can be downloaded in CSV or Parquet format\. You can use the dashboard to visualize insights and trends, flag outliers, and get recommendations for optimizing storage costs and applying data protection best practices\. You can use S3 Storage Lens through the AWS Management Console, AWS CLI, AWS SDKs, or REST API\. 4 | 5 | [![AWS Videos](http://img.youtube.com/vi/TNmZEvwFiOA/0.jpg)](http://www.youtube.com/watch?v=TNmZEvwFiOA) 6 | 7 | This section contains examples of creating, updating, and viewing S3 Storage Lens configurations and performing operations related to the feature\. If you are using S3 Storage Lens with AWS Organizations, these examples also cover those use cases\. In the examples, replace any variable values with those that are specific to you\. 8 | 9 | **Topics** 10 | + [Amazon S3 Storage Lens examples using the AWS CLI](S3LensCLIExamples.md) 11 | + [Amazon S3 Storage Lens examples using the SDK for Java](S3LensJavaExamples.md)
-------------------------------------------------------------------------------- /doc_source/CopyingObjctsUsingRESTMPUapi.md: -------------------------------------------------------------------------------- 1 | # Copy object using the REST multipart upload API 2 | 3 | The following sections in the *Amazon Simple Storage Service API Reference* describe the REST API for multipart upload\. For copying an existing object, you use the Upload Part \(Copy\) API and specify the source object by adding the `x-amz-copy-source` request header in your request\. 4 | + [Initiate Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadInitiate.html) 5 | + [Upload Part](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html) 6 | + [Upload Part \(Copy\)](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html) 7 | + [Complete Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html) 8 | + [Abort Multipart Upload](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html) 9 | + [List Parts](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html) 10 | + [List Multipart Uploads](https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListMPUpload.html) 11 | 12 | You can use these APIs to make your own REST requests, or you can use one of the SDKs we provide\. For more information about the SDKs, see [API support for multipart upload](sdksupportformpu.md)\.
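To make the flow concrete, the following minimal C# sketch copies one range of a source object into a multipart upload using the SDK's `CopyPartRequest`, which sends Upload Part \(Copy\) with the `x-amz-copy-source` header under the covers\. It is an illustration added for context; the bucket and key names are placeholders, and it assumes the AWSSDK\.S3 package with credentials configured in the environment\.

```
using System.Collections.Generic;
using System.Threading.Tasks;
using Amazon;
using Amazon.S3;
using Amazon.S3.Model;

class MultipartCopySketch
{
    static async Task Main()
    {
        var client = new AmazonS3Client(RegionEndpoint.USEast1);
        const string sourceBucket = "*** source bucket ***"; // placeholder
        const string sourceKey = "*** source key ***";       // placeholder
        const string targetBucket = "*** target bucket ***"; // placeholder
        const string targetKey = "*** target key ***";       // placeholder

        // Initiate the multipart upload on the destination object.
        InitiateMultipartUploadResponse init =
            await client.InitiateMultipartUploadAsync(targetBucket, targetKey);

        // Copy the first 5 MB of the source object as part 1. Real code
        // would loop over byte ranges, incrementing PartNumber.
        CopyPartResponse part = await client.CopyPartAsync(new CopyPartRequest
        {
            SourceBucket = sourceBucket,
            SourceKey = sourceKey,
            DestinationBucket = targetBucket,
            DestinationKey = targetKey,
            UploadId = init.UploadId,
            PartNumber = 1,
            FirstByte = 0,
            LastByte = 5 * 1024 * 1024 - 1
        });

        // Complete the upload with the ETags returned by each copy.
        var complete = new CompleteMultipartUploadRequest
        {
            BucketName = targetBucket,
            Key = targetKey,
            UploadId = init.UploadId
        };
        complete.AddPartETags(new List<CopyPartResponse> { part });
        await client.CompleteMultipartUploadAsync(complete);
    }
}
```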
-------------------------------------------------------------------------------- /doc_source/DeletingMultipleObjects.md: -------------------------------------------------------------------------------- 1 | # Deleting multiple objects per request 2 | 3 | **Topics** 4 | + [Deleting multiple objects using the AWS SDK for Java](DeletingMultipleObjectsUsingJava.md) 5 | + [Deleting multiple objects using the AWS SDK for \.NET](DeletingMultipleObjectsUsingNetSDK.md) 6 | + [Deleting multiple objects using the AWS SDK for PHP](DeletingMultipleObjectsUsingPHPSDK.md) 7 | + [Deleting multiple objects using the REST API](DeletingMultipleObjectsUsingREST.md) 8 | 9 | Amazon S3 provides the Multi\-Object Delete API \(see [Delete \- Multi\-Object Delete](https://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html)\), which enables you to delete multiple objects in a single request\. The API supports two modes for the response: verbose and quiet\. By default, the operation uses verbose mode\. In verbose mode, the response includes the result of the deletion of each key that is specified in your request\. In quiet mode, the response includes only keys for which the delete operation encountered an error\. If all keys are successfully deleted when you're using quiet mode, Amazon S3 returns an empty response\. 10 | 11 | To learn more about object deletion, see [Deleting objects](DeletingObjects.md)\. 12 | 13 | You can use the REST API directly or use the AWS SDKs\.
-------------------------------------------------------------------------------- /doc_source/UsingRESTError.md: -------------------------------------------------------------------------------- 1 | # The REST error response 2 | 3 | **Topics** 4 | + [Response headers](#UsingRESTErrorResponseHeaders) 5 | + [Error response](ErrorResponse.md) 6 | 7 | If a REST request results in an error, the HTTP reply has: 8 | + An XML error document as the response body 9 | + Content\-Type: application/xml 10 | + An appropriate 3xx, 4xx, or 5xx HTTP status code 11 | 12 | Following is an example of a REST error response\. 13 | 14 | ``` 15 | 1. <?xml version="1.0" encoding="UTF-8"?> 16 | 2. <Error> 17 | 3.   <Code>NoSuchKey</Code> 18 | 4.   <Message>The resource you requested does not exist</Message> 19 | 5.   <Resource>/mybucket/myfoto.jpg</Resource> 20 | 6.   <RequestId>4442587FB7D0A2F9</RequestId> 21 | 7. </Error> 22 | ``` 23 | 24 | For more information about Amazon S3 errors, go to [ErrorCodeList](https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)\. 25 | 26 | ## Response headers 27 | 28 | Following are response headers returned by all operations: 29 | + `x-amz-request-id:` A unique ID assigned to each request by the system\. In the unlikely event that you have problems with Amazon S3, Amazon can use this to help troubleshoot the problem\. 30 | + `x-amz-id-2:` A special token that will help us to troubleshoot problems\.
-------------------------------------------------------------------------------- /doc_source/LLTrackProgressMPUNet.md: -------------------------------------------------------------------------------- 1 | # Track the progress of a multipart upload to an S3 Bucket using the AWS SDK for \.NET \(low\-level\) 2 | 3 | To track the progress of a multipart upload, use the `UploadPartRequest.StreamTransferProgress` event provided by the AWS SDK for \.NET low\-level multipart upload API\. The event occurs periodically, and its arguments report the total number of bytes to transfer and the number of bytes transferred so far\. 4 | 5 | The following C\# example shows how to track the progress of multipart uploads\.
For a complete C\# sample that includes the following code, see [Upload a file to an S3 Bucket using the AWS SDK for \.NET \(low\-level API\)](LLuploadFileDotNet.md)\. 6 | 7 | ``` 8 | UploadPartRequest uploadRequest = new UploadPartRequest 9 | { 10 |     // Provide the request data. 11 | }; 12 | 13 | uploadRequest.StreamTransferProgress += 14 |     new EventHandler<StreamTransferProgressArgs>(UploadPartProgressEventCallback); 15 | 16 | ... 17 | public static void UploadPartProgressEventCallback(object sender, StreamTransferProgressArgs e) 18 | { 19 |     // Process the event. 20 |     Console.WriteLine("{0}/{1}", e.TransferredBytes, e.TotalBytes); 21 | } 22 | ``` 23 | 24 | ## More info 25 | 26 | [AWS SDK for \.NET](https://aws.amazon.com/sdk-for-net/)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-listing-object-keys.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | use Aws\S3\Exception\S3Exception; 7 | 8 | $bucket = '*** Your Bucket Name ***'; 9 | 10 | $s3 = new S3Client([ 11 |     'version' => 'latest', 12 |     'region' => 'us-east-1' 13 | ]); 14 | 15 | // Use the high-level iterators (returns ALL of your objects). 16 | try { 17 |     $objects = $s3->getPaginator('ListObjects', [ 18 |         'Bucket' => $bucket 19 |     ]); 20 | 21 |     echo "Keys retrieved!" . PHP_EOL; 22 |     foreach ($objects as $object) { 23 |         echo $object['Key'] . PHP_EOL; 24 |     } 25 | } catch (S3Exception $e) { 26 |     echo $e->getMessage() . PHP_EOL; 27 | } 28 | 29 | // Use the plain API (returns ONLY up to 1000 of your objects). 30 | try { 31 |     $result = $s3->listObjects([ 32 |         'Bucket' => $bucket 33 |     ]); 34 | 35 |     echo "Keys retrieved!" . PHP_EOL; 36 |     foreach ($result['Contents'] as $object) { 37 |         echo $object['Key'] . PHP_EOL; 38 |     } 39 | } catch (S3Exception $e) { 40 |     echo $e->getMessage() . PHP_EOL; 41 | } 42 |
-------------------------------------------------------------------------------- /doc_source/ManagingBucketWebsiteConfig.md: -------------------------------------------------------------------------------- 1 | # Programmatically configuring a bucket as a static website 2 | 3 | To host a static website on Amazon S3, you configure an Amazon S3 bucket for website hosting and then upload your website content to the bucket\. You can also use the AWS SDKs to create, update, and delete the website configuration programmatically\. The SDKs provide wrapper classes around the Amazon S3 REST API\. If your application requires it, you can send REST API requests directly from your application\. 4 | 5 | For more information about configuring your bucket for static website hosting using the AWS Management Console, see [Configuring a bucket as a static website using the AWS Management Console](HowDoIWebsiteConfiguration.md)\. 6 | 7 | For more information about using the AWS CLI to configure an S3 bucket as a static website, see [website](https://docs.aws.amazon.com/cli/latest/reference/s3/website.html) in the *AWS CLI Command Reference*\. For more information about programmatically configuring an S3 bucket as a static website, see the following topics\.
8 | 9 | **Topics** 10 | + [Managing websites with the AWS SDK for Java](ConfigWebSiteJava.md) 11 | + [Managing websites with the AWS SDK for \.NET](ConfigWebSiteDotNet.md) 12 | + [Managing websites with the AWS SDK for PHP](ConfigWebSitePHP.md) 13 | + [Managing websites with the REST API](ConfigWebSiteREST.md)
-------------------------------------------------------------------------------- /doc_source/S3_ACLs_UsingACLs.md: -------------------------------------------------------------------------------- 1 | # Managing Access with ACLs 2 | 3 | **Topics** 4 | + [Access Control List \(ACL\) Overview](acl-overview.md) 5 | + [Managing ACLs](managing-acls.md) 6 | 7 | Access control lists \(ACLs\) are one of the resource\-based access policy options \(see [Overview of managing access](access-control-overview.md)\) that you can use to manage access to your buckets and objects\. You can use ACLs to grant basic read/write permissions to other AWS accounts\. There are limits to managing permissions using ACLs\. For example, you can grant permissions only to other AWS accounts; you cannot grant permissions to users in your account\. You cannot grant conditional permissions, nor can you explicitly deny permissions\. ACLs are suitable for specific scenarios\. For example, if a bucket owner allows other AWS accounts to upload objects, permissions to these objects can be managed only through the object ACL, and only by the AWS account that owns the object\. 8 | 9 | The following introductory topics explain the basic concepts and options that are available for you to manage access to your Amazon S3 resources, and provide guidelines for when to use which access policy options\. 10 | + [Introduction to managing access to Amazon S3 resources](s3-access-control.md#intro-managing-access-s3-resources) 11 | + [Guidelines for using the available access policy options](access-policy-alternatives-guidelines.md)
-------------------------------------------------------------------------------- /doc_source/LLlistMPuploadsPHP.md: -------------------------------------------------------------------------------- 1 | # List multipart uploads using the low\-level AWS SDK for PHP API 2 | 3 | This topic shows how to use the low\-level API classes from version 3 of the AWS SDK for PHP to list all in\-progress multipart uploads on a bucket\. It assumes that you are already following the instructions for [Using the AWS SDK for PHP and Running PHP Examples](UsingTheMPphpAPI.md) and have the AWS SDK for PHP properly installed\. 4 | 5 | The following PHP example demonstrates listing all in\-progress multipart uploads on a bucket\. 6 | 7 | ``` 8 | require 'vendor/autoload.php'; 9 | 10 | use Aws\S3\S3Client; 11 | 12 | $bucket = '*** Your Bucket Name ***'; 13 | 14 | $s3 = new S3Client([ 15 |     'version' => 'latest', 16 |     'region' => 'us-east-1' 17 | ]); 18 | 19 | // Retrieve a list of the current multipart uploads. 20 | $result = $s3->listMultipartUploads([ 21 |     'Bucket' => $bucket 22 | ]); 23 | 24 | // Write the list of uploads to the page.
25 | print_r($result->toArray()); 26 | ``` 27 | 28 | ## Related resources 29 | + [AWS SDK for PHP for Amazon S3 Aws\\S3\\S3Client Class](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.S3.S3Client.html) 30 | + [Amazon S3 Multipart Uploads](https://docs.aws.amazon.com/aws-sdk-php/v3/guide/service/s3-multipart-upload.html) 31 | + [AWS SDK for PHP Documentation](http://aws.amazon.com/documentation/sdk-for-php/)
-------------------------------------------------------------------------------- /doc_source/replication-example-walkthroughs.md: -------------------------------------------------------------------------------- 1 | # Replication walkthroughs 2 | 3 | The following examples show how to configure replication for common use cases\. The examples demonstrate replication configuration using the Amazon S3 console, AWS Command Line Interface \(AWS CLI\), and AWS SDKs \(Java and \.NET SDK examples are shown\)\. For information about installing and configuring the AWS CLI, see the following topics in the *AWS Command Line Interface User Guide*\. 4 | + [Installing the AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/installing.html) 5 | + [Configuring the AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) \- You must set up at least one profile\. If you are exploring cross\-account scenarios, set up two profiles\. 6 | 7 | For information about AWS SDKs, see [AWS SDK for Java](https://aws.amazon.com/sdk-for-java/) and [AWS SDK for \.NET](https://aws.amazon.com/sdk-for-net/)\. 8 | 9 | **Topics** 10 | + [Example 1: Configuring for buckets in the same account](replication-walkthrough1.md) 11 | + [Example 2: Configuring for buckets in different accounts](replication-walkthrough-2.md) 12 | + [Example 3: Changing replica owner](replication-walkthrough-3.md) 13 | + [Example 4: Replicating encrypted objects](replication-walkthrough-4.md) 14 | + [Example 5: S3 Replication Time Control \(S3 RTC\)](replication-walkthrough-5.md)
-------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-request-with-temp-credentials.php: -------------------------------------------------------------------------------- 1 | <?php 2 | 3 | require 'vendor/autoload.php'; 4 | 5 | use Aws\S3\S3Client; 6 | use Aws\S3\Exception\S3Exception; 7 | use Aws\Sts\StsClient; 8 | 9 | $bucket = '*** Your Bucket Name ***'; 10 | 11 | $sts = new StsClient([ 12 |     'version' => 'latest', 13 |     'region' => 'us-east-1' 14 | ]); 15 | 16 | $sessionToken = $sts->getSessionToken(); 17 | 18 | $s3 = new S3Client([ 19 |     'region' => 'us-east-1', 20 |     'version' => 'latest', 21 |     'credentials' => [ 22 |         'key' => $sessionToken['Credentials']['AccessKeyId'], 23 |         'secret' => $sessionToken['Credentials']['SecretAccessKey'], 24 |         'token' => $sessionToken['Credentials']['SessionToken'] 25 |     ] 26 | ]); 27 | 28 | $result = $s3->listBuckets(); 29 | 30 | try { 31 |     // Retrieve a paginator for listing objects. 32 |     $objects = $s3->getPaginator('ListObjects', [ 33 |         'Bucket' => $bucket 34 |     ]); 35 | 36 |     echo "Keys retrieved!" . PHP_EOL; 37 | 38 |     // List objects 39 |     foreach ($objects as $object) { 40 |         echo $object['Key'] . PHP_EOL; 41 |     } 42 | } catch (S3Exception $e) { 43 |     echo $e->getMessage() . PHP_EOL; 44 | } 45 |
-------------------------------------------------------------------------------- /doc_source/UsingSOAPOperations.md: -------------------------------------------------------------------------------- 1 | # Common SOAP API elements 2 | 3 | **Note** 4 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\.
We recommend that you use either the REST API or the AWS SDKs\. 5 | 6 | You can interact with Amazon S3 using SOAP 1\.1 over HTTP\. The Amazon S3 WSDL, which describes the Amazon S3 API in a machine\-readable way, is available at: [https://doc\.s3\.amazonaws\.com/2006\-03\-01/AmazonS3\.wsdl](https://doc.s3.amazonaws.com/2006-03-01/AmazonS3.wsdl)\. The Amazon S3 schema is available at [https://doc\.s3\.amazonaws\.com/2006\-03\-01/AmazonS3\.xsd](https://doc.s3.amazonaws.com/2006-03-01/AmazonS3.xsd)\. 7 | 8 | Most users will interact with Amazon S3 using a SOAP toolkit tailored for their language and development environment\. Different toolkits will expose the Amazon S3 API in different ways\. Please refer to your specific toolkit documentation to understand how to use it\. This section illustrates the Amazon S3 SOAP operations in a toolkit\-independent way by exhibiting the XML requests and responses as they appear "on the wire\." 9 | 10 | ## Common elements 11 | 12 | You can include the following authorization\-related elements with any SOAP request: 13 | + `AWSAccessKeyId:` The AWS Access Key ID of the requester 14 | + `Timestamp:` The current time on your system 15 | + `Signature:` The signature for the request
-------------------------------------------------------------------------------- /doc_source/ObjectAndSubResource.md: -------------------------------------------------------------------------------- 1 | # Object subresources 2 | 3 | Amazon S3 defines a set of subresources associated with buckets and objects\. Subresources are subordinates to objects\. This means that subresources don't exist on their own\. They are always associated with some other entity, such as an object or a bucket\. 4 | 5 | The following table lists the subresources associated with Amazon S3 objects\. 6 | 7 | 8 | | Subresource | Description | 9 | | --- | --- | 10 | | acl | Contains a list of grants identifying the grantees and the permissions granted\. When you create an object, the acl identifies the object owner as having full control over the object\. You can retrieve an object ACL or replace it with an updated list of grants\. Any update to an ACL requires you to replace the existing ACL\. For more information about ACLs, see [Managing Access with ACLs](S3_ACLs_UsingACLs.md)\. | 11 | | torrent | Amazon S3 supports the BitTorrent protocol\. Amazon S3 uses the torrent subresource to return the torrent file associated with the specific object\. To retrieve a torrent file, you specify the torrent subresource in your GET request\. Amazon S3 creates a torrent file and returns it\. You can only retrieve the torrent subresource; you cannot create, update, or delete it\. For more information, see [Using BitTorrent with Amazon S3](S3Torrent.md)\. Amazon S3 does not support the BitTorrent protocol in AWS Regions launched after May 30, 2016\. |
-------------------------------------------------------------------------------- /doc_source/RetMetaOfObjVersion.md: -------------------------------------------------------------------------------- 1 | # Retrieving the metadata of an object version 2 | 3 | If you only want to retrieve the metadata of an object \(and not its content\), you use the `HEAD` operation\. By default, you get the metadata of the most recent version\. To retrieve the metadata of a specific object version, you specify its version ID\. 4 | 5 | **To retrieve the metadata of an object version** 6 | 7 | 1.
Set `versionId` to the ID of the version of the object whose metadata you want to retrieve\. 8 | 9 | 1. Send a `HEAD Object versionId` request\. 10 | 11 | **Example Retrieving the metadata of a versioned object** 12 | The following request retrieves the metadata of version 3HL4kqCxf3vjVBH40Nrjfkd of `my-image.jpg`\. 13 | 14 | ``` 15 | 1. HEAD /my-image.jpg?versionId=3HL4kqCxf3vjVBH40Nrjfkd HTTP/1.1 16 | 2. Host: bucket.s3.amazonaws.com 17 | 3. Date: Wed, 28 Oct 2009 22:32:00 GMT 18 | 4. Authorization: AWS AKIAIOSFODNN7EXAMPLE:0RQf4/cRonhpaBX5sCYVf1bNRuU= 19 | ``` 20 | 21 | The following shows a sample response\. 22 | 23 | ``` 24 | 1. HTTP/1.1 200 OK 25 | 2. x-amz-id-2: ef8yU9AS1ed4OpIszj7UDNEHGran 26 | 3. x-amz-request-id: 318BC8BC143432E5 27 | 4. x-amz-version-id: 3HL4kqtJlcpXroDTDmjVBH40Nrjfkd 28 | 5. Date: Wed, 28 Oct 2009 22:32:00 GMT 29 | 6. Last-Modified: Sun, 1 Jan 2006 12:00:00 GMT 30 | 7. ETag: "fba9dede5f27731c9771645a39863328" 31 | 8. Content-Length: 434234 32 | 9. Content-Type: text/plain 33 | 10. Connection: close 34 | 11. Server: AmazonS3 35 | ```
-------------------------------------------------------------------------------- /doc_source/UsingMFADelete.md: -------------------------------------------------------------------------------- 1 | # Using MFA delete 2 | 3 | If a bucket's versioning configuration is MFA Delete–enabled, the bucket owner must include the `x-amz-mfa` request header in requests to permanently delete an object version or change the versioning state of the bucket\. Requests that include `x-amz-mfa` must use HTTPS\. The header's value is the concatenation of your authentication device's serial number, a space, and the authentication code displayed on it\. If you do not include this request header, the request fails\. 4 | 5 | For more information about authentication devices, see [https://aws\.amazon\.com/iam/details/mfa/](https://aws.amazon.com/iam/details/mfa/)\. 6 | 7 | **Example Deleting an object from an MFA delete enabled bucket** 8 | The following example shows how to delete `my-image.jpg` \(with the specified version\), which is in a bucket configured with MFA Delete enabled\. Note the space between *\[SerialNumber\]* and *\[AuthenticationCode\]*\. For more information, see [DELETE Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html)\. 9 | 10 | ``` 11 | 1. DELETE /my-image.jpg?versionId=3HL4kqCxf3vjVBH40Nrjfkd HTTPS/1.1 12 | 2. Host: bucketName.s3.amazonaws.com 13 | 3. x-amz-mfa: 20899872 301749 14 | 4. Date: Wed, 28 Oct 2009 22:32:00 GMT 15 | 5. Authorization: AWS AKIAIOSFODNN7EXAMPLE:0RQf4/cRonhpaBX5sCYVf1bNRuU= 16 | ``` 17 | 18 | For more information about enabling MFA delete, see [MFA delete](Versioning.md#MultiFactorAuthenticationDelete)\.
-------------------------------------------------------------------------------- /doc_source/UsingSOAPError.md: -------------------------------------------------------------------------------- 1 | # The SOAP error response 2 | 3 | **Note** 4 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. 5 | 6 | In SOAP, an error result is returned to the client as a SOAP fault, with the HTTP response code 500\. If you do not receive a SOAP fault, then your request was successful\. The Amazon S3 SOAP fault code is composed of a standard SOAP 1\.1 fault code \(either "Server" or "Client"\) concatenated with the Amazon S3\-specific error code\.
For example: "Server\.InternalError" or "Client\.NoSuchBucket"\. The SOAP fault string element contains a generic, human readable error message in English\. Finally, the SOAP fault detail element contains miscellaneous information relevant to the error\. 7 | 8 | For example, if you attempt to delete the object "Fred", which does not exist, the body of the SOAP response contains a "NoSuchKey" SOAP fault\. 9 | 10 | **Example** 11 | 12 | ``` 13 | 1. 14 | 2. 15 | 3. soapenv:Client.NoSuchKey 16 | 4. The specified key does not exist. 17 | 5. 18 | 6. Fred 19 | 7. 20 | 8. 21 | 9. 22 | ``` 23 | 24 | For more information about Amazon S3 errors, go to [ErrorCodeList](https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)\. -------------------------------------------------------------------------------- /doc_source/DeletingObjectsfromVersioningSuspendedBuckets.md: -------------------------------------------------------------------------------- 1 | # Deleting objects from versioning\-suspended buckets 2 | 3 | If versioning is suspended, a `DELETE` request: 4 | + Can only remove an object whose version ID is `null` 5 | 6 | Doesn't remove anything if there isn't a null version of the object in the bucket\. 7 | + Inserts a delete marker into the bucket\. 8 | 9 | The following figure shows how a simple `DELETE` removes a null version and Amazon S3 inserts a delete marker in its place with a version ID of `null`\. 10 | 11 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_DELETE_versioningSuspended.png) 12 | 13 | Remember that a delete marker doesn't have content, so you lose the content of the null version when a delete marker replaces it\. 14 | 15 | The following figure shows a bucket that doesn't have a null version\. In this case, the `DELETE` removes nothing; Amazon S3 just inserts a delete marker\. 16 | 17 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_DELETE_versioningSuspendedNoNull.png) 18 | 19 | Even in a versioning\-suspended bucket, the bucket owner can permanently delete a specified version\. The following figure shows that deleting a specified object version permanently removes that object\. Only the bucket owner can delete a specified object version\. 20 | 21 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_DELETE_versioningEnabled2.png) -------------------------------------------------------------------------------- /code_examples/php_examples/S3examples/s3-copying-objects.php: -------------------------------------------------------------------------------- 1 | 'latest', 15 | 'region' => 'us-east-1' 16 | ]); 17 | 18 | // Copy an object. 19 | $s3->copyObject([ 20 | 'Bucket' => $targetBucket, 21 | 'Key' => "{$sourceKeyname}-copy", 22 | 'CopySource' => "{$sourceBucket}/{$sourceKeyname}", 23 | ]); 24 | 25 | // Perform a batch of CopyObject operations. 26 | $batch = array(); 27 | for ($i = 1; $i <= 3; $i++) { 28 | $batch[] = $s3->getCommand('CopyObject', [ 29 | 'Bucket' => $targetBucket, 30 | 'Key' => "{targetKeyname}-{$i}", 31 | 'CopySource' => "{$sourceBucket}/{$sourceKeyname}", 32 | ]); 33 | } 34 | try { 35 | $succeeded = $s3->execute($batch); 36 | $failed = array(); 37 | } catch (CommandTransferException $e) { 38 | $succeeded = $e->getSuccessfulCommands(); 39 | echo "Failed Commands:" . PHP_EOL; 40 | foreach ($e->getFailedCommands() as $failedCommand) { 41 | echo $e->getExceptionForFailedCommand($FailedCommand)->getMessage() . 
-------------------------------------------------------------------------------- /doc_source/LLlistMPuploadsDotNet.md: -------------------------------------------------------------------------------- 1 | # List multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(low\-level\) 2 | 3 | To list all of the in\-progress multipart uploads on a specific bucket, use the AWS SDK for \.NET low\-level multipart upload API's `ListMultipartUploadsRequest` class\. The `AmazonS3Client.ListMultipartUploads` method returns an instance of the `ListMultipartUploadsResponse` class that provides information about the in\-progress multipart uploads\. 4 | 5 | An in\-progress multipart upload is a multipart upload that has been initiated using the initiate multipart upload request, but has not yet been completed or stopped\. For more information about Amazon S3 multipart uploads, see [Multipart upload overview](mpuoverview.md)\. 6 | 7 | The following C\# example shows how to use the AWS SDK for \.NET to list all in\-progress multipart uploads on a bucket\. For information about the example's compatibility with a specific version of the AWS SDK for \.NET and instructions on how to create and test a working sample, see [Running the Amazon S3 \.NET Code Examples](UsingTheMPDotNetAPI.md#TestingDotNetApiSamples)\. 8 | 9 | ``` 10 | var s3Client = new AmazonS3Client(); // Instantiate a client (credentials come from the environment). 11 | 12 | ListMultipartUploadsRequest request = new ListMultipartUploadsRequest 13 | { 14 |     BucketName = bucketName // Bucket receiving the uploads. 15 | }; 16 | 17 | ListMultipartUploadsResponse response = await s3Client.ListMultipartUploadsAsync(request); 18 | ``` 19 | 20 | ## More info 21 | 22 | [AWS SDK for \.NET](https://aws.amazon.com/sdk-for-net/)
-------------------------------------------------------------------------------- /doc_source/lifecycle-expire-general-considerations.md: -------------------------------------------------------------------------------- 1 | # Understanding object expiration 2 | 3 | When an object reaches the end of its lifetime, Amazon S3 queues it for removal and removes it asynchronously\. There might be a delay between the expiration date and the date at which Amazon S3 removes an object\. You are not charged for storage time associated with an object that has expired\. 4 | 5 | To find when an object is scheduled to expire, use the [HEAD Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectHEAD.html) or the [GET Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html) API operations\. These API operations return response headers that provide this information\. 6 | 7 | If you create an S3 Lifecycle expiration rule that causes objects that have been in S3 Intelligent\-Tiering, S3 Standard\-IA, or S3 One Zone\-IA storage for less than 30 days to expire, you are charged for 30 days\. If you create a Lifecycle expiration rule that causes objects that have been in S3 Glacier storage for less than 90 days to expire, you are charged for 90 days\. If you create a Lifecycle expiration rule that causes objects that have been in S3 Glacier Deep Archive storage for less than 180 days to expire, you are charged for 180 days\. For more information, see [Amazon S3 pricing](https://aws.amazon.com/s3/pricing/) and [How do I create a lifecycle policy for an S3 Bucket?](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/create-lifecycle.html) in the *Amazon Simple Storage Service Console User Guide*\.
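To make the expiration mechanics concrete, here is a minimal C# sketch that sets a prefix\-scoped expiration rule of 365 days, comfortably past the minimum\-storage\-duration charges described above\. It is an illustration added for context; the rule ID, prefix, and bucket name are placeholders, and it assumes the AWSSDK\.S3 package with credentials configured in the environment\.

```
using System.Collections.Generic;
using System.Threading.Tasks;
using Amazon;
using Amazon.S3;
using Amazon.S3.Model;

class LifecycleExpirationSketch
{
    static async Task Main()
    {
        var client = new AmazonS3Client(RegionEndpoint.USEast1);

        // Expire objects under the logs/ prefix 365 days after creation.
        var rule = new LifecycleRule
        {
            Id = "ExpireOldLogs", // hypothetical rule name
            Status = LifecycleRuleStatus.Enabled,
            Filter = new LifecycleFilter
            {
                LifecycleFilterPredicate = new LifecyclePrefixPredicate
                {
                    Prefix = "logs/"
                }
            },
            Expiration = new LifecycleRuleExpiration { Days = 365 }
        };

        await client.PutLifecycleConfigurationAsync(new PutLifecycleConfigurationRequest
        {
            BucketName = "*** bucket name ***", // placeholder
            Configuration = new LifecycleConfiguration
            {
                Rules = new List<LifecycleRule> { rule }
            }
        });
    }
}
```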
-------------------------------------------------------------------------------- /doc_source/S3TorrentCharge.md: -------------------------------------------------------------------------------- 1 | # How you are charged for BitTorrent delivery 2 | 3 | There is no extra charge for use of BitTorrent with Amazon S3\. Data transfer via the BitTorrent protocol is metered at the same rate as client/server delivery\. To be precise, whenever a downloading BitTorrent client requests a "piece" of an object from the Amazon S3 "seeder," charges accrue just as if an anonymous request for that piece had been made using the REST or SOAP protocol\. These charges will appear on your Amazon S3 bill and usage reports in the same way\. The difference is that if a lot of clients are requesting the same object simultaneously via BitTorrent, then the amount of data Amazon S3 must serve to satisfy those clients will be lower than with client/server delivery\. This is because the BitTorrent clients are simultaneously uploading and downloading amongst themselves\. 4 | 5 | **Note** 6 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. 7 | 8 | The data transfer savings achieved from use of BitTorrent can vary widely depending on how popular your object is\. Less popular objects require heavier use of the "seeder" to serve clients, and thus the difference between BitTorrent distribution costs and client/server distribution costs might be small for such objects\. In particular, if only one client is ever downloading a particular object at a time, the cost of BitTorrent delivery will be the same as direct download\. -------------------------------------------------------------------------------- /doc_source/BucketBilling.md: -------------------------------------------------------------------------------- 1 | # Billing and usage reporting for S3 buckets 2 | 3 | When using Amazon Simple Storage Service \(Amazon S3\), you don't have to pay any upfront fees or commit to how much content you'll store\. As with the other Amazon Web Services \(AWS\) services, you pay as you go and pay only for what you use\. 4 | 5 | AWS provides the following reports for Amazon S3: 6 | + **Billing reports** – Multiple reports that provide high\-level views of all of the activity for the AWS services that you're using, including Amazon S3\. AWS always bills the owner of the S3 bucket for Amazon S3 fees, unless the bucket was created as a Requester Pays bucket\. For more information about Requester Pays, see [Requester Pays buckets](RequesterPaysBuckets.md)\. For more information about billing reports, see [AWS Billing reports for Amazon S3](aws-billing-reports.md)\. 7 | + **Usage report** – A summary of activity for a specific service, aggregated by hour, day, or month\. You can choose which usage type and operation to include\. You can also choose how the data is aggregated\. For more information, see [AWS usage report for Amazon S3](aws-usage-report.md)\. 8 | 9 | The following topics provide information about billing and usage reporting for Amazon S3\. 
10 | 11 | **Topics** 12 | + [AWS Billing reports for Amazon S3](aws-billing-reports.md) 13 | + [AWS usage report for Amazon S3](aws-usage-report.md) 14 | + [Understanding your AWS billing and usage reports for Amazon S3](aws-usage-report-understand.md) 15 | + [Using cost allocation S3 bucket tags](CostAllocTagging.md) -------------------------------------------------------------------------------- /doc_source/LLAbortMPUphp.md: -------------------------------------------------------------------------------- 1 | # Abort a multipart upload 2 | 3 | This topic describes how to use a class from version 3 of the AWS SDK for PHP to stop a multipart upload that is in progress\. It assumes that you are already following the instructions for [Using the AWS SDK for PHP and Running PHP Examples](UsingTheMPphpAPI.md) and have the AWS SDK for PHP properly installed\. 4 | 5 | The following PHP example shows how to stop an in\-progress multipart upload using the `abortMultipartUpload()` method\. For information about running the PHP examples in this guide, see [Running PHP Examples](UsingTheMPphpAPI.md#running-php-samples)\. 6 | 7 | ``` 8 | require 'vendor/autoload.php'; 9 | 10 | use Aws\S3\S3Client; 11 | 12 | $bucket = '*** Your Bucket Name ***'; 13 | $keyname = '*** Your Object Key ***'; 14 | $uploadId = '*** Upload ID of upload to Abort ***'; 15 | 16 | $s3 = new S3Client([ 17 | 'version' => 'latest', 18 | 'region' => 'us-east-1' 19 | ]); 20 | 21 | // Abort the multipart upload. 22 | $s3->abortMultipartUpload([ 23 | 'Bucket' => $bucket, 24 | 'Key' => $keyname, 25 | 'UploadId' => $uploadId, 26 | ]); 27 | ``` 28 | 29 | ## Related resources 30 | + [ AWS SDK for PHP for Amazon S3 Aws\\S3\\S3Client Class](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.S3.S3Client.html) 31 | + [ Amazon S3 Multipart Uploads](https://docs.aws.amazon.com/aws-sdk-php/v3/guide/service/s3-multipart-upload.html) 32 | + [AWS SDK for PHP Documentation](http://aws.amazon.com/documentation/sdk-for-php/) -------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples/Properties/AssemblyInfo.cs: -------------------------------------------------------------------------------- 1 | using System.Reflection; 2 | using System.Runtime.CompilerServices; 3 | using System.Runtime.InteropServices; 4 | 5 | // General Information about an assembly is controlled through the following 6 | // set of attributes. Change these attribute values to modify the information 7 | // associated with an assembly. 8 | [assembly: AssemblyTitle("S3Examples")] 9 | [assembly: AssemblyDescription("")] 10 | [assembly: AssemblyConfiguration("")] 11 | [assembly: AssemblyCompany("")] 12 | [assembly: AssemblyProduct("S3Examples")] 13 | [assembly: AssemblyCopyright("Copyright © 2018")] 14 | [assembly: AssemblyTrademark("")] 15 | [assembly: AssemblyCulture("")] 16 | 17 | // Setting ComVisible to false makes the types in this assembly not visible 18 | // to COM components. If you need to access a type in this assembly from 19 | // COM, set the ComVisible attribute to true on that type. 
20 | [assembly: ComVisible(false)]
21 | 
22 | // The following GUID is for the ID of the typelib if this project is exposed to COM
23 | [assembly: Guid("6272109b-98dc-4fc0-b821-badb3b51496b")]
24 | 
25 | // Version information for an assembly consists of the following four values:
26 | //
27 | //      Major Version
28 | //      Minor Version
29 | //      Build Number
30 | //      Revision
31 | //
32 | // You can specify all the values or you can default the Build and Revision Numbers
33 | // by using the '*' as shown below:
34 | // [assembly: AssemblyVersion("1.0.*")]
35 | [assembly: AssemblyVersion("1.0.0.0")]
36 | [assembly: AssemblyFileVersion("1.0.0.0")]
37 | 
--------------------------------------------------------------------------------
/doc_source/network-isolation.md:
--------------------------------------------------------------------------------
 1 | # Infrastructure security in Amazon S3
 2 | 
 3 | As a managed service, Amazon S3 is protected by the AWS global network security procedures that are described in the [Amazon Web Services: Overview of Security Processes](https://d1.awsstatic.com/whitepapers/aws-security-whitepaper.pdf)\.
 4 | 
 5 | Access to Amazon S3 via the network is through AWS published APIs\. Clients must support Transport Layer Security \(TLS\) 1\.0\. We recommend TLS 1\.2\. Clients must also support cipher suites with Perfect Forward Secrecy \(PFS\) such as Ephemeral Diffie\-Hellman \(DHE\) or Elliptic Curve Diffie\-Hellman Ephemeral \(ECDHE\)\. Additionally, requests must be signed using AWS Signature V4 or AWS Signature V2, requiring valid credentials to be provided\.
 6 | 
 7 | These APIs are callable from any network location\. However, Amazon S3 does support resource\-based access policies, which can include restrictions based on the source IP address\. You can also use Amazon S3 bucket policies to control access to buckets from specific virtual private cloud \(VPC\) endpoints, or specific VPCs\. Effectively, this limits network access to a given Amazon S3 bucket to the specified VPCs within the AWS network\. For more information, see [Example Bucket Policies for VPC Endpoints for Amazon S3](example-bucket-policies-vpc-endpoint.md)\.
 8 | 
 9 | The following security best practices also address infrastructure security in Amazon S3:
10 | + [Consider VPC endpoints for Amazon S3 access](security-best-practices.md#end-points)
11 | + [Identify and audit all your Amazon S3 buckets](security-best-practices.md#audit)
--------------------------------------------------------------------------------
/doc_source/CopyingObjectUsingREST.md:
--------------------------------------------------------------------------------
 1 | # Copy an Object Using the REST API
 2 | 
 3 | This example describes how to copy an object using REST\. For more information about the REST API, go to [PUT Object \(Copy\)](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html)\.
 4 | 
 5 | This example copies the `flotsam` object from the `pacific` bucket to the `jetsam` object of the `atlantic` bucket, preserving its metadata\.
 6 | 
 7 | ```
 8 | 1. PUT /jetsam HTTP/1.1
 9 | 2. Host: atlantic.s3.amazonaws.com
10 | 3. x-amz-copy-source: /pacific/flotsam
11 | 4. Authorization: AWS AKIAIOSFODNN7EXAMPLE:ENoSbxYByFA0UGLZUqJN5EUnLDg=
12 | 5. Date: Wed, 20 Feb 2008 22:12:21 +0000
13 | ```
14 | 
15 | The signature was generated from the following information\.
16 | 
17 | ```
18 | 1. PUT\r\n
19 | 2. \r\n
20 | 3. \r\n
21 | 4. Wed, 20 Feb 2008 22:12:21 +0000\r\n
22 | 5. 
23 | 6. x-amz-copy-source:/pacific/flotsam\r\n
24 | 7. /atlantic/jetsam
25 | ```
26 | 
27 | Amazon S3 returns the following response that specifies the ETag of the object and when it was last modified\.
28 | 
29 | ```
30 | 1. HTTP/1.1 200 OK
31 | 2. x-amz-id-2: Vyaxt7qEbzv34BnSu5hctyyNSlHTYZFMWK4FtzO+iX8JQNyaLdTshL0KxatbaOZt
32 | 3. x-amz-request-id: 6B13C3C5B34AF333
33 | 4. Date: Wed, 20 Feb 2008 22:13:01 +0000
34 | 5. 
35 | 6. Content-Type: application/xml
36 | 7. Transfer-Encoding: chunked
37 | 8. Connection: close
38 | 9. Server: AmazonS3
39 | 10. 
40 | 11. <?xml version="1.0" encoding="UTF-8"?>
41 | 12. <CopyObjectResult>
42 | 13.    <LastModified>2008-02-20T22:13:01</LastModified>
43 | 14.    <ETag>"7e9c608af58950deeb370c98608ed097"</ETag>
44 | 15. </CopyObjectResult>
45 | ```
--------------------------------------------------------------------------------
/doc_source/LLAbortMPUnet.md:
--------------------------------------------------------------------------------
 1 | # Stop multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(low\-level\)
 2 | 
 3 | You can stop an in\-progress multipart upload by calling the `AmazonS3Client.AbortMultipartUploadAsync` method\. In addition to stopping the upload, this method deletes all parts that were uploaded to Amazon S3\.
 4 | 
 5 | To stop a multipart upload, you provide the upload ID, and the bucket and key names that are used in the upload\. After you have stopped a multipart upload, you can't use the upload ID to upload additional parts\. For more information about Amazon S3 multipart uploads, see [Multipart upload overview](mpuoverview.md)\.
 6 | 
 7 | The following C\# example shows how to stop a multipart upload\. For a complete C\# sample that includes the following code, see [Upload a file to an S3 Bucket using the AWS SDK for \.NET \(low\-level API\)](LLuploadFileDotNet.md)\.
 8 | 
 9 | ```
10 | AbortMultipartUploadRequest abortMPURequest = new AbortMultipartUploadRequest
11 | {
12 |     BucketName = existingBucketName,
13 |     Key = keyName,
14 |     UploadId = initResponse.UploadId
15 | };
16 | // s3Client is an existing AmazonS3Client instance for the bucket's Region.
17 | await s3Client.AbortMultipartUploadAsync(abortMPURequest);
18 | ```
19 | 
20 | You can also stop all in\-progress multipart uploads that were initiated prior to a specific time\. This clean\-up operation is useful for stopping multipart uploads that didn't complete or were stopped\. For more information, see [Stop multipart uploads to an S3 Bucket using the AWS SDK for \.NET \(high\-level API\)](HLAbortDotNet.md)\.
21 | 
22 | ## More info
23 | 
24 | [AWS SDK for \.NET](https://aws.amazon.com/sdk-for-net/)
--------------------------------------------------------------------------------
/doc_source/usingHLmpuJava.md:
--------------------------------------------------------------------------------
 1 | # Using the AWS Java SDK for multipart upload \(high\-level API\)
 2 | 
 3 | **Topics**
 4 | + [Upload a file](HLuploadFileJava.md)
 5 | + [Stop multipart uploads](HLAbortMPUploadsJava.md)
 6 | + [Track multipart upload progress](HLTrackProgressMPUJava.md)
 7 | 
 8 | The AWS SDK for Java exposes a high\-level API, called `TransferManager`, that simplifies multipart uploads \(see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\)\. You can upload data from a file or a stream\. You can also set advanced options, such as the part size you want to use for the multipart upload, or the number of concurrent threads you want to use when uploading the parts\. You can also set optional object properties, the storage class, or the ACL\. You use the `PutObjectRequest` and the `TransferManagerConfiguration` classes to set these advanced options\.
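
As a quick illustration, the following minimal sketch uploads a single file with `TransferManager`\. It is a sketch only: the bucket name, key name, and file path are placeholders, and credentials are assumed to come from the default provider chain\.

```
import java.io.File;

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

public class HighLevelUploadSketch {

    public static void main(String[] args) {
        String bucketName = "*** Bucket name ***";
        String keyName = "*** Key name ***";
        String filePath = "*** Path to the file to upload ***";

        TransferManager tm = TransferManagerBuilder.standard().build();
        try {
            // TransferManager decides whether a multipart upload is needed
            // and uploads the parts in parallel where it can.
            Upload upload = tm.upload(bucketName, keyName, new File(filePath));

            // waitForCompletion blocks until the upload finishes or fails.
            upload.waitForCompletion();
        } catch (AmazonServiceException | InterruptedException e) {
            e.printStackTrace();
        } finally {
            tm.shutdownNow();
        }
    }
}
```

A real application might instead poll the returned `Upload` object or attach a progress listener, as described in [Track multipart upload progress](HLTrackProgressMPUJava.md)\.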
 9 | 
10 | When possible, `TransferManager` attempts to use multiple threads to upload multiple parts of a single upload at once\. When dealing with large content sizes and high bandwidth, this can increase throughput significantly\.
11 | 
12 | In addition to file\-upload functionality, the `TransferManager` class enables you to stop an in\-progress multipart upload\. An upload is considered to be in progress after you initiate it and until you complete or stop it\. The `TransferManager` can stop all in\-progress multipart uploads on a specified bucket that were initiated before a specified date and time\.
13 | 
14 | For more information about multipart uploads, including additional functionality offered by the low\-level API methods, see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\.
--------------------------------------------------------------------------------
/doc_source/RestoringPreviousVersions.md:
--------------------------------------------------------------------------------
 1 | # Restoring previous versions
 2 | 
 3 | One of the value propositions of versioning is the ability to retrieve previous versions of an object\. There are two approaches to doing so:
 4 | + Copy a previous version of the object into the same bucket
 5 | 
 6 |   The copied object becomes the current version of that object and all object versions are preserved\.
 7 | + Permanently delete the current version of the object
 8 | 
 9 |   When you delete the current object version, you, in effect, turn the previous version into the current version of that object\.
10 | 
11 | Because all object versions are preserved, you can make any earlier version the current version by copying a specific version of the object into the same bucket\. In the following figure, the source object \(ID = 111111\) is copied into the same bucket\. Amazon S3 supplies a new ID \(88778877\) and it becomes the current version of the object\. So, the bucket has both the original object version \(111111\) and its copy \(88778877\)\.
12 | 
13 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_COPY2.png)
14 | 
15 | A subsequent `GET` will retrieve version 88778877\.
16 | 
17 | The following figure shows how deleting the current version \(121212\) of an object leaves the previous version \(111111\) as the current version\.
18 | 
19 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_COPY_delete2.png)
20 | 
21 | A subsequent `GET` will retrieve version 111111\.
22 | 
23 | For more information on retrieving previous versions of an object, see [Retrieving object versions](RetrievingObjectVersions.md)\.
--------------------------------------------------------------------------------
/code_examples/java_examples/S3Examples/DualStackEndpoints.java:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
3 | 4 | import com.amazonaws.AmazonServiceException; 5 | import com.amazonaws.SdkClientException; 6 | import com.amazonaws.auth.profile.ProfileCredentialsProvider; 7 | import com.amazonaws.services.s3.AmazonS3; 8 | import com.amazonaws.services.s3.AmazonS3ClientBuilder; 9 | 10 | public class DualStackEndpoints { 11 | 12 | public static void main(String[] args) { 13 | String clientRegion = "*** Client region ***"; 14 | String bucketName = "*** Bucket name ***"; 15 | 16 | try { 17 | // Create an Amazon S3 client with dual-stack endpoints enabled. 18 | AmazonS3 s3Client = AmazonS3ClientBuilder.standard() 19 | .withCredentials(new ProfileCredentialsProvider()) 20 | .withRegion(clientRegion) 21 | .withDualstackEnabled(true) 22 | .build(); 23 | 24 | s3Client.listObjects(bucketName); 25 | } 26 | catch(AmazonServiceException e) { 27 | // The call was transmitted successfully, but Amazon S3 couldn't process 28 | // it, so it returned an error response. 29 | e.printStackTrace(); 30 | } 31 | catch(SdkClientException e) { 32 | // Amazon S3 couldn't be contacted for a response, or the client 33 | // couldn't parse the response from Amazon S3. 34 | e.printStackTrace(); 35 | } 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /doc_source/batch-ops-copy-object.md: -------------------------------------------------------------------------------- 1 | # Put object copy 2 | 3 | The Put object copy operation copies each object specified in the manifest\. You can copy objects to a different bucket in the same AWS Region or to a bucket in a different Region\. S3 Batch Operations supports most options available through Amazon S3 for copying objects\. These options include setting object metadata, setting permissions, and changing an object's storage class\. For more information about the functionality available through Amazon S3 for copying objects, see [Copying objects](CopyingObjectsExamples.md)\. 4 | 5 | ## Restrictions and limitations 6 | + All source objects must be in one bucket\. 7 | + All destination objects must be in one bucket\. 8 | + You must have read permissions for the source bucket and write permissions for the destination bucket\. 9 | + Objects to be copied can be up to 5 GB in size\. 10 | + Put object copy jobs must be created in the destination region, which is the region you intend to copy the objects to\. 11 | + All Put object copy options are supported except for conditional checks on ETags and server\-side encryption with customer\-provided encryption keys \(SSE\-C\)\. 12 | + If the buckets are unversioned, you will overwrite objects with the same key names\. 13 | + Objects are not necessarily copied in the same order as they are listed in the manifest\. For versioned buckets, if preserving current/non\-current version order is important, you should copy all non\-current versions first\. Then, after the first job is complete, copy the current versions in a subsequent job\. 14 | + Copying objects to the Reduced Redundancy Storage \(RRS\) class is not supported\. -------------------------------------------------------------------------------- /doc_source/ObjectOperations.md: -------------------------------------------------------------------------------- 1 | # Operations on objects 2 | 3 | Amazon S3 enables you to store, retrieve, and delete objects\. You can retrieve an entire object or a portion of an object\. If you enabled S3 Versioning on your bucket, you can retrieve a specific version of the object\. 
You can also retrieve a subresource associated with your object and update it where applicable\. You can make a copy of your existing object\. Depending on the object size, the following upload\- and copy\-related considerations apply:
 4 | + **Uploading objects—**You can upload objects of up to 5 GB in size in a single operation\. For objects greater than 5 GB, you must use the multipart upload API\.
 5 | 
 6 |   Using the multipart upload API, you can upload objects up to 5 TB each\. For more information, see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\.
 7 | + **Copying objects—**The copy operation creates a copy of an object that is already stored in Amazon S3\.
 8 | 
 9 |   You can create a copy of your object up to 5 GB in size in a single atomic operation\. However, for copying an object greater than 5 GB, you must use the multipart upload API\. For more information, see [Copying objects](CopyingObjectsExamples.md)\.
10 | 
11 | You can use the REST API \(see [Making requests using the REST API](RESTAPI.md)\) to work with objects or use one of the following AWS SDK libraries:
12 | + [AWS SDK for Java](https://aws.amazon.com/sdk-for-java/)
13 | + [AWS SDK for \.NET](https://aws.amazon.com/sdk-for-net/)
14 | + [AWS SDK for PHP](https://aws.amazon.com/sdk-for-php/)
15 | 
16 | These libraries provide a high\-level abstraction that makes working with objects easy\. However, if your application requires it, you can use the REST API directly\.
--------------------------------------------------------------------------------
/doc_source/UsingHTTPPOST.md:
--------------------------------------------------------------------------------
 1 | # Browser\-based uploads using POST \(AWS signature version 2\)
 2 | 
 3 | Amazon S3 supports POST, which allows your users to upload content directly to Amazon S3\. POST is designed to simplify uploads, reduce upload latency, and save you money on applications where users upload data to store in Amazon S3\.
 4 | 
 5 | **Note**
 6 | The request authentication discussed in this section is based on AWS Signature Version 2, a protocol for authenticating inbound API requests to AWS services\.
 7 | Amazon S3 now supports Signature Version 4 in all AWS Regions\. At this time, AWS Regions created before January 30, 2014 continue to support the previous protocol, Signature Version 2\. Any new Regions after January 30, 2014 support only Signature Version 4, and therefore all requests to those Regions must be made with Signature Version 4\. For more information, see [Authenticating Requests in Browser\-Based Uploads Using POST \(AWS Signature Version 4\)](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-authentication-HTTPPOST.html) in the *Amazon Simple Storage Service API Reference*\.
 8 | 
 9 | The following figure shows an upload using Amazon S3 POST\.
10 | 
11 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/s3_post.png)
12 | 
13 | 
14 | **Uploading using POST**
15 | 
16 | | Step | Description |
17 | | --- | --- |
18 | | 1 | The user opens a web browser and accesses your web page\. |
19 | | 2 | Your web page contains an HTTP form that contains all the information necessary for the user to upload content to Amazon S3\. |
20 | | 3 | The user uploads content directly to Amazon S3\. |
21 | 
22 | **Note**
23 | Query string authentication is not supported for POST\.
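
To tie the pieces together: the form carries a base64\-encoded policy document in its `policy` field and, in its `signature` field, an HMAC\-SHA1 signature of that base64 string computed with your AWS secret access key\. The following Java sketch shows the Signature Version 2 signing calculation only; the policy contents and the secret key are placeholder examples, not a complete upload form\.

```
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;

public class PostPolicySignerSketch {

    public static void main(String[] args) throws Exception {
        String secretAccessKey = "*** Secret access key ***";

        // A tiny example policy; a real one lists every field the form sends.
        String policyJson = "{\"expiration\": \"2030-01-01T00:00:00Z\","
                + " \"conditions\": ["
                + " {\"bucket\": \"examplebucket\"},"
                + " [\"starts-with\", \"$key\", \"user/\"],"
                + " {\"success_action_status\": \"201\"}"
                + " ]}";

        // The base64-encoded UTF-8 policy goes into the form's "policy" field.
        String policy = Base64.getEncoder()
                .encodeToString(policyJson.getBytes(StandardCharsets.UTF_8));

        // Signature Version 2 signs the base64 policy with HMAC-SHA1.
        Mac hmac = Mac.getInstance("HmacSHA1");
        hmac.init(new SecretKeySpec(
                secretAccessKey.getBytes(StandardCharsets.UTF_8), "HmacSHA1"));
        String signature = Base64.getEncoder()
                .encodeToString(hmac.doFinal(policy.getBytes(StandardCharsets.UTF_8)));

        System.out.println("policy: " + policy);
        System.out.println("signature: " + signature);
    }
}
```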
-------------------------------------------------------------------------------- /doc_source/Welcome.md: -------------------------------------------------------------------------------- 1 | # What is Amazon S3? 2 | 3 | Amazon Simple Storage Service is storage for the Internet\. It is designed to make web\-scale computing easier for developers\. 4 | 5 | Amazon S3 has a simple web services interface that you can use to store and retrieve any amount of data, at any time, from anywhere on the web\. It gives any developer access to the same highly scalable, reliable, fast, inexpensive data storage infrastructure that Amazon uses to run its own global network of web sites\. The service aims to maximize benefits of scale and to pass those benefits on to developers\. 6 | 7 | This guide explains the core concepts of Amazon S3, such as buckets, access points, and objects, and how to work with these resources using the Amazon S3 application programming interface \(API\)\. 8 | 9 | ## How do I\.\.\.? 10 | 11 | 12 | | Information | Relevant sections | 13 | | --- | --- | 14 | | General product overview and pricing | [Amazon S3](https://aws.amazon.com/s3/) | 15 | | Get a quick hands\-on introduction to Amazon S3 | [Amazon Simple Storage Service Getting Started Guide](https://docs.aws.amazon.com/AmazonS3/latest/gsg/) | 16 | | Learn about Amazon S3 key terminology and concepts | [Introduction to Amazon S3](Introduction.md) | 17 | | How do I work with buckets? | [Working with Amazon S3 Buckets](UsingBucket.md) | 18 | | How do I work with access points? | [Managing data access with Amazon S3 access points ](access-points.md) | 19 | | How do I work with objects? | [Working with Amazon S3 objects](UsingObjects.md) | 20 | | How do I make requests? | [Making requests](MakingRequests.md) | 21 | | How do I manage access to my resources? | [Identity and access management in Amazon S3](s3-access-control.md) | -------------------------------------------------------------------------------- /doc_source/AddingObjectstoVersionSuspendedBuckets.md: -------------------------------------------------------------------------------- 1 | # Adding objects to versioning\-suspended buckets 2 | 3 | Once you suspend versioning on a bucket, Amazon S3 automatically adds a `null` version ID to every subsequent object stored thereafter \(using `PUT`, `POST`, or `COPY`\) in that bucket\. 4 | 5 | The following figure shows how Amazon S3 adds the version ID of `null` to an object when it is added to a version\-suspended bucket\. 6 | 7 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_PUT_versionSuspended.png) 8 | 9 | If a null version is already in the bucket and you add another object with the same key, the added object overwrites the original null version\. 10 | 11 | If there are versioned objects in the bucket, the version you `PUT` becomes the current version of the object\. The following figure shows how adding an object to a bucket that contains versioned objects does not overwrite the object already in the bucket\. In this case, version 111111 was already in the bucket\. Amazon S3 attaches a version ID of null to the object being added and stores it in the bucket\. Version 111111 is not overwritten\. 12 | 13 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_PUT_versionSuspended3.png) 14 | 15 | If a null version already exists in a bucket, the null version is overwritten, as shown in the following figure\. 
16 | 
17 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/versioning_PUT_versionSuspended4.png)
18 | 
19 | Note that although the key and version ID \(`null`\) of the null version are the same before and after the `PUT`, the contents of the null version originally stored in the bucket are replaced by the contents of the object `PUT` into the bucket\.
--------------------------------------------------------------------------------
/code_examples/java_examples/S3Examples/DeleteObjectNonVersionedBucket.java:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | import java.io.IOException;
 5 | 
 6 | import com.amazonaws.AmazonServiceException;
 7 | import com.amazonaws.SdkClientException;
 8 | import com.amazonaws.auth.profile.ProfileCredentialsProvider;
 9 | import com.amazonaws.services.s3.AmazonS3;
10 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
11 | import com.amazonaws.services.s3.model.DeleteObjectRequest;
12 | 
13 | public class DeleteObjectNonVersionedBucket {
14 | 
15 |     public static void main(String[] args) throws IOException {
16 |         String clientRegion = "*** Client region ***";
17 |         String bucketName = "*** Bucket name ***";
18 |         String keyName = "*** Key name ****";
19 | 
20 |         try {
21 |             AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
22 |                     .withCredentials(new ProfileCredentialsProvider())
23 |                     .withRegion(clientRegion)
24 |                     .build();
25 | 
26 |             s3Client.deleteObject(new DeleteObjectRequest(bucketName, keyName));
27 |         }
28 |         catch(AmazonServiceException e) {
29 |             // The call was transmitted successfully, but Amazon S3 couldn't process 
30 |             // it, so it returned an error response.
31 |             e.printStackTrace();
32 |         }
33 |         catch(SdkClientException e) {
34 |             // Amazon S3 couldn't be contacted for a response, or the client
35 |             // couldn't parse the response from Amazon S3.
36 |             e.printStackTrace();
37 |         }
38 |     }
39 | }
--------------------------------------------------------------------------------
/code_examples/dotnet_examples/S3Examples/AbortMPUUsingHighLevelAPITest.cs:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | using Amazon.S3;
 5 | using Amazon.S3.Transfer;
 6 | using System;
 7 | using System.Threading.Tasks;
 8 | 
 9 | namespace Amazon.DocSamples.S3
10 | {
11 |     class AbortMPUUsingHighLevelAPITest
12 |     {
13 |         private const string bucketName = "*** provide bucket name ***";
14 |         // Specify your bucket region (an example region is shown).
15 |         private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2;
16 |         private static IAmazonS3 s3Client;
17 | 
18 |         public static void Main()
19 |         {
20 |             s3Client = new AmazonS3Client(bucketRegion);
21 |             AbortMPUAsync().Wait();
22 |         }
23 | 
24 |         private static async Task AbortMPUAsync()
25 |         {
26 |             try
27 |             {
28 |                 var transferUtility = new TransferUtility(s3Client);
29 | 
30 |                 // Abort all in-progress uploads initiated before the specified date.
31 |                 await transferUtility.AbortMultipartUploadsAsync(
32 |                     bucketName, DateTime.Now.AddDays(-7));
33 |             }
34 |             catch (AmazonS3Exception e)
35 |             {
36 |                 Console.WriteLine("Error encountered on server. Message:'{0}' when writing an object", e.Message);
37 |             }
38 |             catch (Exception e)
39 |             {
40 |                 Console.WriteLine("Unknown encountered on server. Message:'{0}' when writing an object", e.Message);
41 |             }
42 |         }
43 |     }
44 | }
45 | 
--------------------------------------------------------------------------------
/doc_source/s3-glacier-select-sql-reference-operators.md:
--------------------------------------------------------------------------------
 1 | # Operators
 2 | 
 3 | Amazon S3 Select and S3 Glacier Select support the following operators\.
 4 | 
 5 | ## Logical Operators
 6 | 
 7 | 
 8 | + `AND`
 9 | + `NOT`
10 | + `OR`
11 | 
12 | ## Comparison Operators
13 | 
14 | 
15 | + `<`
16 | + `>`
17 | + `<=`
18 | + `>=`
19 | + `=`
20 | + `<>`
21 | + `!=`
22 | + `BETWEEN`
23 | + `IN` – For example: `IN ('a', 'b', 'c')`
24 | 
25 | 
26 | 
27 | ## Pattern Matching Operators
28 | + `LIKE`
29 | + `_` \(Matches any single character\)
30 | + `%` \(Matches any sequence of characters\)
31 | 
32 | ## Unary Operators
33 | + `IS NULL`
34 | + `IS NOT NULL`
35 | 
36 | ## Math Operators
37 | 
38 | Addition, subtraction, multiplication, division, and modulo are supported\.
39 | + \+
40 | + \-
41 | + \*
42 | + /
43 | + %
44 | 
45 | ## Operator Precedence
46 | 
47 | The following table shows the operators' precedence in decreasing order\.
48 | 
49 | 
50 | | Operator/Element | Associativity | Description |
51 | | --- | --- | --- |
52 | | \- | right | unary minus |
53 | | \*, /, % | left | multiplication, division, modulo |
54 | | \+, \- | left | addition, subtraction |
55 | | IN |  | set membership |
56 | | BETWEEN |  | range containment |
57 | | LIKE |  | string pattern matching |
58 | | <, > |  | less than, greater than |
59 | | = | right | equality, assignment |
60 | | NOT | right | logical negation |
61 | | AND | left | logical conjunction |
62 | | OR | left | logical disjunction |
--------------------------------------------------------------------------------
/doc_source/auth-request-sig-v2.md:
--------------------------------------------------------------------------------
 1 | # Appendix B: Authenticating requests \(AWS signature version 2\)
 2 | 
 3 | **Important**
 4 | This section describes how to authenticate requests using AWS Signature Version 2\. Signature Version 2 is being turned off \(deprecated\); after that, Amazon S3 will only accept API requests that are signed using Signature Version 4\. For more information, see [AWS Signature Version 2 Turned Off \(Deprecated\) for Amazon S3](UsingAWSSDK.md#UsingAWSSDK-sig2-deprecation)\.
 5 | Signature Version 4 is supported in all AWS Regions, and it is the only version that is supported for new Regions\. For more information, see [Authenticating Requests \(AWS Signature Version 4\)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the *Amazon Simple Storage Service API Reference*\.
 6 | Amazon S3 offers you the ability to identify what API signature version was used to sign a request\. It is important to identify whether any of your workflows use Signature Version 2 signing and to upgrade them to Signature Version 4 to prevent impact to your business\.
 7 | If you are using CloudTrail event logs \(the recommended option\), see [Using AWS CloudTrail to identify Amazon S3 signature version 2 requests](cloudtrail-request-identification.md#cloudtrail-identification-sigv2-requests) to learn how to query and identify such requests\.
 8 | If you are using Amazon S3 server access logs, see [Using Amazon S3 access logs to identify signature version 2 requests](using-s3-access-logs-to-identify-requests.md#using-s3-access-logs-to-identify-sigv2-requests)\.
 9 | 
10 | **Topics**
11 | + [Authenticating requests using the REST API](S3_Authentication2.md)
12 | + [Signing and authenticating REST requests](RESTAuthentication.md)
13 | + [Browser\-based uploads using POST \(AWS signature version 2\)](UsingHTTPPOST.md)
--------------------------------------------------------------------------------
/doc_source/uploadobjusingmpu-ruby-sdk.md:
--------------------------------------------------------------------------------
 1 | # Using the AWS SDK for Ruby for Multipart Upload
 2 | 
 3 | The AWS SDK for Ruby version 3 supports Amazon S3 multipart uploads in two ways\. For the first option, you can use managed file uploads\. For more information, see [Uploading Files to Amazon S3](https://aws.amazon.com/blogs/developer/uploading-files-to-amazon-s3/) in the *AWS Developer Blog*\. Managed file uploads are the recommended method for uploading files to a bucket\. They provide the following benefits:
 4 | + Manage multipart uploads for objects larger than 15 MB\.
 5 | + Correctly open files in binary mode to avoid encoding issues\.
 6 | + Use multiple threads for uploading parts of large objects in parallel\.
 7 | 
 8 | Alternatively, you can use the following multipart upload client operations directly:
 9 | + [create\_multipart\_upload](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#create_multipart_upload-instance_method) – Initiates a multipart upload and returns an upload ID\.
10 | + [upload\_part](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#upload_part-instance_method) – Uploads a part in a multipart upload\.
11 | + [upload\_part\_copy](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#upload_part_copy-instance_method) – Uploads a part by copying data from an existing object as the data source\.
12 | + [complete\_multipart\_upload](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#complete_multipart_upload-instance_method) – Completes a multipart upload by assembling previously uploaded parts\.
13 | + [abort\_multipart\_upload](https://docs.aws.amazon.com/sdk-for-ruby/v3/api/Aws/S3/Client.html#abort_multipart_upload-instance_method) – Stops a multipart upload\.
14 | 
15 | For more information, see [Using the AWS SDK for Ruby \- Version 3](UsingTheMPRubyAPI.md)\.
--------------------------------------------------------------------------------
/doc_source/UploadObjSingleOpPHP.md:
--------------------------------------------------------------------------------
 1 | # Upload an object using the AWS SDK for PHP
 2 | 
 3 | This topic guides you through using classes from the AWS SDK for PHP to upload an object of up to 5 GB in size\. For larger files, you must use the multipart upload API\. For more information, see [Uploading objects using multipart upload API](uploadobjusingmpu.md)\.
 4 | 
 5 | This topic assumes that you are already following the instructions for [Using the AWS SDK for PHP and Running PHP Examples](UsingTheMPphpAPI.md) and have the AWS SDK for PHP properly installed\.
 6 | 
 7 | **Example of Creating an Object in an Amazon S3 bucket by Uploading Data**
 8 | The following PHP example creates an object in a specified bucket by uploading data using the `putObject()` method\. For information about running the PHP examples in this guide, go to [Running PHP Examples](UsingTheMPphpAPI.md#running-php-samples)\.
 9 | 
10 | ```
11 | require 'vendor/autoload.php';
12 | 
13 | use Aws\S3\S3Client;
14 | use Aws\S3\Exception\S3Exception;
15 | 
16 | $bucket = '*** Your Bucket Name ***';
17 | $keyname = '*** Your Object Key ***';
18 | 
19 | $s3 = new S3Client([
20 |     'version' => 'latest',
21 |     'region' => 'us-east-1'
22 | ]);
23 | 
24 | try {
25 |     // Upload data.
26 |     $result = $s3->putObject([
27 |         'Bucket' => $bucket,
28 |         'Key'    => $keyname,
29 |         'Body'   => 'Hello, world!',
30 |         'ACL'    => 'public-read'
31 |     ]);
32 | 
33 |     // Print the URL to the object.
34 |     echo $result['ObjectURL'] . PHP_EOL;
35 | } catch (S3Exception $e) {
36 |     echo $e->getMessage() . PHP_EOL;
37 | }
38 | ```
39 | 
40 | ## Related Resources
41 | + [ AWS SDK for PHP for Amazon S3 Aws\\S3\\S3Client Class](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.S3.S3Client.html)
42 | + [AWS SDK for PHP Documentation](http://aws.amazon.com/documentation/sdk-for-php/)
--------------------------------------------------------------------------------
/code_examples/dotnet_examples/S3Examples/GenPresignedURLTest.cs:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | using Amazon.S3;
 5 | using Amazon.S3.Model;
 6 | using System;
 7 | 
 8 | namespace Amazon.DocSamples.S3
 9 | {
10 |     class GenPresignedURLTest
11 |     {
12 |         private const string bucketName = "*** bucket name ***";
13 |         private const string objectKey = "*** object key ***";
14 |         // Specify your bucket region (an example region is shown).
15 |         private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2;
16 |         private static IAmazonS3 s3Client;
17 | 
18 |         public static void Main()
19 |         {
20 |             s3Client = new AmazonS3Client(bucketRegion);
21 |             string urlString = GeneratePreSignedURL();
22 |         }
23 |         static string GeneratePreSignedURL()
24 |         {
25 |             string urlString = "";
26 |             try
27 |             {
28 |                 GetPreSignedUrlRequest request1 = new GetPreSignedUrlRequest
29 |                 {
30 |                     BucketName = bucketName,
31 |                     Key = objectKey,
32 |                     Expires = DateTime.Now.AddMinutes(5)
33 |                 };
34 |                 urlString = s3Client.GetPreSignedURL(request1);
35 |             }
36 |             catch (AmazonS3Exception e)
37 |             {
38 |                 Console.WriteLine("Error encountered on server. Message:'{0}' when writing an object", e.Message);
39 |             }
40 |             catch (Exception e)
41 |             {
42 |                 Console.WriteLine("Unknown encountered on server. Message:'{0}' when writing an object", e.Message);
43 |             }
44 |             return urlString;
45 |         }
46 |     }
47 | }
48 | 
--------------------------------------------------------------------------------
/code_examples/php_examples/S3examples/s3-request-fed-user-with-temp-credentials.php:
--------------------------------------------------------------------------------
 1 | <?php
 2 | 
 3 | require 'vendor/autoload.php';
 4 | 
 5 | use Aws\S3\S3Client;
 6 | use Aws\S3\Exception\S3Exception;
 7 | use Aws\Sts\StsClient;
 8 | 
 9 | $bucket = '*** Your Bucket Name ***';
10 | 
11 | // In real applications, the following code is part of your trusted code.
12 | // It has the security credentials that you use to obtain temporary
13 | // security credentials for the federated user.
14 | 
15 | // Instantiate the STS client.
16 | $sts = new StsClient(
17 |     ['version' => 'latest',
18 |     'region' => 'us-east-1']
19 | );
20 | 
21 | // Fetch the federated credentials.
22 | $sessionToken = $sts->getFederationToken([
23 |     'Name'            => 'User1',
24 |     'DurationSeconds' => '3600',
25 |     'Policy'          => json_encode([
26 |         'Statement' => [
27 |             'Sid'      => 'randomstatementid' . 
time(), 28 | 'Action' => ['s3:ListBucket'], 29 | 'Effect' => 'Allow', 30 | 'Resource' => 'arn:aws:s3:::' . $bucket 31 | ] 32 | ]) 33 | ]); 34 | 35 | // The following will be part of your less trusted code. You provide temporary 36 | // security credentials so the code can send authenticated requests to Amazon S3. 37 | 38 | $s3 = new S3Client([ 39 | 'region' => 'us-east-1', 40 | 'version' => 'latest', 41 | 'credentials' => [ 42 | 'key' => $sessionToken['Credentials']['AccessKeyId'], 43 | 'secret' => $sessionToken['Credentials']['SecretAccessKey'], 44 | 'token' => $sessionToken['Credentials']['SessionToken'] 45 | ] 46 | ]); 47 | 48 | try { 49 | $result = $s3->listObjects([ 50 | 'Bucket' => $bucket 51 | ]); 52 | } catch (S3Exception $e) { 53 | echo $e->getMessage() . PHP_EOL; 54 | } 55 | -------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples/DeleteObjectNonVersionedBucketTest1.cs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.) 3 | 4 | using Amazon.S3; 5 | using Amazon.S3.Model; 6 | using System; 7 | using System.Threading.Tasks; 8 | 9 | namespace Amazon.DocSamples.S3 10 | { 11 | class DeleteObjectNonVersionedBucketTest 12 | { 13 | private const string bucketName = "*** bucket name ***"; 14 | private const string keyName = "*** object key ***"; 15 | // Specify your bucket region (an example region is shown). 16 | private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2; 17 | private static IAmazonS3 client; 18 | 19 | public static void Main() 20 | { 21 | client = new AmazonS3Client(bucketRegion); 22 | DeleteObjectNonVersionedBucketAsync().Wait(); 23 | } 24 | 25 | private static async Task DeleteObjectNonVersionedBucketAsync() 26 | { 27 | try 28 | { 29 | var deleteObjectRequest = new DeleteObjectRequest 30 | { 31 | BucketName = bucketName, 32 | Key = keyName 33 | }; 34 | 35 | Console.WriteLine("Deleting an object"); 36 | await client.DeleteObjectAsync(deleteObjectRequest); 37 | } 38 | catch (AmazonS3Exception e) 39 | { 40 | Console.WriteLine("Error encountered on server. Message:'{0}' when writing an object", e.Message); 41 | } 42 | catch (Exception e) 43 | { 44 | Console.WriteLine("Unknown encountered on server. Message:'{0}' when writing an object", e.Message); 45 | } 46 | } 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /doc_source/ObjectsinRequesterPaysBuckets.md: -------------------------------------------------------------------------------- 1 | # Downloading objects in Requester Pays buckets 2 | 3 | Because requesters are charged for downloading data from Requester Pays buckets, the requests must contain a special parameter, `x-amz-request-payer`, which confirms that the requester knows he or she will be charged for the download\. To access objects in Requester Pays buckets, requests must include one of the following\. 4 | + For GET, HEAD, and POST requests, include `x-amz-request-payer : requester` in the header 5 | + For signed URLs, include `x-amz-request-payer=requester` in the request 6 | 7 | If the request succeeds and the requester is charged, the response includes the header `x-amz-request-charged:requester`\. 
If `x-amz-request-payer` is not in the request, Amazon S3 returns a 403 error and charges the bucket owner for the request\.
 8 | 
 9 | **Note**
10 | Bucket owners do not need to add `x-amz-request-payer` to their requests\.
11 | Ensure that you have included `x-amz-request-payer` and its value in your signature calculation\. For more information, see [Constructing the CanonicalizedAmzHeaders Element](RESTAuthentication.md#RESTAuthenticationConstructingCanonicalizedAmzHeaders)\.
12 | 
13 | **To download objects from a Requester Pays bucket**
14 | + Use a `GET` request to download an object from a Requester Pays bucket, as shown in the following request\.
15 | 
16 |   ```
17 |   1. GET / [destinationObject] HTTP/1.1
18 |   2. Host: [BucketName].s3.amazonaws.com
19 |   3. x-amz-request-payer : requester
20 |   4. Date: Wed, 01 Mar 2009 12:00:00 GMT
21 |   5. Authorization: AWS [Signature]
22 |   ```
23 | 
24 | If the GET request succeeds and the requester is charged, the response includes `x-amz-request-charged:requester`\.
25 | 
26 | Amazon S3 can return an `Access Denied` error for requests that try to get objects from a Requester Pays bucket\. For more information, see [Error Responses](https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html)\.
--------------------------------------------------------------------------------
/doc_source/S3TorrentPublish.md:
--------------------------------------------------------------------------------
 1 | # Publishing content using Amazon S3 and BitTorrent
 2 | 
 3 | Every anonymously readable object stored in Amazon S3 is automatically available for download using BitTorrent\. The process for changing the ACL on an object to allow anonymous `READ` operations is described in [Identity and access management in Amazon S3](s3-access-control.md)\.
 4 | 
 5 | You can direct your clients to your BitTorrent\-accessible objects by giving them the \.torrent file directly or by publishing a link to the ?torrent URL of your object, as described by [GetObjectTorrent](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTorrent.html) in the *Amazon Simple Storage Service API Reference*\. Note that the \.torrent file describing an Amazon S3 object is generated on demand the first time it is requested \(via the REST ?torrent resource\)\. Generating the \.torrent for an object takes time proportional to the size of that object\. For large objects, this time can be significant\. Therefore, before publishing a ?torrent link, we suggest making the first request for it yourself\. Amazon S3 might take several minutes to respond to this first request, as it generates the \.torrent file\. Unless you update the object in question, subsequent requests for the \.torrent will be fast\. Following this procedure before distributing a ?torrent link will ensure a smooth BitTorrent downloading experience for your customers\.
 6 | 
 7 | To stop distributing a file using BitTorrent, simply remove anonymous access to it\. This can be accomplished by either deleting the file from Amazon S3, or modifying your access control policy to prohibit anonymous reads\. After doing so, Amazon S3 will no longer act as a "seeder" in the BitTorrent network for your file, and will no longer serve the \.torrent file via the ?torrent REST API\. However, after a \.torrent for your file is published, this action might not stop public downloads of your object that happen exclusively using the BitTorrent peer\-to\-peer network\.
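
In code, publishing and unpublishing come down to changing the object's ACL\. The following AWS SDK for Java sketch is illustrative only \(the bucket and key names are placeholders, and credentials come from the default provider chain\): it first makes an object anonymously readable, and then removes that access again\.

```
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CannedAccessControlList;

public class TorrentAclSketch {

    public static void main(String[] args) {
        String bucketName = "*** Bucket name ***";
        String keyName = "*** Key name ***";

        AmazonS3 s3Client = AmazonS3ClientBuilder.standard().build();

        // Anonymous READ access makes the object retrievable via BitTorrent.
        s3Client.setObjectAcl(bucketName, keyName, CannedAccessControlList.PublicRead);

        // Removing anonymous access stops Amazon S3 from seeding the object.
        s3Client.setObjectAcl(bucketName, keyName, CannedAccessControlList.Private);
    }
}
```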
--------------------------------------------------------------------------------
/doc_source/cors-troubleshooting.md:
--------------------------------------------------------------------------------
 1 | # Troubleshooting CORS issues
 2 | 
 3 | If you encounter unexpected behavior while accessing buckets that have a CORS configuration set, try the following troubleshooting steps:
 4 | 
 5 | 1. Verify that the CORS configuration is set on the bucket\.
 6 | 
 7 |    For instructions, see [Editing Bucket Permissions](https://docs.aws.amazon.com/AmazonS3/latest/user-guide/EditingBucketPermissions.html) in the *Amazon Simple Storage Service Console User Guide*\. If the CORS configuration is set, the console displays an **Edit CORS Configuration** link in the **Permissions** section of the bucket properties\.
 8 | 
 9 | 1. Capture the complete request and response using a tool of your choice\. For each request Amazon S3 receives, there must be a CORS rule that matches the data in your request, as follows:
10 | 
11 |    1. Verify that the request has the Origin header\.
12 | 
13 |       If the header is missing, Amazon S3 doesn't treat the request as a cross\-origin request, and doesn't send CORS response headers in the response\.
14 | 
15 |    1. Verify that the Origin header in your request matches at least one of the `AllowedOrigin` elements in the specified `CORSRule`\.
16 | 
17 |       The scheme, the host, and the port values in the Origin request header must match the `AllowedOrigin` elements in the `CORSRule`\. For example, if you set the `CORSRule` to allow the origin `http://www.example.com`, then both `https://www.example.com` and `http://www.example.com:80` origins in your request don't match the allowed origin in your configuration\.
18 | 
19 |    1. Verify that the method in your request \(or in a preflight request, the method specified in the `Access-Control-Request-Method`\) is one of the `AllowedMethod` elements in the same `CORSRule`\.
20 | 
21 |    1. For a preflight request, if the request includes an `Access-Control-Request-Headers` header, verify that the `CORSRule` includes the `AllowedHeader` entries for each value in the `Access-Control-Request-Headers` header\.
--------------------------------------------------------------------------------
/doc_source/storage_lens_understanding_metrics_export_schema.md:
--------------------------------------------------------------------------------
 1 | # Understanding the Amazon S3 Storage Lens export schema
 2 | 
 3 | The following table contains the schema of your S3 Storage Lens metrics export\.
 4 | 
 5 | 
 6 | | Attribute Name | Data Type | Column Name | Description |
 7 | | --- | --- | --- | --- |
 8 | | VersionNumber | String | version\_number | The version of the S3 Storage Lens metrics being used\. |
 9 | | ConfigurationId | String | configuration\_id | The configuration\_id of your S3 Storage Lens configuration\. |
10 | | ReportDate | String | report\_date | The date the metrics were tracked\. |
11 | | AwsAccountNumber | String | aws\_account\_number | Your AWS account number\. |
12 | | AwsRegion | String | aws\_region | The AWS Region for which the metrics are being tracked\. |
13 | | StorageClass | String | storage\_class | The storage class of the bucket in question\. |
14 | | RecordType | ENUM | record\_type | The type of artifact that is being reported \(ACCOUNT, BUCKET, or PREFIX\)\. |
15 | | RecordValue | String | record\_value | The record value\. This field is populated when the record\_type is PREFIX\.
The record value is only URL\-encoded in the CSV format\. |
16 | | BucketName | String | bucket\_name | The name of the bucket that is being reported\. |
17 | | MetricName | String | metric\_name | The name of the metric that is being reported\. |
18 | | MetricValue | Long | metric\_value | The value of the metric that is being reported\. |
19 | 
20 | ## Example of an S3 Storage Lens metrics export
21 | 
22 | The following is an example of an S3 Storage Lens metrics export based on this schema\.
23 | 
24 | ![\[Image NOT FOUND\]](http://docs.aws.amazon.com/AmazonS3/latest/dev/images/sample_storage_lens_export.png)
--------------------------------------------------------------------------------
/code_examples/java_examples/S3Examples/CopyObjectSingleOperation.java:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | import java.io.IOException;
 5 | 
 6 | import com.amazonaws.AmazonServiceException;
 7 | import com.amazonaws.SdkClientException;
 8 | import com.amazonaws.auth.profile.ProfileCredentialsProvider;
 9 | import com.amazonaws.services.s3.AmazonS3;
10 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
11 | import com.amazonaws.services.s3.model.CopyObjectRequest;
12 | 
13 | public class CopyObjectSingleOperation {
14 | 
15 |     public static void main(String[] args) throws IOException {
16 |         String clientRegion = "*** Client region ***";
17 |         String bucketName = "*** Bucket name ***";
18 |         String sourceKey = "*** Source object key *** ";
19 |         String destinationKey = "*** Destination object key ***";
20 | 
21 |         try {
22 |             AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
23 |                     .withCredentials(new ProfileCredentialsProvider())
24 |                     .withRegion(clientRegion)
25 |                     .build();
26 | 
27 |             // Copy the object into a new object in the same bucket.
28 |             CopyObjectRequest copyObjRequest = new CopyObjectRequest(bucketName, sourceKey, bucketName, destinationKey);
29 |             s3Client.copyObject(copyObjRequest);
30 |         }
31 |         catch(AmazonServiceException e) {
32 |             // The call was transmitted successfully, but Amazon S3 couldn't process 
33 |             // it, so it returned an error response.
34 |             e.printStackTrace();
35 |         }
36 |         catch(SdkClientException e) {
37 |             // Amazon S3 couldn't be contacted for a response, or the client
38 |             // couldn't parse the response from Amazon S3.
39 |             e.printStackTrace();
40 |         }
41 |     }
42 | }
--------------------------------------------------------------------------------
/code_examples/php_examples/S3examples/s3-deleting-multi-objects-versioned.php:
--------------------------------------------------------------------------------
 1 | <?php
 2 | 
 3 | require 'vendor/autoload.php';
 4 | 
 5 | use Aws\S3\S3Client;
 6 | 
 7 | $bucket = '*** Your Bucket Name ***';
 8 | $keyname = '*** Your Object Key ***';
 9 | 
10 | // Instantiate an Amazon S3 client.
11 | // (An example region is shown.)
12 | $s3 = new S3Client([
13 |     'version' => 'latest',
14 |     'region' => 'us-east-1'
15 | ]);
16 | 
17 | // 1. Enable object versioning for the bucket.
18 | $s3->putBucketVersioning([
19 |     'Bucket' => $bucket,
20 |     'Status' => 'Enabled',
21 | ]);
22 | 
23 | // 2. Create a few versions of an object.
24 | for ($i = 1; $i <= 3; $i++) {
25 |     $s3->putObject([
26 |         'Bucket' => $bucket,
27 |         'Key'    => $keyname,
28 |         'Body'   => "content {$i}",
29 |     ]);
30 | }
31 | 
32 | // 3. List the object versions and get the keys and version IDs.
33 | $versions = $s3->listObjectVersions(['Bucket' => $bucket])
34 |     ->search('Versions'); // search() evaluates a JMESPath expression on the result.
35 | 
36 | // 4. Delete the object versions.
37 | $result = $s3->deleteObjects([
38 |     'Bucket'  => $bucket,
39 |     'Delete' => [
40 |         'Objects' => array_map(function ($version) {
41 |             return [
42 |                 'Key'       => $version['Key'],
43 |                 'VersionId' => $version['VersionId']
44 |             ];
45 |         }, $versions),
46 |     ],
47 | ]);
48 | 
49 | echo "The following objects were deleted successfully:" . PHP_EOL;
50 | foreach ($result['Deleted'] as $object) {
51 |     echo "Key: {$object['Key']}, VersionId: {$object['VersionId']}" . PHP_EOL;
52 | }
53 | 
54 | echo PHP_EOL . "The following objects could not be deleted:" . PHP_EOL;
55 | foreach ($result['Errors'] as $object) {
56 |     echo "Key: {$object['Key']}, VersionId: {$object['VersionId']}" . PHP_EOL;
57 | }
58 | 
59 | // 5. Suspend object versioning for the bucket.
60 | $s3->putBucketVersioning([
61 |     'Bucket' => $bucket,
62 |     'Status' => 'Suspended',
63 | ]);
64 | 
--------------------------------------------------------------------------------
/doc_source/HTTPPOSTFlash.md:
--------------------------------------------------------------------------------
 1 | # POST with Adobe Flash
 2 | 
 3 | This section describes how to use `POST` with Adobe Flash\.
 4 | 
 5 | ## Adobe Flash Player security
 6 | 
 7 | By default, the Adobe Flash Player security model prohibits Adobe Flash Players from making network connections to servers outside the domain that serves the SWF file\.
 8 | 
 9 | To override the default, you must upload a publicly readable crossdomain\.xml file to the bucket that will accept POST uploads\. The following is a sample crossdomain\.xml file\.
10 | 
11 | ```
12 | 1. <?xml version="1.0"?>
13 | 2. <!DOCTYPE cross-domain-policy SYSTEM
14 | 3.   "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
15 | 4. <cross-domain-policy>
16 | 5.   <allow-access-from domain="*" secure="false" />
17 | 6. </cross-domain-policy>
18 | ```
19 | 
20 | **Note**
21 | For more information about the Adobe Flash security model, go to the Adobe website\.
22 | Adding the crossdomain\.xml file to your bucket allows any Adobe Flash Player to connect to the crossdomain\.xml file within your bucket; however, it does not grant access to the actual Amazon S3 bucket\.
23 | 
24 | ## Adobe Flash considerations
25 | 
26 | The FileReference API in Adobe Flash adds the `Filename` form field to the POST request\. When you build Adobe Flash applications that upload to Amazon S3 by using the FileReference API action, include the following condition in your policy:
27 | 
28 | ```
29 | 1. ['starts-with', '$Filename', '']
30 | ```
31 | 
32 | Some versions of the Adobe Flash Player do not properly handle HTTP responses that have an empty body\. To configure POST to return a response that does not have an empty body, set `success_action_status` to 201\. Amazon S3 will then return an XML document with a 201 status code\. For information about the content of the XML document, see [POST Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPOST.html)\. For information about form fields, see [HTML form fields](HTTPPOSTForms.md#HTTPPOSTFormFields)\.
--------------------------------------------------------------------------------
/doc_source/access-points.md:
--------------------------------------------------------------------------------
 1 | # Managing data access with Amazon S3 access points
 2 | 
 3 | Amazon S3 Access Points simplify managing data access at scale for shared datasets in S3\. Access points are named network endpoints that are attached to buckets that you can use to perform S3 object operations, such as `GetObject` and `PutObject`\.
Each access point has distinct permissions and network controls that S3 applies for any request that is made through that access point\. Each access point enforces a customized access point policy that works in conjunction with the bucket policy that is attached to the underlying bucket\. You can configure any access point to accept requests only from a virtual private cloud \(VPC\) to restrict Amazon S3 data access to a private network\. You can also configure custom block public access settings for each access point\. 4 | 5 | **Note** 6 | You can only use access points to perform operations on objects\. You can't use access points to perform other Amazon S3 operations, such as modifying or deleting buckets\. For a complete list of S3 operations that support access points, see [Access point compatibility with S3 operations and AWS services](using-access-points.md#access-points-service-api-support)\. 7 | Access points work with some, but not all, AWS services and features\. For example, you can't configure Cross\-Region Replication to operate through an access point\. For a complete list of AWS services that are compatible with S3 access points, see [Access point compatibility with S3 operations and AWS services](using-access-points.md#access-points-service-api-support)\. 8 | 9 | This section explains how to work with Amazon S3 access points\. For information about working with buckets, see [Working with Amazon S3 Buckets](UsingBucket.md)\. For information about working with objects, see [Working with Amazon S3 objects](UsingObjects.md)\. 10 | 11 | **Topics** 12 | + [Creating access points](creating-access-points.md) 13 | + [Using access points](using-access-points.md) 14 | + [Access points restrictions and limitations](access-points-restrictions-limitations.md) -------------------------------------------------------------------------------- /doc_source/S3Torrent.md: -------------------------------------------------------------------------------- 1 | # Using BitTorrent with Amazon S3 2 | 3 | **Topics** 4 | + [How you are charged for BitTorrent delivery](S3TorrentCharge.md) 5 | + [Using BitTorrent to retrieve objects stored in Amazon S3](S3TorrentRetrieve.md) 6 | + [Publishing content using Amazon S3 and BitTorrent](S3TorrentPublish.md) 7 | 8 | BitTorrent is an open, peer\-to\-peer protocol for distributing files\. You can use the BitTorrent protocol to retrieve any publicly\-accessible object in Amazon S3\. This section describes why you might want to use BitTorrent to distribute your data out of Amazon S3 and how to do so\. 9 | 10 | Amazon S3 supports the BitTorrent protocol so that developers can save costs when distributing content at high scale\. Amazon S3 is useful for simple, reliable storage of any data\. The default distribution mechanism for Amazon S3 data is via client/server download\. In client/server distribution, the entire object is transferred point\-to\-point from Amazon S3 to every authorized user who requests that object\. While client/server delivery is appropriate for a wide variety of use cases, it is not optimal for everybody\. Specifically, the costs of client/server distribution increase linearly as the number of users downloading objects increases\. This can make it expensive to distribute popular objects\. 
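As a rough, hypothetical illustration of that linear scaling: serving a 1 GB object directly to 1,000 downloaders transfers 1,000 GB out of Amazon S3, so the data transfer charge grows in step with every additional downloader\. If BitTorrent peers supplied, say, half of each download instead, the data served out of Amazon S3 \(and the corresponding charge\) would drop by roughly half\.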
11 | 
12 | BitTorrent addresses this problem by recruiting the very clients that are downloading the object as distributors themselves: Each client downloads some pieces of the object from Amazon S3 and some from other clients, while simultaneously uploading pieces of the same object to other interested "peers\." The benefit for publishers is that for large, popular files the amount of data actually supplied by Amazon S3 can be substantially lower than what it would have been serving the same clients via client/server download\. Less data transferred means lower costs for the publisher of the object\. 
13 | 
14 | **Note** 
15 | Amazon S3 does not support the BitTorrent protocol in AWS Regions launched after May 30, 2016\. 
16 | You can only get a torrent file for objects that are less than 5 GB in size\.
--------------------------------------------------------------------------------
/doc_source/inter-network-traffic-privacy.md:
--------------------------------------------------------------------------------
 1 | # Internetwork traffic privacy 
 2 | 
 3 | This topic describes how Amazon S3 secures connections from the service to other locations\. 
 4 | 
 5 | ## Traffic between service and on\-premises clients and applications 
 6 | 
 7 | You have two connectivity options between your private network and AWS: 
 8 | + An AWS Site\-to\-Site VPN connection\. For more information, see [What is AWS Site\-to\-Site VPN?](https://docs.aws.amazon.com/vpn/latest/s2svpn/VPC_VPN.html) 
 9 | + An AWS Direct Connect connection\. For more information, see [What is AWS Direct Connect?](https://docs.aws.amazon.com/directconnect/latest/UserGuide/Welcome.html) 
10 | 
11 | Access to Amazon S3 via the network is through AWS published APIs\. Clients must support Transport Layer Security \(TLS\) 1\.0\. We recommend TLS 1\.2 or above\. Clients must also support cipher suites with Perfect Forward Secrecy \(PFS\), such as Ephemeral Diffie\-Hellman \(DHE\) or Elliptic Curve Diffie\-Hellman Ephemeral \(ECDHE\)\. Most modern systems such as Java 7 and later support these modes\. Additionally, you must sign requests using an access key ID and a secret access key that are associated with an IAM principal, or you can use the [AWS Security Token Service \(STS\)](https://docs.aws.amazon.com/STS/latest/APIReference/Welcome.html) to generate temporary security credentials to sign requests\. 
12 | 
13 | ## Traffic between AWS resources in the same Region 
14 | 
15 | A virtual private cloud \(VPC\) endpoint for Amazon S3 is a logical entity within a VPC that allows connectivity only to Amazon S3\. The VPC routes requests to Amazon S3 and routes responses back to the VPC\. For more information, see [VPC Endpoints](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-endpoints.html) in the *VPC User Guide*\. For example bucket policies that you can use to control S3 bucket access from VPC endpoints, see [Example Bucket Policies for VPC Endpoints for Amazon S3](example-bucket-policies-vpc-endpoint.md)\.
--------------------------------------------------------------------------------
/doc_source/CopyingObjectUsingJava.md:
--------------------------------------------------------------------------------
 1 | # Copy an Object Using the AWS SDK for Java 
 2 | 
 3 | **Example** 
 4 | The following example shows how to copy an object in Amazon S3 using the AWS SDK for Java\. For instructions on creating and testing a working sample, see [Testing the Amazon S3 Java Code Examples](UsingTheMPJavaAPI.md#TestingJavaSamples)\.
5 | 6 | ``` 7 | import com.amazonaws.AmazonServiceException; 8 | import com.amazonaws.SdkClientException; 9 | import com.amazonaws.auth.profile.ProfileCredentialsProvider; 10 | import com.amazonaws.regions.Regions; 11 | import com.amazonaws.services.s3.AmazonS3; 12 | import com.amazonaws.services.s3.AmazonS3ClientBuilder; 13 | import com.amazonaws.services.s3.model.CopyObjectRequest; 14 | 15 | import java.io.IOException; 16 | 17 | public class CopyObjectSingleOperation { 18 | 19 | public static void main(String[] args) throws IOException { 20 | Regions clientRegion = Regions.DEFAULT_REGION; 21 | String bucketName = "*** Bucket name ***"; 22 | String sourceKey = "*** Source object key *** "; 23 | String destinationKey = "*** Destination object key ***"; 24 | 25 | try { 26 | AmazonS3 s3Client = AmazonS3ClientBuilder.standard() 27 | .withCredentials(new ProfileCredentialsProvider()) 28 | .withRegion(clientRegion) 29 | .build(); 30 | 31 | // Copy the object into a new object in the same bucket. 32 | CopyObjectRequest copyObjRequest = new CopyObjectRequest(bucketName, sourceKey, bucketName, destinationKey); 33 | s3Client.copyObject(copyObjRequest); 34 | } catch (AmazonServiceException e) { 35 | // The call was transmitted successfully, but Amazon S3 couldn't process 36 | // it, so it returned an error response. 37 | e.printStackTrace(); 38 | } catch (SdkClientException e) { 39 | // Amazon S3 couldn't be contacted for a response, or the client 40 | // couldn't parse the response from Amazon S3. 41 | e.printStackTrace(); 42 | } 43 | } 44 | } 45 | ``` -------------------------------------------------------------------------------- /doc_source/access-points-restrictions-limitations.md: -------------------------------------------------------------------------------- 1 | # Access points restrictions and limitations 2 | 3 | Amazon S3 access points have the following restrictions and limitations: 4 | + You can only create access points for buckets that you own\. 5 | + Each access point is associated with exactly one bucket, which you must specify when you create the access point\. After you create an access point, you can't associate it with a different bucket\. However, you can delete an access point and then create another one with the same name associated with a different bucket\. 6 | + After you create an access point, you can't change its virtual private cloud \(VPC\) configuration\. 7 | + Access point policies are limited to 20 KB in size\. 8 | + You can create a maximum of 1,000 access points per AWS account per Region\. If you need more than 1,000 access points for a single account in a single Region, you can request a service quota increase\. For more information about service quotas and requesting an increase, see [AWS Service Quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) in the *AWS General Reference*\. 9 | + You can't use an access point as a destination for S3 Cross\-Region Replication\. For more information about replication, see [Replication](replication.md)\. 10 | + You can only address access points using virtual\-host\-style URLs\. For more information about virtual\-host\-style addressing, see [Accessing a Bucket](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)\. 11 | + APIs that control access point functionality \(for example, `PutAccessPoint` and `GetAccessPointPolicy`\) don't support cross\-account calls\. 12 | + You must use AWS Signature Version 4 when making requests to an access point using the REST APIs\. 
For more information about authenticating requests, see [Authenticating Requests \(AWS Signature Version 4\)](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) in the *Amazon Simple Storage Service API Reference*\. 13 | + Access points only support access over HTTPS\. 14 | + Access points don't support anonymous access\. -------------------------------------------------------------------------------- /doc_source/LoggingWebsiteTraffic.md: -------------------------------------------------------------------------------- 1 | # \(Optional\) Logging web traffic 2 | 3 | You can optionally enable Amazon S3 server access logging for a bucket that is configured as a static website\. Server access logging provides detailed records for the requests that are made to your bucket\. For more information, see [Amazon S3 server access logging](ServerLogs.md)\. If you plan to use Amazon CloudFront to [speed up your website](website-hosting-cloudfront-walkthrough.md), you can also use CloudFront logging\. For more information, see [Configuring and Using Access Logs](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/AccessLogs.html) in the *Amazon CloudFront Developer Guide*\. 4 | 5 | **To enable server access logging for your static website bucket** 6 | 7 | 1. Open the Amazon S3 console at [https://console\.aws\.amazon\.com/s3/](https://console.aws.amazon.com/s3/)\. 8 | 9 | 1. In the same Region where you created the bucket that is configured as a static website, create a bucket for logging, for example `logs.example.com`\. 10 | 11 | 1. Create a folder for the server access logging log files \(for example, `logs`\)\. 12 | 13 | When you group your log data files in a folder, they are easier to locate\. 14 | 15 | 1. \(Optional\) If you want to use CloudFront to improve your website performance, create a folder for the CloudFront log files \(for example, `cdn`\)\. 16 | 17 | 1. In the **Buckets** list, choose your bucket\. 18 | 19 | 1. Choose **Properties**\. 20 | 21 | 1. Under **Server access logging**, choose **Edit**\. 22 | 23 | 1. Choose **Enable**\. 24 | 25 | 1. Under the **Target bucket**, choose the bucket and folder destination for the server access logs: 26 | + Browse to the folder and bucket location: 27 | 28 | 1. Choose **Browse S3**\. 29 | 30 | 1. Choose the bucket name, and then choose the logs folder\. 31 | 32 | 1. Choose **Choose path**\. 33 | + Enter the S3 bucket path, for example, **s3://logs\.example\.com/logs/**\. 34 | 35 | 1. Choose **Save changes**\. 36 | 37 | In your log bucket, you can now access your logs\. Amazon S3 writes website access logs to your log bucket every 2 hours\. -------------------------------------------------------------------------------- /doc_source/RequesterPaysBucketConfiguration.md: -------------------------------------------------------------------------------- 1 | # Setting the requestPayment Bucket Configuration 2 | 3 | Only the bucket owner can set the `RequestPaymentConfiguration.payer` configuration value of a bucket to `BucketOwner`, the default, or `Requester`\. Setting the `requestPayment` resource is optional\. By default, the bucket is not a Requester Pays bucket\. 4 | 5 | To revert a Requester Pays bucket to a regular bucket, you use the value `BucketOwner`\. Typically, you would use `BucketOwner` when uploading data to the Amazon S3 bucket, and then you would set the value to `Requester` before publishing the objects in the bucket\. 
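If you use the AWS SDK for Java rather than the REST API, the `AmazonS3` client exposes convenience methods for the same setting\. The following is a minimal sketch of those calls; it assumes a bucket that you own, and credentials and a default Region configured as in the other Java examples in this guide\.

```
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;

public class RequesterPaysExample {

    public static void main(String[] args) {
        String bucketName = "*** Bucket name ***";

        AmazonS3 s3Client = AmazonS3ClientBuilder.defaultClient();

        // Equivalent to setting Payer to Requester with the REST API.
        s3Client.enableRequesterPays(bucketName);

        // Check the current setting.
        System.out.println("Requester Pays enabled: "
                + s3Client.isRequesterPaysEnabled(bucketName));

        // Equivalent to setting Payer back to BucketOwner.
        s3Client.disableRequesterPays(bucketName);
    }
}
```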
 6 | 
 7 | **To set requestPayment** 
 8 | + Use a `PUT` request to set the `Payer` value to `Requester` on a specified bucket\. 
 9 | 
10 | ```
11 | 1. PUT ?requestPayment HTTP/1.1
12 | 2. Host: [BucketName].s3.amazonaws.com
13 | 3. Content-Length: 173
14 | 4. Date: Wed, 01 Mar 2009 12:00:00 GMT
15 | 5. Authorization: AWS [Signature]
16 | 6. 
17 | 7. <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
18 | 8.   <Payer>Requester</Payer>
19 | 9. </RequestPaymentConfiguration>
20 | ```
21 | 
22 | If the request succeeds, Amazon S3 returns a response similar to the following\. 
23 | 
24 | ```
25 | 1. HTTP/1.1 200 OK
26 | 2. x-amz-id-2: [id]
27 | 3. x-amz-request-id: [request_id]
28 | 4. Date: Wed, 01 Mar 2009 12:00:00 GMT
29 | 5. Content-Length: 0
30 | 6. Connection: close
31 | 7. Server: AmazonS3
32 | 8. x-amz-request-charged:requester
33 | ```
34 | 
35 | You can set Requester Pays only at the bucket level; you cannot set Requester Pays for specific objects within the bucket\. 
36 | 
37 | You can configure a bucket to be `BucketOwner` or `Requester` at any time\. Be aware, however, that there might be a small delay, on the order of minutes, before the new configuration value takes effect\. 
38 | 
39 | **Note** 
40 | Bucket owners who give out presigned URLs should think twice before configuring a bucket to be Requester Pays, especially if the URL has a very long lifetime\. The bucket owner is charged each time the requester uses a presigned URL that uses the bucket owner's credentials\.
--------------------------------------------------------------------------------
/code_examples/java_examples/S3Examples/HighLevelAbortMultipartUpload.java:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | import java.util.Date;
 5 | 
 6 | import com.amazonaws.AmazonServiceException;
 7 | import com.amazonaws.SdkClientException;
 8 | import com.amazonaws.auth.profile.ProfileCredentialsProvider;
 9 | import com.amazonaws.services.s3.AmazonS3;
10 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
11 | import com.amazonaws.services.s3.transfer.TransferManager;
12 | import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
13 | 
14 | public class HighLevelAbortMultipartUpload {
15 | 
16 |     public static void main(String[] args) {
17 |         String clientRegion = "*** Client region ***";
18 |         String bucketName = "*** Bucket name ***";
19 | 
20 |         try {
21 |             AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
22 |                     .withRegion(clientRegion)
23 |                     .withCredentials(new ProfileCredentialsProvider())
24 |                     .build();
25 |             TransferManager tm = TransferManagerBuilder.standard()
26 |                     .withS3Client(s3Client)
27 |                     .build();
28 | 
29 |             // sevenDays is the duration of seven days in milliseconds.
30 |             long sevenDays = 1000 * 60 * 60 * 24 * 7;
31 |             Date oneWeekAgo = new Date(System.currentTimeMillis() - sevenDays);
32 |             tm.abortMultipartUploads(bucketName, oneWeekAgo);
33 |         }
34 |         catch(AmazonServiceException e) {
35 |             // The call was transmitted successfully, but Amazon S3 couldn't process 
36 |             // it, so it returned an error response.
37 |             e.printStackTrace();
38 |         }
39 |         catch(SdkClientException e) {
40 |             // Amazon S3 couldn't be contacted for a response, or the client couldn't
41 |             // parse the response from Amazon S3.
42 | e.printStackTrace(); 43 | } 44 | } 45 | } -------------------------------------------------------------------------------- /doc_source/ConfigWebSitePHP.md: -------------------------------------------------------------------------------- 1 | # Managing websites with the AWS SDK for PHP 2 | 3 | This topic explains how to use classes from the AWS SDK for PHP to configure and manage an Amazon S3 bucket for website hosting\. It assumes that you are already following the instructions for [Using the AWS SDK for PHP and Running PHP Examples](UsingTheMPphpAPI.md) and have the AWS SDK for PHP properly installed\. For more information about the Amazon S3 website feature, see [Hosting a static website using Amazon S3](WebsiteHosting.md)\. 4 | 5 | 6 | 7 | 8 | 9 | The following PHP example adds a website configuration to the specified bucket\. The `create_website_config` method explicitly provides the index document and error document names\. The example also retrieves the website configuration and prints the response\. For more information about the Amazon S3 website feature, see [Hosting a static website using Amazon S3](WebsiteHosting.md)\. 10 | 11 | For instructions on creating and testing a working sample, see [Using the AWS SDK for PHP and Running PHP Examples](UsingTheMPphpAPI.md)\. 12 | 13 | ``` 14 | require 'vendor/autoload.php'; 15 | 16 | use Aws\S3\S3Client; 17 | 18 | $bucket = '*** Your Bucket Name ***'; 19 | 20 | $s3 = new S3Client([ 21 | 'version' => 'latest', 22 | 'region' => 'us-east-1' 23 | ]); 24 | 25 | 26 | // Add the website configuration. 27 | $s3->putBucketWebsite([ 28 | 'Bucket' => $bucket, 29 | 'WebsiteConfiguration' => [ 30 | 'IndexDocument' => ['Suffix' => 'index.html'], 31 | 'ErrorDocument' => ['Key' => 'error.html'] 32 | ] 33 | ]); 34 | 35 | // Retrieve the website configuration. 36 | $result = $s3->getBucketWebsite([ 37 | 'Bucket' => $bucket 38 | ]); 39 | echo $result->getPath('IndexDocument/Suffix'); 40 | 41 | // Delete the website configuration. 42 | $s3->deleteBucketWebsite([ 43 | 'Bucket' => $bucket 44 | ]); 45 | ``` 46 | 47 | ## Related resources 48 | + [ AWS SDK for PHP for Amazon S3 Aws\\S3\\S3Client Class](https://docs.aws.amazon.com/aws-sdk-php/v3/api/class-Aws.S3.S3Client.html) 49 | + [AWS SDK for PHP Documentation](http://aws.amazon.com/documentation/sdk-for-php/) -------------------------------------------------------------------------------- /doc_source/uploadobjusingmpu.md: -------------------------------------------------------------------------------- 1 | # Uploading objects using multipart upload API 2 | 3 | Multipart upload allows you to upload a single object as a set of parts\. Each part is a contiguous portion of the object's data\. You can upload these object parts independently and in any order\. If transmission of any part fails, you can retransmit that part without affecting other parts\. After all parts of your object are uploaded, Amazon S3 assembles these parts and creates the object\. In general, when your object size reaches 100 MB, you should consider using multipart uploads instead of uploading the object in a single operation\. 4 | 5 | Using multipart upload provides the following advantages: 6 | + Improved throughput \- You can upload parts in parallel to improve throughput\. 7 | + Quick recovery from any network issues \- Smaller part size minimizes the impact of restarting a failed upload due to a network error\. 8 | + Pause and resume object uploads \- You can upload object parts over time\. 
Once you initiate a multipart upload, there is no expiry; you must explicitly complete or stop the multipart upload\. 
 9 | + Begin an upload before you know the final object size \- You can upload an object as you are creating it\. 
10 | 
11 | **Topics** 
12 | + [Multipart upload overview](mpuoverview.md) 
13 | + [Using the AWS Java SDK for multipart upload \(high\-level API\)](usingHLmpuJava.md) 
14 | + [Using the AWS Java SDK for a multipart upload \(low\-level API\)](mpListPartsJavaAPI.md) 
15 | + [Using the AWS SDK for \.NET for multipart upload \(high\-level API\)](usingHLmpuDotNet.md) 
16 | + [Using the AWS SDK for \.NET for multipart upload \(low\-level API\)](usingLLmpuDotNet.md) 
17 | + [Using the AWS PHP SDK for multipart upload](usingHLmpuPHP.md) 
18 | + [Using the AWS PHP SDK for multipart upload \(low\-level API\)](usingLLmpuPHP.md) 
19 | + [Using the AWS SDK for Ruby for Multipart Upload](uploadobjusingmpu-ruby-sdk.md) 
20 | + [Using the REST API for multipart upload](UsingRESTAPImpUpload.md) 
21 | + [Using the AWS Command Line Interface for multipart upload](UsingCLImpUpload.md) 
22 | + [Using the AWS SDK for JavaScript for Multipart Upload](https://docs.aws.amazon.com/AWSJavaScriptSDK/latest/AWS/S3.html#createMultipartUpload-property)
--------------------------------------------------------------------------------
/doc_source/GettingObjectsUsingAPIs.md:
--------------------------------------------------------------------------------
 1 | # Getting objects 
 2 | 
 3 | **Topics** 
 4 | + [Related resources](#RelatedResources013) 
 5 | + [Get an object Using the AWS SDK for Java](RetrievingObjectUsingJava.md) 
 6 | + [Get an object Using the AWS SDK for \.NET](RetrievingObjectUsingNetSDK.md) 
 7 | + [Get an object Using the AWS SDK for PHP](RetrieveObjSingleOpPHP.md) 
 8 | + [Get an object Using the REST API](RetrieveObjSingleOpREST.md) 
 9 | + [Share an object with others](ShareObjectPreSignedURL.md) 
10 | 
11 | You can retrieve objects directly from Amazon S3\. You have the following options when retrieving an object: 
12 | + **Retrieve an entire object—**A single GET operation can return the entire object stored in Amazon S3\. 
13 | + **Retrieve object in parts—**Using the `Range` HTTP header in a GET request, you can retrieve a specific range of bytes in an object stored in Amazon S3\. 
14 | 
15 | You can resume fetching other parts of the object whenever your application is ready\. This resumable download is useful when you need only portions of your object data\. It is also useful where network connectivity is poor and you need to react to failures\. 
16 | **Note** 
17 | Amazon S3 doesn't support retrieving multiple ranges of data per GET request\. 
18 | 
19 | When you retrieve an object, its metadata is returned in the response headers\. There are times when you want to override certain response header values returned in a GET response\. For example, you might override the `Content-Disposition` response header value in your GET request\. The REST GET Object API \(see [GET Object](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html)\) allows you to specify query string parameters in your GET request to override these values\. 
20 | 
21 | The AWS SDKs for Java, \.NET, and PHP also provide necessary objects you can use to specify values for these response headers in your GET request\. 
22 | 
23 | When retrieving objects that are stored encrypted using server\-side encryption, you must provide appropriate request headers\.
For more information, see [Protecting data using encryption](UsingEncryption.md)\. 
24 | 
25 | ## Related resources 
26 | + [Using the AWS SDKs, CLI, and Explorers](UsingAWSSDK.md)
--------------------------------------------------------------------------------
/doc_source/RelatedResources012.md:
--------------------------------------------------------------------------------
 1 | # Amazon S3 resources 
 2 | 
 3 | The following table lists related resources that you'll find useful as you work with this service\. 
 4 | 
 5 | 
 6 | | Resource | Description | 
 7 | | --- | --- | 
 8 | | [ Amazon Simple Storage Service Getting Started Guide](https://docs.aws.amazon.com/AmazonS3/latest/gsg/) | The Getting Started Guide provides a quick tutorial of the service based on a simple use case\. | 
 9 | | [ Amazon Simple Storage Service API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/) | The API Reference describes Amazon S3 operations in detail\. | 
10 | | [Amazon S3 Technical FAQ](https://aws.amazon.com/s3/faqs/) | The FAQ covers the top questions developers have asked about this product\. | 
11 | | [AWS Developer Resource Center](https://aws.amazon.com/resources/) | A central starting point to find documentation, code samples, release notes, and other information to help you build innovative applications with AWS\. | 
12 | | [AWS Management Console](https://aws.amazon.com/console/) | The console allows you to perform most of the functions of Amazon S3 without programming\. | 
13 | | [https://forums\.aws\.amazon\.com/](https://forums.aws.amazon.com/) | A community\-based forum for developers to discuss technical questions related to AWS\. | 
14 | | [AWS Support Center](https://aws.amazon.com/support) | The home page for AWS Technical Support, including access to our Developer Forums, Technical FAQs, Service Status page, and Premium Support\. | 
15 | | [AWS Premium Support](https://aws.amazon.com/premiumsupport/) | The primary web page for information about AWS Premium Support, a one\-on\-one, fast\-response support channel to help you build and run applications on AWS Infrastructure Services\. | 
16 | | [Amazon S3 product information](https://aws.amazon.com/s3/) | The primary web page for information about Amazon S3\. | 
17 | | [Contact Us](https://aws.amazon.com/contact-us/) | A central contact point for inquiries concerning AWS billing, account, events, abuse, etc\. | 
18 | | [Conditions of Use](https://aws.amazon.com/legal) | Detailed information about the copyright and trademark usage at Amazon\.com and other topics\. |
--------------------------------------------------------------------------------
/doc_source/security.md:
--------------------------------------------------------------------------------
 1 | # Amazon S3 Security 
 2 | 
 3 | Cloud security at AWS is the highest priority\. As an AWS customer, you benefit from a data center and network architecture that are built to meet the requirements of the most security\-sensitive organizations\. 
 4 | 
 5 | Security is a shared responsibility between AWS and you\. The [shared responsibility model](https://aws.amazon.com/compliance/shared-responsibility-model/) describes this as security *of* the cloud and security *in* the cloud: 
 6 | + **Security of the cloud** – AWS is responsible for protecting the infrastructure that runs AWS services in the AWS Cloud\. AWS also provides you with services that you can use securely\.
The effectiveness of our security is regularly tested and verified by third\-party auditors as part of the [AWS compliance programs](https://aws.amazon.com/compliance/programs/)\. To learn about the compliance programs that apply to Amazon S3, see [AWS Services in Scope by Compliance Program](https://aws.amazon.com/compliance/services-in-scope/)\. 7 | + **Security in the cloud** – Your responsibility is determined by the AWS service that you use\. You are also responsible for other factors including the sensitivity of your data, your organization’s requirements, and applicable laws and regulations\. 8 | 9 | This documentation will help you understand how to apply the shared responsibility model when using Amazon S3\. The following topics show you how to configure Amazon S3 to meet your security and compliance objectives\. You'll also learn how to use other AWS services that can help you monitor and secure your Amazon S3 resources\. 10 | 11 | **Topics** 12 | + [Data protection in Amazon S3](DataDurability.md) 13 | + [Identity and access management in Amazon S3](s3-access-control.md) 14 | + [Logging and monitoring in Amazon S3](s3-incident-response.md) 15 | + [Compliance Validation for Amazon S3](s3-compliance.md) 16 | + [Resilience in Amazon S3](disaster-recovery-resiliency.md) 17 | + [Infrastructure security in Amazon S3](network-isolation.md) 18 | + [Configuration and vulnerability analysis in Amazon S3](vulnerability-analysis-and-management.md) 19 | + [Bucket owner condition](bucket-owner-condition.md) 20 | + [Security Best Practices for Amazon S3](security-best-practices.md) -------------------------------------------------------------------------------- /code_examples/dotnet_examples/S3Examples/ManageingBucketACLTest.cs: -------------------------------------------------------------------------------- 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.) 3 | 4 | using Amazon.S3; 5 | using Amazon.S3.Model; 6 | using System; 7 | using System.Threading.Tasks; 8 | 9 | namespace Amazon.DocSamples.S3 10 | { 11 | class ManagingBucketACLTest 12 | { 13 | private const string newBucketName = "*** bucket name ***"; 14 | // Specify your bucket region (an example region is shown). 15 | private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2; 16 | private static IAmazonS3 client; 17 | 18 | public static void Main() 19 | { 20 | client = new AmazonS3Client(bucketRegion); 21 | CreateBucketUseCannedACLAsync().Wait(); 22 | } 23 | 24 | private static async Task CreateBucketUseCannedACLAsync() 25 | { 26 | try 27 | { 28 | // Add bucket (specify canned ACL). 29 | PutBucketRequest putBucketRequest = new PutBucketRequest() 30 | { 31 | BucketName = newBucketName, 32 | BucketRegion = S3Region.EUW1, // S3Region.US, 33 | // Add canned ACL. 34 | CannedACL = S3CannedACL.LogDeliveryWrite 35 | }; 36 | PutBucketResponse putBucketResponse = await client.PutBucketAsync(putBucketRequest); 37 | 38 | // Retrieve bucket ACL. 39 | GetACLResponse getACLResponse = await client.GetACLAsync(new GetACLRequest 40 | { 41 | BucketName = newBucketName 42 | }); 43 | } 44 | catch (AmazonS3Exception amazonS3Exception) 45 | { 46 | Console.WriteLine("S3 error occurred. 
Exception: " + amazonS3Exception.ToString()); 47 | } 48 | catch (Exception e) 49 | { 50 | Console.WriteLine("Exception: " + e.ToString()); 51 | } 52 | } 53 | } 54 | } -------------------------------------------------------------------------------- /doc_source/batch-ops-put-object-tagging.md: -------------------------------------------------------------------------------- 1 | # Put object tagging 2 | 3 | The Put Object Tagging operation replaces the Amazon S3 object tags of each object listed in the manifest\. An Amazon S3 object tag is a key\-value pair of strings that you can use to store metadata about an object\. 4 | 5 | To create a Put Object Tagging job, you provide a set of tags that you want to apply\. S3 Batch Operations apply the same set of tags to each object\. The tag set that you provide replaces whatever tag sets are already associated with the objects in the manifest\. S3 Batch Operations do not support adding tags to objects while leaving the existing tags in place\. 6 | 7 | If the objects in your manifest are in a versioned bucket, you can apply the tag set to specific versions of each object\. You do this by specifying a version ID for each object in the manifest\. If you don't include a version ID for any object, then S3 Batch Operations will apply the tag set to the latest version of each object\. 8 | 9 | ## Restrictions and limitations 10 | + The role that you specify to run the Put Object Tagging job must have permissions to perform the underlying Amazon S3 PUT Object tagging operation\. For more information about the permissions required, see [PUT Object tagging](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUTtagging.html) in the *Amazon Simple Storage Service API Reference*\. 11 | + S3 Batch Operations use the Amazon S3 PUT Object tagging operation to apply tags to each object in the manifest\. Therefore, all restrictions and limitations that apply to the underlying PUT Object tagging operation also apply to S3 Batch Operations Put Object Tagging jobs\. For more information, see the [Related resources](#batch-ops-put-object-tagging-related-resources) section of this page\. 12 | 13 | ## Related resources 14 | + [Object tagging](object-tagging.md) 15 | + [GET Object tagging](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGETtagging.html) in the *Amazon Simple Storage Service API Reference* 16 | + [PUT Object tagging](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUTtagging.html) in the *Amazon Simple Storage Service API Reference* -------------------------------------------------------------------------------- /doc_source/s3-glacier-select-sql-reference-data-types.md: -------------------------------------------------------------------------------- 1 | # Data Types 2 | 3 | Amazon S3 Select and S3 Glacier Select support several primitive data types\. 4 | 5 | ## Data Type Conversions 6 | 7 | The general rule is to follow the `CAST` function if defined\. If `CAST` is not defined, then all input data is treated as a string\. It must be cast into the relevant data types when necessary\. 8 | 9 | For more information about the `CAST` function, see [CAST](s3-glacier-select-sql-reference-conversion.md#s3-glacier-select-sql-reference-cast)\. 10 | 11 | ## Supported Data Types 12 | 13 | Amazon S3 Select and S3 Glacier Select support the following set of primitive data types\. 
14 | 
15 | | Name | Description | Examples | 
16 | | --- | --- | --- | 
17 | | bool | TRUE or FALSE | FALSE | 
18 | | int, integer | 8\-byte signed integer in the range \-9,223,372,036,854,775,808 to 9,223,372,036,854,775,807\. | 100000 | 
19 | | string | UTF8\-encoded variable\-length string\. The default limit is one character\. The maximum character limit is 2,147,483,647\. | 'xyz' | 
20 | | float | 8\-byte floating point number\. | CAST\(0\.456 AS FLOAT\) | 
21 | | decimal, numeric | Base\-10 number, with maximum precision of 38 \(that is, the maximum number of significant digits\), and with scale within the range of \-2^31 to 2^31\-1 \(that is, the base\-10 exponent\)\. Amazon S3 Select ignores scale and precision when you provide both at the same time\. | 123\.456 | 
22 | | timestamp | Time stamps represent a specific moment in time, always include a local offset, and are capable of arbitrary precision\. In the text format, time stamps follow the [W3C note on date and time formats](https://www.w3.org/TR/NOTE-datetime), but they must end with the literal "T" if not at least whole\-day precision\. Fractional seconds are allowed, with at least one digit of precision, and an unlimited maximum\. Local\-time offsets can be represented as either hour:minute offsets from UTC, or as the literal "Z" to denote a local time of UTC\. They are required on time stamps with time and are not allowed on date values\. | CAST\('2007\-04\-05T14:30Z' AS TIMESTAMP\) |
--------------------------------------------------------------------------------
/doc_source/S3TorrentRetrieve.md:
--------------------------------------------------------------------------------
 1 | # Using BitTorrent to retrieve objects stored in Amazon S3 
 2 | 
 3 | Any object in Amazon S3 that can be read anonymously can also be downloaded via BitTorrent\. Doing so requires use of a BitTorrent client application\. Amazon does not distribute a BitTorrent client application, but there are many free clients available\. The Amazon S3 BitTorrent implementation has been tested to work with the official BitTorrent client \(go to [http://www\.bittorrent\.com/](http://www.bittorrent.com/)\)\. 
 4 | 
 5 | The starting point for a BitTorrent download is a \.torrent file\. This small file describes for BitTorrent clients both the data to be downloaded and where to get started finding that data\. A \.torrent file is a small fraction of the size of the actual object to be downloaded\. Once you feed your BitTorrent client application an Amazon S3 generated \.torrent file, it should start downloading immediately from Amazon S3 and from any "peer" BitTorrent clients\. 
 6 | 
 7 | Retrieving a \.torrent file for any publicly available object is easy\. Simply add a "?torrent" query string parameter at the end of the REST GET request for the object\. No authentication is required\. Once you have a BitTorrent client installed, downloading an object using BitTorrent might be as easy as opening this URL in your web browser\. 
 8 | 
 9 | There is no mechanism to fetch the \.torrent for an Amazon S3 object using the SOAP API\. 
10 | 
11 | **Note** 
12 | SOAP support over HTTP is deprecated, but it is still available over HTTPS\. New Amazon S3 features will not be supported for SOAP\. We recommend that you use either the REST API or the AWS SDKs\. 
13 | 
14 | **Example** 
15 | This example retrieves the Torrent file for the "Nelson" object in the "quotes" bucket\. 
16 | `Sample Request` 
17 | 
18 | ```
19 | 1. GET /quotes/Nelson?torrent HTTP/1.0
20 | 2. Date: Wed, 25 Nov 2009 12:00:00 GMT
21 | ```
22 | `Sample Response` 
23 | 
24 | ```
25 | 1. HTTP/1.1 200 OK
26 | 2. x-amz-request-id: 7CD745EBB7AB5ED9
27 | 3. Date: Wed, 25 Nov 2009 12:00:00 GMT
28 | 4. Content-Disposition: attachment; filename=Nelson.torrent;
29 | 5. Content-Type: application/x-bittorrent
30 | 6. Content-Length: 537
31 | 7. Server: AmazonS3
32 | 8. 
33 | 9. <body: a Bencoded dictionary as defined by the BitTorrent specification>
34 | ```
--------------------------------------------------------------------------------
/doc_source/HLAbortMPUploadsJava.md:
--------------------------------------------------------------------------------
 1 | # Stop multipart uploads 
 2 | 
 3 | **Example** 
 4 | The following example uses the high\-level Java API \(the `TransferManager` class\) to stop all in\-progress multipart uploads that were initiated on a specific bucket over a week ago\. For instructions on creating and testing a working sample, see [Testing the Amazon S3 Java Code Examples](UsingTheMPJavaAPI.md#TestingJavaSamples)\. 
 5 | 
 6 | ```
 7 | import com.amazonaws.AmazonServiceException;
 8 | import com.amazonaws.SdkClientException;
 9 | import com.amazonaws.auth.profile.ProfileCredentialsProvider;
10 | import com.amazonaws.regions.Regions;
11 | import com.amazonaws.services.s3.AmazonS3;
12 | import com.amazonaws.services.s3.AmazonS3ClientBuilder;
13 | import com.amazonaws.services.s3.transfer.TransferManager;
14 | import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
15 | 
16 | import java.util.Date;
17 | 
18 | public class HighLevelAbortMultipartUpload {
19 | 
20 |     public static void main(String[] args) {
21 |         Regions clientRegion = Regions.DEFAULT_REGION;
22 |         String bucketName = "*** Bucket name ***";
23 | 
24 |         try {
25 |             AmazonS3 s3Client = AmazonS3ClientBuilder.standard()
26 |                     .withRegion(clientRegion)
27 |                     .withCredentials(new ProfileCredentialsProvider())
28 |                     .build();
29 |             TransferManager tm = TransferManagerBuilder.standard()
30 |                     .withS3Client(s3Client)
31 |                     .build();
32 | 
33 |             // sevenDays is the duration of seven days in milliseconds.
34 |             long sevenDays = 1000 * 60 * 60 * 24 * 7;
35 |             Date oneWeekAgo = new Date(System.currentTimeMillis() - sevenDays);
36 |             tm.abortMultipartUploads(bucketName, oneWeekAgo);
37 |         } catch (AmazonServiceException e) {
38 |             // The call was transmitted successfully, but Amazon S3 couldn't process 
39 |             // it, so it returned an error response.
40 |             e.printStackTrace();
41 |         } catch (SdkClientException e) {
42 |             // Amazon S3 couldn't be contacted for a response, or the client couldn't
43 |             // parse the response from Amazon S3.
44 |             e.printStackTrace();
45 |         }
46 |     }
47 | }
48 | ```
--------------------------------------------------------------------------------
/code_examples/dotnet_examples/S3Examples/CopyObjectTest.cs:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | using Amazon.S3;
 5 | using Amazon.S3.Model;
 6 | using System;
 7 | using System.Threading.Tasks;
 8 | 
 9 | namespace Amazon.DocSamples.S3
10 | {
11 |     class CopyObjectTest
12 |     {
13 |         private const string sourceBucket = "*** provide the name of the bucket with source object ***";
14 |         private const string destinationBucket = "*** provide the name of the bucket to copy the object to ***";
15 |         private const string objectKey = "*** provide the name of object to copy ***";
16 |         private const string destObjectKey = "*** provide the destination object key name ***";
17 |         // Specify your bucket region (an example region is shown).
18 |         private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2;
19 |         private static IAmazonS3 s3Client;
20 | 
21 |         public static void Main()
22 |         {
23 |             s3Client = new AmazonS3Client(bucketRegion);
24 |             Console.WriteLine("Copying an object");
25 |             CopyingObjectAsync().Wait();
26 |         }
27 | 
28 |         private static async Task CopyingObjectAsync()
29 |         {
30 |             try
31 |             {
32 |                 CopyObjectRequest request = new CopyObjectRequest
33 |                 {
34 |                     SourceBucket = sourceBucket,
35 |                     SourceKey = objectKey,
36 |                     DestinationBucket = destinationBucket,
37 |                     DestinationKey = destObjectKey
38 |                 };
39 |                 CopyObjectResponse response = await s3Client.CopyObjectAsync(request);
40 |             }
41 |             catch (AmazonS3Exception e)
42 |             {
43 |                 Console.WriteLine("Error encountered on server. Message:'{0}' when copying an object", e.Message);
44 |             }
45 |             catch (Exception e)
46 |             {
47 |                 Console.WriteLine("Unknown error encountered on server. Message:'{0}' when copying an object", e.Message);
48 |             }
49 |         }
50 |     }
51 | }
52 | 
--------------------------------------------------------------------------------
/code_examples/dotnet_examples/S3Examples/TransferAccelerationTest.cs:
--------------------------------------------------------------------------------
 1 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 2 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 3 | 
 4 | using Amazon.S3;
 5 | using Amazon.S3.Model;
 6 | using System;
 7 | using System.Threading.Tasks;
 8 | 
 9 | namespace Amazon.DocSamples.S3
10 | {
11 |     class TransferAccelerationTest
12 |     {
13 |         private const string bucketName = "*** bucket name ***";
14 |         // Specify your bucket region (an example region is shown).
15 |         private static readonly RegionEndpoint bucketRegion = RegionEndpoint.USWest2;
16 |         private static IAmazonS3 s3Client;
17 |         public static void Main()
18 |         {
19 |             s3Client = new AmazonS3Client(bucketRegion);
20 |             EnableAccelerationAsync().Wait();
21 |         }
22 | 
23 |         static async Task EnableAccelerationAsync()
24 |         {
25 |             try
26 |             {
27 |                 var putRequest = new PutBucketAccelerateConfigurationRequest
28 |                 {
29 |                     BucketName = bucketName,
30 |                     AccelerateConfiguration = new AccelerateConfiguration
31 |                     {
32 |                         Status = BucketAccelerateStatus.Enabled
33 |                     }
34 |                 };
35 |                 await s3Client.PutBucketAccelerateConfigurationAsync(putRequest);
36 | 
37 |                 var getRequest = new GetBucketAccelerateConfigurationRequest
38 |                 {
39 |                     BucketName = bucketName
40 |                 };
41 |                 var response = await s3Client.GetBucketAccelerateConfigurationAsync(getRequest);
42 | 
43 |                 Console.WriteLine("Acceleration state = '{0}' ", response.Status);
44 |             }
45 |             catch (AmazonS3Exception amazonS3Exception)
46 |             {
47 |                 Console.WriteLine(
48 |                     "Error occurred. Message:'{0}' when setting transfer acceleration",
49 |                     amazonS3Exception.Message);
50 |             }
51 |         }
52 |     }
53 | }
--------------------------------------------------------------------------------
/code_examples/php_examples/S3examples/s3-multipart-upload-using-lowlevel-php-sdk-api.php:
--------------------------------------------------------------------------------
 1 | <?php
 2 | 
 3 | // Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 4 | // SPDX-License-Identifier: MIT-0 (For details, see https://github.com/awsdocs/amazon-s3-developer-guide/blob/master/LICENSE-SAMPLECODE.)
 5 | 
 6 | require 'vendor/autoload.php';
 7 | 
 8 | use Aws\S3\S3Client;
 9 | use Aws\S3\Exception\S3Exception;
10 | 
11 | $bucket = '*** Your Bucket Name ***';
12 | $keyname = '*** Your Object Key ***';
13 | $filename = '*** Path to and Name of the File to Upload ***';
14 | 
15 | $s3 = new S3Client([
16 |     'version' => 'latest',
17 |     'region' => 'us-east-1'
18 | ]);
19 | 
20 | $result = $s3->createMultipartUpload([
21 |     'Bucket' => $bucket,
22 |     'Key' => $keyname,
23 |     'StorageClass' => 'REDUCED_REDUNDANCY',
24 |     'ACL' => 'public-read',
25 |     'Metadata' => [
26 |         'param1' => 'value 1',
27 |         'param2' => 'value 2',
28 |         'param3' => 'value 3'
29 |     ]
30 | ]);
31 | $uploadId = $result['UploadId'];
32 | 
33 | // Upload the file in parts.
34 | try {
35 |     $file = fopen($filename, 'r');
36 |     $partNumber = 1;
37 |     while (!feof($file)) {
38 |         $result = $s3->uploadPart([
39 |             'Bucket' => $bucket,
40 |             'Key' => $keyname,
41 |             'UploadId' => $uploadId,
42 |             'PartNumber' => $partNumber,
43 |             'Body' => fread($file, 5 * 1024 * 1024),
44 |         ]);
45 |         $parts['Parts'][$partNumber] = [
46 |             'PartNumber' => $partNumber,
47 |             'ETag' => $result['ETag'],
48 |         ];
49 | 
50 |         echo "Uploaded part {$partNumber} of {$filename}." . PHP_EOL;
51 |         $partNumber++;
52 |     }
53 |     fclose($file);
54 | } catch (S3Exception $e) {
55 |     $result = $s3->abortMultipartUpload([
56 |         'Bucket' => $bucket,
57 |         'Key' => $keyname,
58 |         'UploadId' => $uploadId
59 |     ]);
60 | 
61 |     echo "Upload of {$filename} failed." . PHP_EOL;
62 |     // The upload ID is no longer valid after the upload is stopped,
63 |     // so don't attempt to complete the multipart upload.
64 |     exit(1);
65 | }
66 | 
67 | // Complete the multipart upload.
68 | $result = $s3->completeMultipartUpload([
69 |     'Bucket' => $bucket,
70 |     'Key' => $keyname,
71 |     'UploadId' => $uploadId,
72 |     'MultipartUpload' => $parts,
73 | ]);
74 | $url = $result['Location'];
75 | 
76 | echo "Uploaded {$filename} to {$url}." . PHP_EOL;
77 | 
--------------------------------------------------------------------------------
/doc_source/CopyingObjectUsingRuby.md:
--------------------------------------------------------------------------------
 1 | # Copy an Object Using the AWS SDK for Ruby 
 2 | 
 3 | The following tasks guide you through using the Ruby classes to copy an object in Amazon S3, either from one bucket to another or within the same bucket\. 
 4 | 
 5 | 
 6 | **Copying Objects** 
 7 | 
 8 | | | | 
 9 | | --- | --- | 
10 | | 1 | Use the Amazon S3 modularized gem for version 3 of the AWS SDK for Ruby, require 'aws\-sdk\-s3', and provide your AWS credentials\. For more information about how to provide your credentials, see [Making requests using AWS account or IAM user credentials](AuthUsingAcctOrUserCredentials.md)\. | 
11 | | 2 | Provide the request information, such as source bucket name, source key name, destination bucket name, and destination key\. | 
12 | 
13 | The following Ruby code example demonstrates the preceding tasks using the `#copy_object` method to copy an object from one bucket to another\. 
14 | 
15 | **Example** 
16 | 
17 | ```
18 | require 'aws-sdk-s3'
19 | 
20 | # Copies an object from one Amazon S3 bucket to another.
21 | #
22 | # Prerequisites:
23 | #
24 | #  - Two S3 buckets (a source bucket and a target bucket).
25 | #  - An object in the source bucket to be copied.
26 | #
27 | # @param s3_client [Aws::S3::Client] An initialized Amazon S3 client.
28 | # @param source_bucket_name [String] The source bucket's name.
29 | # @param source_key [String] The name of the object
30 | #   in the source bucket to be copied.
31 | # @param target_bucket_name [String] The target bucket's name.
32 | # @param target_key [String] The name of the copied object.
33 | # @return [Boolean] true if the object was copied; otherwise, false.
34 | # @example
35 | #   s3_client = Aws::S3::Client.new(region: 'us-east-1')
36 | #   exit 1 unless object_copied?(
37 | #     s3_client,
38 | #     'doc-example-bucket1',
39 | #     'my-source-file.txt',
40 | #     'doc-example-bucket2',
41 | #     'my-target-file.txt'
42 | #   )
43 | def object_copied?(
44 |   s3_client,
45 |   source_bucket_name,
46 |   source_key,
47 |   target_bucket_name,
48 |   target_key)
49 | 
50 |   return true if s3_client.copy_object(
51 |     bucket: target_bucket_name,
52 |     copy_source: source_bucket_name + '/' + source_key,
53 |     key: target_key
54 |   )
55 | rescue StandardError => e
56 |   puts "Error while copying object: #{e.message}"
57 |   false
58 | end
59 | ```
--------------------------------------------------------------------------------