├── .gitignore
├── README.md
├── index.php
├── page.htm
└── s3upload.js

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
composer.*
keys.php
vendor/*

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# AWS S3 Multipart Upload in Browser

## What is this?
Many applications need to upload files from the browser directly to S3: the server handles fine-grained authorization, while the browser carries the upload traffic itself. This library does exactly that.

## What's so special?
Unfortunately, S3 does not allow uploading files larger than 5 GB in one chunk, and the examples in the AWS docs either upload in a single chunk or perform **multipart uploads** only on the server.

Since we don't want to proxy the upload traffic through a server (which would negate the whole point of using S3), we need an S3 multipart upload solution that runs in the browser.

The code in [s3-multipart-upload-browser](https://github.com/ienzam/s3-multipart-upload-browser) does some of this, but many of its features don't work; most notably, it does not support version 3 of the AWS SDK.

## Features
- Upload prefixes (restricting uploads to a certain directory)
- Fine-grained progress feedback
- Parallelized chunk uploads
- Cancellation of an upload, including removal of the already-uploaded parts on S3
- Automated cleanup of stale parts
- Drag & drop
- Progress bar and configurable chunk size

## Requirements
- The browser-side JavaScript relies on jQuery and needs a browser with Blob, File and XHR2 support (virtually all browsers released since 2012).
- The server-side code is PHP, but at under 200 LOC it is straightforward to port to any language.
- If you use the PHP server, it needs AWS credentials and version 3+ of the AWS SDK for PHP.

That's all!

## Installation
Put all the files in a web directory, set the AWS credentials in index.php or in a keys.php file, set your bucket's CORS configuration (this can also be done via the `setCORS()` function in the code), and you're ready.

## Notes
I strongly recommend configuring your S3 bucket for [auto-removal of unfinished multipart upload parts](https://aws.amazon.com/blogs/aws/s3-lifecycle-management-update-support-for-multipart-uploads-and-delete-markers/). Otherwise, any incomplete upload will leave orphaned parts in your bucket, and you will be charged for them.

I also suggest using the largest supported chunk size (5 GB) to keep the number of XHR connections minimal: a 200 GB file takes 40 parts with 5 GB chunks but 2000 parts with 100 MB chunks, and authorizing each part with AWS is both slow and costly.
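## Quick start
A minimal sketch of driving the uploader from the browser (the `file` element id is illustrative, not part of the library). `S3MultiUpload` posts to its `SERVER_LOC` of `'?'`, i.e. the current URL, so serve index.php from the same path as the page:

```js
// Start a multipart upload for the chosen file (sketch; see s3upload.js).
document.getElementById('file').addEventListener('change', function () {
    var upload = new S3MultiUpload(this.files[0]);
    // Both error hooks below are called by s3upload.js; override as needed.
    upload.onServerError = function (command, jqXHR, textStatus, errorThrown) {
        console.error('Signing server failed during "' + command + '":', textStatus);
    };
    upload.onS3UploadError = function (xhr) {
        console.error('S3 rejected a part, HTTP status ' + xhr.status);
    };
    upload.start(); // creates the upload, then sends the parts in parallel
});
```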
## License
This work is released under the MIT license.

--------------------------------------------------------------------------------
/index.php:
--------------------------------------------------------------------------------
<?php
require __DIR__.'/vendor/autoload.php';

use Aws\S3\S3Client;
use Aws\Exception\AwsException;

// Credentials and bucket settings: set them here or in keys.php (gitignored).
@include __DIR__.'/keys.php';
if (!function_exists('aws_key'))
{
    /** @return string AWS access key id */
    function aws_key() { return 'YOUR-AWS-KEY'; }
    /** @return string AWS secret access key */
    function aws_secret() { return 'YOUR-AWS-SECRET'; }
    /** @return string bucket that receives the uploads */
    function bucket() { return 'YOUR-BUCKET'; }
    /** @return string key prefix (directory) all uploads go under */
    function prefix() { return 'uploads/'; }
}

/**
 * Returns the S3 client, or runs a command on it.
 *
 * Without arguments, returns the client itself. Otherwise the first argument
 * is the SDK method name and the remaining arguments are passed to it.
 * @return mixed the command result, the client, or null on an AWS error
 */
function s3($command=null)
{
    $s3=new S3Client([
        'version' => 'latest',
        'region' => 'us-east-1',
        'signature_version' => 'v4',
        'credentials' => [
            'key' => aws_key(),
            'secret' => aws_secret(),
        ]
    ]);
    if ($command===null)
        return $s3;
    $args=func_get_args();
    array_shift($args);
    try {
        return call_user_func_array([$s3,$command],$args);
    }
    catch (AwsException $e)
    {
        echo $e->getMessage(),PHP_EOL;
    }
    return null;
}

/**
 * Outputs data as JSON with the proper header and stops.
 * @param mixed $data
 */
function json_output($data)
{
    header('Content-Type: application/json');
    die(json_encode($data));
}

/**
 * Aborts all multipart uploads on the bucket that were never completed.
 *
 * Useful to clear the clutter from your bucket. You can also configure the
 * bucket to delete them automatically every day.
 * @return integer number of aborted uploads
 */
function abortPendingUploads($bucket)
{
    $count=0;
    $res=s3("listMultipartUploads",["Bucket"=>$bucket]);
    if (is_array($res["Uploads"]))
        foreach ($res["Uploads"] as $item)
        {
            s3("abortMultipartUpload",[
                "Bucket"=>$bucket,
                "Key"=>$item["Key"],
                "UploadId"=>$item["UploadId"],
            ]);
            $count++;
        }
    return $count;
}

/**
 * Enables CORS on the bucket so the browser can PUT parts directly to S3.
 *
 * This needs to be called exactly once on a bucket before browser uploads.
 * @param string $bucket
 */
function setCORS($bucket)
{
    s3("putBucketCors",
    [
        "Bucket"=>$bucket,
        "CORSConfiguration"=>[
            "CORSRules"=>[
                [
                    'AllowedHeaders'=>['*'],
                    'AllowedMethods'=>['POST','GET','HEAD','PUT'],
                    // Tighten this to your site's origin in production
                    "AllowedOrigins"=>["*"],
                ],
            ],
        ],
    ]);
}

if (isset($_POST['command']))
{
    $command=$_POST['command'];
    if ($command=="create")
    {
        $res=s3("createMultipartUpload",[
            'Bucket' => bucket(),
            'Key' => prefix().$_POST['fileInfo']['name'],
            'ContentType' => $_POST['fileInfo']['type'],
            'Metadata' => $_POST['fileInfo'],
        ]);
        json_output([
            'uploadId' => $res->get('UploadId'),
            'key' => $res->get('Key'),
        ]);
    }

    if ($command=="part")
    {
        $command=s3("getCommand","UploadPart",[
            'Bucket' => bucket(),
            'Key' => $_POST['sendBackData']['key'],
            'UploadId' => $_POST['sendBackData']['uploadId'],
            'PartNumber' => $_POST['partNumber'],
            'ContentLength' => $_POST['contentLength'],
        ]);

        // Give it at least 24 hours: large parts can take a long time to upload
        $request=s3("createPresignedRequest",$command,"+48 hours");
        json_output([
            'url' => (string)$request->getUri(),
        ]);
    }

    if ($command=="complete")
    {
        // S3 needs the list of parts (numbers and ETags) to assemble the object
        $partsModel = s3("listParts",[
            'Bucket' => bucket(),
            'Key' => $_POST['sendBackData']['key'],
            'UploadId' => $_POST['sendBackData']['uploadId'],
        ]);
        $model = s3("completeMultipartUpload",[
            'Bucket' => bucket(),
            'Key' => $_POST['sendBackData']['key'],
            'UploadId' => $_POST['sendBackData']['uploadId'],
            'MultipartUpload' => [
                "Parts"=>$partsModel["Parts"],
            ],
        ]);
        json_output([
            'success' => true
        ]);
    }

    if ($command=="abort")
    {
        $model = s3("abortMultipartUpload",[
            'Bucket' => bucket(),
            'Key' => $_POST['sendBackData']['key'],
            'UploadId' => $_POST['sendBackData']['uploadId']
        ]);
        json_output([
            'success' => true
        ]);
    }

    exit(0);
}

include "page.htm";
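The four commands above form a small wire protocol between the browser and index.php. Here is a standalone sketch of that protocol using `fetch` (the helper names are illustrative; s3upload.js itself uses jQuery's `$.post`, which serializes nested objects into the PHP-style bracket keys that the `$_POST` reads above expect):

```js
// Serialize nested objects the way PHP expects: fileInfo[name]=..., etc.
function encode(obj, prefix, params) {
    params = params || new URLSearchParams();
    for (var key in obj) {
        var name = prefix ? prefix + '[' + key + ']' : key;
        if (obj[key] !== null && typeof obj[key] === 'object')
            encode(obj[key], name, params);
        else
            params.append(name, obj[key]);
    }
    return params;
}

// One round-trip to the signing server; resolves to the parsed JSON reply.
async function api(fields) {
    var res = await fetch('index.php', { method: 'POST', body: encode(fields) });
    return res.json();
}

// The full flow for a single-part upload of `blob` (sketch):
// const sendBackData = await api({ command: 'create',
//     fileInfo: { name: 'a.bin', type: 'application/octet-stream', size: blob.size } });
// const { url } = await api({ command: 'part',
//     sendBackData, partNumber: 1, contentLength: blob.size });
// await fetch(url, { method: 'PUT', body: blob });   // the PUT goes straight to S3
// await api({ command: 'complete', sendBackData });
```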

--------------------------------------------------------------------------------
/page.htm:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<!-- Minimal page sketch: only the visible text survives from the original
     markup, so the element ids and script includes below are illustrative. -->
<html>
<head>
    <title>S3 Bulk Uploader</title>
    <script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
    <script src="s3upload.js"></script>
</head>
<body>
    <h1>S3 Bulk Uploader</h1>
    <p>You can upload files to S3 here.</p>

    <!-- File picker; also acts as the drag & drop target -->
    <div id="dropzone">
        <label>
            Select File...
            <input type="file" id="file">
        </label>
    </div>

    <!-- Filled in by the progress callbacks -->
    <div id="progress"></div>
    <button id="cancel">Cancel</button>
</body>
</html>
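A hypothetical wiring script for the page above (not part of the library): it shows how the controls can drive `S3MultiUpload`, including an implementation of the `updateProgress` hook that s3upload.js calls on every progress event:

```js
$(function () {
    function begin(file) {
        var upload = new S3MultiUpload(file);
        // updateProgress is invoked by s3upload.js; aggregate all parts here.
        upload.updateProgress = function () {
            var loaded = 0, total = 0;
            for (var i = 0; i < this.total.length; i++) {
                loaded += this.loaded[i] || 0;
                total += this.total[i] || 0;
            }
            if (total > 0)
                $('#progress').text(Math.round(100 * loaded / total) + '%');
        };
        $('#cancel').off('click').on('click', function () { upload.cancel(); });
        upload.start();
    }

    $('#file').on('change', function () { begin(this.files[0]); });
    $('#dropzone')
        .on('dragover', function (e) { e.preventDefault(); })
        .on('drop', function (e) {
            e.preventDefault();
            begin(e.originalEvent.dataTransfer.files[0]);
        });
});
```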
--------------------------------------------------------------------------------
/s3upload.js:
--------------------------------------------------------------------------------
function S3MultiUpload(file) {
    this.PART_SIZE = 5 * 1024 * 1024 * 1024; // AWS S3 part size limits: 5 MB minimum, 5 GB maximum
    this.SERVER_LOC = '?'; // Location of the signing server (the current URL)
    this.completed = false;
    this.file = file;
    this.fileInfo = {
        name: this.file.name,
        type: this.file.type,
        size: this.file.size,
        lastModifiedDate: this.file.lastModifiedDate
    };
    this.sendBackData = null;
    this.uploadXHR = [];
    // Progress monitoring
    this.byterate = [];
    this.lastUploadedSize = [];
    this.lastUploadedTime = [];
    this.loaded = [];
    this.total = [];
}

/**
 * Creates the multipart upload
 */
S3MultiUpload.prototype.createMultipartUpload = function() {
    var self = this;
    $.post(self.SERVER_LOC, {
        command: 'create',
        fileInfo: self.fileInfo,
    }).done(function(data) {
        self.sendBackData = data;
        self.uploadParts();
    }).fail(function(jqXHR, textStatus, errorThrown) {
        self.onServerError('create', jqXHR, textStatus, errorThrown);
    });
};

/**
 * Call this function to start uploading
 */
S3MultiUpload.prototype.start = function() {
    this.createMultipartUpload();
};

/** private */
S3MultiUpload.prototype.uploadParts = function() {
    var blobs = this.blobs = [], promises = [];
    var start = 0;
    var end, blob, filePart;
    var partNum = 0;

    while (start < this.file.size) {
        end = Math.min(start + this.PART_SIZE, this.file.size);
        filePart = this.file.slice(start, end);
        // Do not push a zero-byte blob
        if (filePart.size > 0)
            blobs.push(filePart);
        start = this.PART_SIZE * ++partNum;
    }

    // Ask the server for a presigned URL for each part
    for (var i = 0; i < blobs.length; i++) {
        blob = blobs[i];
        promises.push(this.uploadXHR[i] = $.post(this.SERVER_LOC, {
            command: 'part',
            sendBackData: this.sendBackData,
            partNumber: i + 1,
            contentLength: blob.size
        }));
    }
    $.when.apply(null, promises)
        .then(this.sendAll.bind(this), this.onServerError)
        .done(this.onPrepareCompleted);
};

/**
 * Sends all the created upload parts in a loop
 */
S3MultiUpload.prototype.sendAll = function() {
    var blobs = this.blobs;
    var length = blobs.length;
    // With a single promise, $.when passes its arguments through directly;
    // with several, each argument is a [data, textStatus, jqXHR] array.
    if (length == 1)
        this.sendToS3(arguments[0], blobs[0], 0);
    else for (var i = 0; i < length; i++) {
        this.sendToS3(arguments[i][0], blobs[i], i);
    }
};

/**
 * Used to send each upload part
 * @param array data parameters of the part (the presigned URL)
 * @param blob blob data bytes
 * @param integer index part index (zero-based)
 */
S3MultiUpload.prototype.sendToS3 = function(data, blob, index) {
    var self = this;
    var url = data['url'];
    var size = blob.size;
    var request = self.uploadXHR[index] = new XMLHttpRequest();
    request.onreadystatechange = function() {
        if (request.readyState === 4) { // 4 is DONE
            if (request.status !== 200) {
                self.updateProgress();
                self.onS3UploadError(request);
                return;
            }
            self.updateProgress();
        }
    };

    request.upload.onprogress = function(e) {
        if (e.lengthComputable) {
            self.total[index] = size;
            self.loaded[index] = e.loaded;
            if (self.lastUploadedTime[index])
            {
                var time_diff = (new Date().getTime() - self.lastUploadedTime[index]) / 1000;
                if (time_diff > 0.005) // at least 5 milliseconds have passed
                {
                    var byterate = (self.loaded[index] - self.lastUploadedSize[index]) / time_diff;
                    self.byterate[index] = byterate;
                    self.lastUploadedTime[index] = new Date().getTime();
                    self.lastUploadedSize[index] = self.loaded[index];
                }
            }
            else
            {
                self.byterate[index] = 0;
                self.lastUploadedTime[index] = new Date().getTime();
                self.lastUploadedSize[index] = self.loaded[index];
            }
            // Only send an update to the user once, regardless of how many
            // parallel XHRs we have (unless the first one is over).
            if (index == 0 || self.total[0] == self.loaded[0])
                self.updateProgress();
        }
    };
    request.open('PUT', url, true);
    request.send(blob);
};

/**
 * Aborts the multipart upload: stops the in-flight XHRs, then asks the
 * server to remove the already-uploaded parts from S3.
 * (The body below is a reconstruction consistent with the server's
 * "abort" command; the original was truncated at this point.)
 */
S3MultiUpload.prototype.cancel = function() {
    var self = this;
    for (var i = 0; i < self.uploadXHR.length; i++) {
        if (self.uploadXHR[i] && self.uploadXHR[i].abort)
            self.uploadXHR[i].abort();
    }
    $.post(self.SERVER_LOC, {
        command: 'abort',
        sendBackData: self.sendBackData
    });
};
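The uploader records a raw per-part byte rate in `byterate` but leaves presentation to the caller. A small hypothetical helper (not part of the library) for displaying it:

```js
// Format a rate in bytes/second as a human-readable string.
function formatRate(bytesPerSecond) {
    var units = ['B/s', 'KB/s', 'MB/s', 'GB/s'];
    var i = 0;
    while (bytesPerSecond >= 1024 && i < units.length - 1) {
        bytesPerSecond /= 1024;
        i++;
    }
    return bytesPerSecond.toFixed(1) + ' ' + units[i];
}

// e.g. the total transfer rate across all parallel parts:
// formatRate(upload.byterate.reduce(function (a, b) { return a + (b || 0); }, 0));
```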