├── .gitignore
├── ASP.NET MVC C#
├── FineUpload.cs
├── FineUploadResult.cs
├── UploadController.cs
├── readme.md
└── web.config
├── ASP.Net_VB
└── UploadController.vb
├── C#
└── azure
│ └── FineUploaderAzureServer.cs
├── coldfusion
├── image-uploader.cfc
└── readme.txt
├── erlang
└── fileserver
│ ├── .gitignore
│ ├── Makefile
│ ├── README.md
│ ├── priv
│ ├── dispatch.conf
│ └── www
│ │ └── static
│ │ └── html
│ │ └── index.html
│ ├── rebar
│ ├── rebar.config
│ ├── src
│ ├── fileserver.app.src
│ ├── fileserver.erl
│ ├── fileserver_app.erl
│ ├── fileserver_fileuploader_resource.erl
│ ├── fileserver_static_resource.erl
│ └── fileserver_sup.erl
│ └── start.sh
├── java
├── MultipartUploadParser.java
├── RequestParser.java
├── UploadReceiver.java
└── s3
│ └── S3Uploads.java
├── license.txt
├── nodejs
├── nodejs.js
├── package.json
└── s3
│ └── s3handler.js
├── perl.cgi
├── php
├── s3
│ ├── README.md
│ ├── s3demo-cors.php
│ └── s3demo.php
└── traditional
│ ├── README.md
│ ├── chunks
│ └── .gitignore
│ ├── endpoint-cors.php
│ ├── endpoint.php
│ ├── files
│ └── .gitignore
│ ├── handler.php
│ └── li3
│ ├── controllers
│ └── FineUploadController.php
│ └── readme.md
├── python
├── django-fine-uploader-s3
│ ├── AUTHORS
│ ├── CHANGELOG
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── manage.py
│ ├── requirements.txt
│ ├── settings.py
│ ├── urls.py
│ ├── views.py
│ └── wsgi.py
├── django-fine-uploader
│ ├── .gitignore
│ ├── AUTHORS
│ ├── CHANGELOG
│ ├── LICENSE
│ ├── README.md
│ ├── __init__.py
│ ├── fine_uploader
│ │ ├── __init__.py
│ │ ├── forms.py
│ │ ├── utils.py
│ │ └── views.py
│ ├── manage.py
│ ├── media
│ │ ├── chunks
│ │ │ └── .gitignore
│ │ └── uploads
│ │ │ └── .gitignore
│ ├── requirements.txt
│ ├── settings.py
│ ├── static
│ │ ├── fine_uploader
│ │ │ └── .gitignore
│ │ └── main.js
│ ├── templates
│ │ └── fine_uploader
│ │ │ ├── .gitignore
│ │ │ └── index.html
│ ├── urls.py
│ └── wsgi.py
├── flask-fine-uploader-s3
│ ├── AUTHORS
│ ├── CHANGELOG
│ ├── LICENSE
│ ├── README.md
│ ├── app.py
│ └── requirements.txt
├── flask-fine-uploader
│ ├── AUTHORS
│ ├── CHANGELOG
│ ├── LICENSE
│ ├── README.md
│ ├── app.py
│ ├── media
│ │ ├── chunks
│ │ │ └── .gitignore
│ │ └── upload
│ │ │ └── .gitignore
│ ├── requirements.txt
│ ├── static
│ │ ├── fine_uploader
│ │ │ └── .gitignore
│ │ └── main.js
│ └── templates
│ │ └── fine_uploader
│ │ └── index.html
└── python3-flask-fine-uploader-s3
│ ├── .gitignore
│ ├── CHANGES.md
│ ├── LICENSE
│ ├── README.md
│ ├── basic_cors.xml
│ ├── client_conf.js
│ ├── index.html
│ ├── p3s3f.env
│ ├── requirements.txt
│ ├── s3-sign-srv.py
│ └── templates
│ └── index.html
├── rails.md
└── readme.md
/.gitignore:
--------------------------------------------------------------------------------
1 | .*
2 | *.iml
3 | Thumbs.db
4 | !.gitignore
5 | test/uploads/
6 | test/uploadsTemp/
7 | *.ipr
8 | *~
9 | .*.sw[a-z]
10 | build/*
11 | release/*
12 |
--------------------------------------------------------------------------------
/ASP.NET MVC C#/FineUpload.cs:
--------------------------------------------------------------------------------
1 | using System.IO;
2 | using System.Web.Mvc;
3 |
4 | namespace FineUploader
5 | {
6 | [ModelBinder(typeof(ModelBinder))]
7 | public class FineUpload
8 | {
9 | public string Filename { get; set; }
10 | public Stream InputStream { get; set; }
11 |
12 | public void SaveAs(string destination, bool overwrite = false, bool autoCreateDirectory = true)
13 | {
14 | if (autoCreateDirectory)
15 | {
16 | var directory = new FileInfo(destination).Directory;
17 | if (directory != null) directory.Create();
18 | }
19 |
20 | using (var file = new FileStream(destination, overwrite ? FileMode.Create : FileMode.CreateNew))
21 | InputStream.CopyTo(file);
22 | }
23 |
24 | public class ModelBinder : IModelBinder
25 | {
26 | public object BindModel(ControllerContext controllerContext, ModelBindingContext bindingContext)
27 | {
28 | var request = controllerContext.RequestContext.HttpContext.Request;
29 | var formUpload = request.Files.Count > 0;
30 |
31 | // find filename
32 | var xFileName = request.Headers["X-File-Name"];
33 | var qqFile = request["qqfile"];
34 | var formFilename = formUpload ? request.Files[0].FileName : null;
35 |
36 | var upload = new FineUpload
37 | {
38 | Filename = xFileName ?? qqFile ?? formFilename,
39 | InputStream = formUpload ? request.Files[0].InputStream : request.InputStream
40 | };
41 |
42 | return upload;
43 | }
44 | }
45 |
46 | }
47 | }
--------------------------------------------------------------------------------
/ASP.NET MVC C#/FineUploadResult.cs:
--------------------------------------------------------------------------------
1 | using System.Web.Mvc;
2 | using Newtonsoft.Json.Linq;
3 |
4 | namespace FineUploader
5 | {
 6 |     /// <summary>
 7 |     /// Docs at https://github.com/Widen/fine-uploader/blob/master/server/readme.md
 8 |     /// </summary>
9 | public class FineUploaderResult : ActionResult
10 | {
11 | public const string ResponseContentType = "text/plain";
12 |
13 | private readonly bool _success;
14 | private readonly string _error;
15 | private readonly bool? _preventRetry;
16 | private readonly JObject _otherData;
17 |
18 | public FineUploaderResult(bool success, object otherData = null, string error = null, bool? preventRetry = null)
19 | {
20 | _success = success;
21 | _error = error;
22 | _preventRetry = preventRetry;
23 |
24 | if (otherData != null)
25 | _otherData = JObject.FromObject(otherData);
26 | }
27 |
28 | public override void ExecuteResult(ControllerContext context)
29 | {
30 | var response = context.HttpContext.Response;
31 | response.ContentType = ResponseContentType;
32 |
33 | response.Write(BuildResponse());
34 | }
35 |
36 | public string BuildResponse()
37 | {
38 | var response = _otherData ?? new JObject();
39 | response["success"] = _success;
40 |
41 | if (!string.IsNullOrWhiteSpace(_error))
42 | response["error"] = _error;
43 |
44 | if (_preventRetry.HasValue)
45 | response["preventRetry"] = _preventRetry.Value;
46 |
47 | return response.ToString();
48 | }
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
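
The BuildResponse method above always emits a JSON body containing at least a success flag, plus error and preventRetry when set, with any otherData keys merged in (served as text/plain, which Fine Uploader expects). A minimal client-side sketch of how that body surfaces in the uploader's onComplete callback follows; it assumes a qq.FineUploader instance is configured elsewhere on the page.

// Hedged sketch: reading the JSON written by FineUploaderResult on the client.
// Pass this object as the "callbacks" option of an existing qq.FineUploader configuration.
var callbacks = {
    onComplete: function (id, name, responseJSON) {
        if (responseJSON.success) {
            // any otherData keys are merged into responseJSON alongside "success"
            console.log('upload ok:', name, responseJSON);
        } else if (responseJSON.preventRetry) {
            console.warn('upload failed, retries disabled: ' + responseJSON.error);
        } else {
            console.warn('upload failed: ' + responseJSON.error);
        }
    }
};
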
/ASP.NET MVC C#/UploadController.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.IO;
3 | using System.Web.Mvc;
4 |
5 | namespace FineUploader
6 | {
7 | public class UploadController : Controller
8 | {
9 | [HttpPost]
10 | public FineUploaderResult UploadFile(FineUpload upload, string extraParam1, int extraParam2)
11 | {
12 |             // ASP.NET MVC will bind extraParam1 and extraParam2 from the params object passed by Fine Uploader
13 |
14 | var dir = @"c:\upload\path";
15 | var filePath = Path.Combine(dir, upload.Filename);
16 | try
17 | {
18 | upload.SaveAs(filePath);
19 | }
20 | catch (Exception ex)
21 | {
22 | return new FineUploaderResult(false, error: ex.Message);
23 | }
24 |
25 |             // the anonymous object in the result below will be converted to JSON and sent back to the browser
26 | return new FineUploaderResult(true, new { extraInformation = 12345 });
27 | }
28 | }
29 | }
--------------------------------------------------------------------------------
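
The UploadFile action above relies on MVC model binding to populate extraParam1 and extraParam2 from Fine Uploader's request parameters. Below is a hedged client-side sketch of how those parameters would be supplied; the /Upload/UploadFile route and the element id are assumptions that depend on your routing and markup.

// Hedged sketch; assumes fine-uploader.js is loaded and default MVC routing ({controller}/{action}).
var uploader = new qq.FineUploader({
    element: document.getElementById('fine-uploader'),
    request: {
        endpoint: '/Upload/UploadFile',     // UploadController.UploadFile
        params: {
            extraParam1: 'some value',      // bound to the string parameter
            extraParam2: 42                 // bound to the int parameter
        }
    },
    callbacks: {
        onComplete: function (id, name, responseJSON) {
            // on success the action returns {"extraInformation":12345,"success":true}
            console.log(name, responseJSON);
        }
    }
});
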
/ASP.NET MVC C#/readme.md:
--------------------------------------------------------------------------------
1 | Install NuGet package FineUploader.AspNet.Mvc (http://nuget.org/packages/FineUploader.AspNet.Mvc)
2 |
3 | _OR_
4 |
5 | 1. Drop these files into your project:
6 | - FineUpload.cs
7 | - FineUploadResult.cs
8 | 2. Reference Newtonsoft.Json
 9 | 3. Look at UploadController.cs for an example of handling uploaded files.
10 | 4. To upload larger files, take a look at web.config
11 |
--------------------------------------------------------------------------------
/ASP.NET MVC C#/web.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/ASP.Net_VB/UploadController.vb:
--------------------------------------------------------------------------------
1 | Imports System.Data.SqlClient
2 | Imports System.Net
3 | Imports System.IO
4 | Namespace Uploader
5 | Public Class UploadController
6 | Inherits System.Web.Mvc.Controller
7 |
 8 |         <System.Web.Mvc.HttpPost()> _
9 | Function Upload(ByVal uploadFile As String) As String
10 | On Error GoTo upload_error
11 | Dim strm As Stream = Request.InputStream
12 | Dim br As BinaryReader = New BinaryReader(strm)
13 | Dim fileContents() As Byte = {}
14 | Const ChunkSize As Integer = 1024 * 1024
15 |
16 |             ' We need to handle IE a little bit differently...
17 | If Request.Browser.Browser = "IE" Then
18 | Dim myfiles As System.Web.HttpFileCollection = System.Web.HttpContext.Current.Request.Files
19 | Dim postedFile As System.Web.HttpPostedFile = myfiles(0)
20 | If Not postedFile.FileName.Equals("") Then
21 | Dim fn As String = System.IO.Path.GetFileName(postedFile.FileName)
22 | br = New BinaryReader(postedFile.InputStream)
23 | uploadFile = fn
24 | End If
25 | End If
26 |
27 |             ' Now we have the binary reader on the IE file input stream. Back to normal...
28 |             Do While br.BaseStream.Position < br.BaseStream.Length
29 |                 Dim b(ChunkSize - 1) As Byte
30 |                 Dim ReadLen As Integer = br.Read(b, 0, ChunkSize)
31 |                 Dim dummy() As Byte = fileContents.Concat(b.Take(ReadLen)).ToArray() ' append only the bytes actually read
32 | fileContents = dummy
33 | dummy = Nothing
34 | Loop
35 |
36 |
37 | ' You now have all the bytes from the uploaded file in 'FileContents'
38 |
39 | ' You could write it to a database:
40 |
41 | 'Dim con As SqlConnection
42 | 'Dim connectionString As String = ""
43 | 'Dim cmd As SqlCommand
44 |
45 | 'connectionString = "Data Source=DEV\SQLEXPRESS;Initial Catalog=myDatabase;Trusted_Connection=True;"
46 | 'con = New SqlConnection(connectionString)
47 |
48 | 'cmd = New SqlCommand("INSERT INTO blobs VALUES(@filename,@filecontents)", con)
49 | 'cmd.Parameters.Add("@filename", SqlDbType.VarChar).Value = uploadFile
50 | 'cmd.Parameters.Add("@filecontents", SqlDbType.VarBinary).Value = fileContents
51 | 'con.Open()
52 | 'cmd.ExecuteNonQuery()
53 | 'con.Close()
54 |
55 |
56 | ' Or write it to the filesystem:
57 | Dim writeStream As FileStream = New FileStream("C:\TEMP\" & uploadFile, FileMode.Create)
58 | Dim bw As New BinaryWriter(writeStream)
59 | bw.Write(fileContents)
60 | bw.Close()
61 |
62 | ' it all worked ok so send back SUCCESS is true!
63 | Return "{""success"":true}"
64 | Exit Function
65 |
66 | upload_error:
67 |             Return "{""error"":""An Error Occurred""}"
68 | End Function
69 | End Class
70 | End Namespace
--------------------------------------------------------------------------------
/C#/azure/FineUploaderAzureServer.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Net;
3 | using System.Globalization;
4 | using System.Collections.Generic;
5 | using Microsoft.WindowsAzure.Storage;
6 | using Microsoft.WindowsAzure.Storage.Auth;
7 | using Microsoft.WindowsAzure.Storage.Blob;
8 | using Microsoft.WindowsAzure.Storage.Shared.Protocol;
9 |
10 | /**
11 | * C# Server-Side Example for Fine Uploader Azure.
12 | * Maintained by Widen Enterprises.
13 | *
14 | * This example:
15 | * - Handles signature/SAS GET requests
16 | * - Handles uploadSuccess POST requests
17 | * - Configures CORS rules for your storage account
18 | */
19 | namespace FineUploaderAzureServer
20 | {
21 | class Program
22 | {
23 | const string STORAGE_ACCOUNT_NAME = "INSERT_AZURE_STORAGE_ACCOUNT_NAME_HERE";
24 | const string STORAGE_ACCOUNT_KEY = "INSERT_AZURE_STORAGE_ACCOUNT_KEY_HERE";
25 |         static List<string> ALLOWED_CORS_ORIGINS = new List<string> {"INSERT_WEB_APPLICATION_URL_HERE"};
26 |         static List<string> ALLOWED_CORS_HEADERS = new List<string> {"x-ms-meta-qqfilename", "Content-Type", "x-ms-blob-type", "x-ms-blob-content-type"};
27 | const CorsHttpMethods ALLOWED_CORS_METHODS = CorsHttpMethods.Delete | CorsHttpMethods.Put;
28 | const int ALLOWED_CORS_AGE_DAYS = 5;
29 | const string SIGNATURE_SERVER_ENDPOINT_ADDRESS = "http://*:8080/sas/";
30 | const string UPLOAD_SUCCESS_ENDPOINT_ADDRESS = "http://*:8080/success/";
31 |
32 | [STAThread]
33 | private static void Main(string[] args)
34 | {
35 | var accountAndKey = new StorageCredentials(STORAGE_ACCOUNT_NAME, STORAGE_ACCOUNT_KEY);
36 | var storageAccount = new CloudStorageAccount(accountAndKey, true);
37 |
38 | // Uncomment this line to set CORS configuration on your account
39 | // configureCors(storageAccount);
40 |
41 | // Uncomment this line to start your signature/uploadSuccess server
42 | // startServer(accountAndKey);
43 | }
44 |
45 | private static void startServer(StorageCredentials accountAndKey)
46 | {
47 | HttpListener listener = new HttpListener();
48 | listener.Prefixes.Add(SIGNATURE_SERVER_ENDPOINT_ADDRESS);
49 | listener.Prefixes.Add(UPLOAD_SUCCESS_ENDPOINT_ADDRESS);
50 | listener.Start();
51 |
52 | while (true)
53 | {
54 | HttpListenerContext context = listener.GetContext();
55 | HttpListenerRequest request = context.Request;
56 | HttpListenerResponse response = context.Response;
57 |
58 | if (request.HttpMethod == "GET")
59 | {
60 | var blobUri = request.QueryString.Get("bloburi");
61 | var verb = request.QueryString.Get("_method");
62 |
63 | var sas = getSasForBlob(accountAndKey, blobUri, verb);
64 |
65 | byte[] buffer = System.Text.Encoding.UTF8.GetBytes(sas);
66 | response.ContentLength64 = buffer.Length;
67 | System.IO.Stream output = response.OutputStream;
68 | output.Write(buffer, 0, buffer.Length);
69 | output.Close();
70 | }
71 | else if (request.HttpMethod == "POST")
72 | {
73 | response.StatusCode = 200;
74 | // TODO insert uploadSuccess handling logic here
75 | response.Close();
76 | }
77 | else
78 | {
79 | response.StatusCode = 405;
80 | }
81 | }
82 | }
83 |
84 | private static String getSasForBlob(StorageCredentials credentials, String blobUri, String verb)
85 | {
86 | CloudBlockBlob blob = new CloudBlockBlob(new Uri(blobUri), credentials);
87 | var permission = SharedAccessBlobPermissions.Write;
88 |
89 | if (verb == "DELETE")
90 | {
91 | permission = SharedAccessBlobPermissions.Delete;
92 | }
93 |
94 | var sas = blob.GetSharedAccessSignature(new SharedAccessBlobPolicy()
95 | {
96 |
97 | Permissions = permission,
98 | SharedAccessExpiryTime = DateTime.UtcNow.AddMinutes(15),
99 | });
100 |
101 | return string.Format(CultureInfo.InvariantCulture, "{0}{1}", blob.Uri, sas);
102 | }
103 |
104 | private static void configureCors(CloudStorageAccount storageAccount)
105 | {
106 | var blobClient = storageAccount.CreateCloudBlobClient();
107 |
108 | Console.WriteLine("Storage Account: " + storageAccount.BlobEndpoint);
109 | var newProperties = CurrentProperties(blobClient);
110 |
111 | newProperties.DefaultServiceVersion = "2013-08-15";
112 | blobClient.SetServiceProperties(newProperties);
113 |
114 | var addRule = true;
115 | if (addRule)
116 | {
117 | var ruleWideOpenWriter = new CorsRule()
118 | {
119 | AllowedHeaders = ALLOWED_CORS_HEADERS,
120 | AllowedOrigins = ALLOWED_CORS_ORIGINS,
121 | AllowedMethods = ALLOWED_CORS_METHODS,
122 | MaxAgeInSeconds = (int)TimeSpan.FromDays(ALLOWED_CORS_AGE_DAYS).TotalSeconds
123 | };
124 | newProperties.Cors.CorsRules.Clear();
125 | newProperties.Cors.CorsRules.Add(ruleWideOpenWriter);
126 | blobClient.SetServiceProperties(newProperties);
127 |
128 | Console.WriteLine("New Properties:");
129 | CurrentProperties(blobClient);
130 |
131 | Console.ReadLine();
132 | }
133 | }
134 |
135 | private static ServiceProperties CurrentProperties(CloudBlobClient blobClient)
136 | {
137 | var currentProperties = blobClient.GetServiceProperties();
138 | if (currentProperties != null)
139 | {
140 | if (currentProperties.Cors != null)
141 | {
142 | Console.WriteLine("Cors.CorsRules.Count : " + currentProperties.Cors.CorsRules.Count);
143 | for (int index = 0; index < currentProperties.Cors.CorsRules.Count; index++)
144 | {
145 | var corsRule = currentProperties.Cors.CorsRules[index];
146 | Console.WriteLine("corsRule[index] : " + index);
147 | foreach (var allowedHeader in corsRule.AllowedHeaders)
148 | {
149 | Console.WriteLine("corsRule.AllowedHeaders : " + allowedHeader);
150 | }
151 | Console.WriteLine("corsRule.AllowedMethods : " + corsRule.AllowedMethods);
152 |
153 | foreach (var allowedOrigins in corsRule.AllowedOrigins)
154 | {
155 | Console.WriteLine("corsRule.AllowedOrigins : " + allowedOrigins);
156 | }
157 | foreach (var exposedHeaders in corsRule.ExposedHeaders)
158 | {
159 | Console.WriteLine("corsRule.ExposedHeaders : " + exposedHeaders);
160 | }
161 | Console.WriteLine("corsRule.MaxAgeInSeconds : " + corsRule.MaxAgeInSeconds);
162 | }
163 | }
164 | Console.WriteLine("DefaultServiceVersion : " + currentProperties.DefaultServiceVersion);
165 | Console.WriteLine("HourMetrics.MetricsLevel : " + currentProperties.HourMetrics.MetricsLevel);
166 | Console.WriteLine("HourMetrics.RetentionDays : " + currentProperties.HourMetrics.RetentionDays);
167 | Console.WriteLine("HourMetrics.Version : " + currentProperties.HourMetrics.Version);
168 | Console.WriteLine("Logging.LoggingOperations : " + currentProperties.Logging.LoggingOperations);
169 | Console.WriteLine("Logging.RetentionDays : " + currentProperties.Logging.RetentionDays);
170 | Console.WriteLine("Logging.Version : " + currentProperties.Logging.Version);
171 | Console.WriteLine("MinuteMetrics.MetricsLevel : " + currentProperties.MinuteMetrics.MetricsLevel);
172 | Console.WriteLine("MinuteMetrics.RetentionDays : " + currentProperties.MinuteMetrics.RetentionDays);
173 | Console.WriteLine("MinuteMetrics.Version : " + currentProperties.MinuteMetrics.Version);
174 | }
175 | return currentProperties;
176 | }
177 | }
178 | }
179 |
--------------------------------------------------------------------------------
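
For reference, here is a hedged sketch of the matching Fine Uploader Azure client configuration: the signature and uploadSuccess endpoints mirror the SIGNATURE_SERVER_ENDPOINT_ADDRESS and UPLOAD_SUCCESS_ENDPOINT_ADDRESS constants above, while the storage account, container name, and host are placeholders.

// Hedged sketch; assumes azure.fine-uploader.js is loaded on a page served from an allowed CORS origin.
var uploader = new qq.azure.FineUploader({
    element: document.getElementById('fine-uploader'),
    request: {
        // placeholder container URL for the storage account configured above
        endpoint: 'https://INSERT_AZURE_STORAGE_ACCOUNT_NAME_HERE.blob.core.windows.net/uploads'
    },
    signature: {
        endpoint: 'http://localhost:8080/sas/'       // served by the GET branch of startServer
    },
    uploadSuccess: {
        endpoint: 'http://localhost:8080/success/'   // served by the POST branch of startServer
    }
});
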
/coldfusion/image-uploader.cfc:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
--------------------------------------------------------------------------------
/coldfusion/readme.txt:
--------------------------------------------------------------------------------
 1 | ColdFusion example by Patrick Hedgepath - Pegasus Web Productions LLC - www.pegweb.com
 2 |
 3 | Code has been tested with version 2.0 of Valums' AJAX file uploader on a ColdFusion Enterprise 9.x server.
4 | If you have any improvements to this code please feel free to email them to me webmaster@pegweb.com
5 |
6 | Questions, problems, comments? Go to the forums and post https://github.com/Widen/fine-uploader
7 |
--------------------------------------------------------------------------------
/erlang/fileserver/.gitignore:
--------------------------------------------------------------------------------
1 | .eunit
2 | deps
3 | *.o
4 | *.beam
5 | *.plt
6 | ebin
7 | priv/log
8 | doc
9 | priv/www/static/js
10 | priv/www/static/css
11 | priv/www/static/img
12 |
--------------------------------------------------------------------------------
/erlang/fileserver/Makefile:
--------------------------------------------------------------------------------
1 | ERL ?= erl
2 | APP := fileserver
3 |
4 | .PHONY: deps
5 |
6 | all: deps
7 | @./rebar compile
8 |
9 | deps:
10 | @./rebar get-deps
11 |
12 | clean:
13 | @./rebar clean
14 |
15 | distclean: clean
16 | @./rebar delete-deps
17 |
18 | docs:
19 | @erl -noshell -run edoc_run application '$(APP)' '"."' '[]'
20 |
--------------------------------------------------------------------------------
/erlang/fileserver/README.md:
--------------------------------------------------------------------------------
1 | fileserver
2 | =============
3 |
 4 | Simple handling of multipart form data from the Fine Uploader framework.
 5 | Uses webmachine, so some familiarity with that is assumed.
6 |
7 | You probably want to do one of a couple of things at this point:
8 |
 9 | 0. Get the latest build of Fine Uploader and place it in the right
10 |    directories under priv/www/static/,
11 |    e.g. place fineuploader-{VERSION}.js in priv/www/static/js/.
12 |
13 | 1. Update priv/www/static/html/index.html to reference the correct versioned files
14 |
15 | 2. Get dependencies and build the application:
16 | $ ./rebar get-deps
17 | $ ./rebar compile
18 |
19 | 3. Start up the application:
20 | $ ./start.sh
21 |
22 | 4. Upload some files:
23 |    http://localhost:8000
24 |
25 |
26 | ### Notes
27 |
28 | 0. Currently all files are saved to /tmp. To change this, edit the
29 |    dispatch.conf file (look for '/tmp').
30 |
31 | 1. You will have to edit the fineuploader-{VERSION}.css file so that
32 |    the image paths are relative, e.g. background: url("../img/loading.gif"), etc.
--------------------------------------------------------------------------------
/erlang/fileserver/priv/dispatch.conf:
--------------------------------------------------------------------------------
1 | %%-*- mode: erlang -*-
2 |
3 | %% static files.
4 | {[], fileserver_static_resource, ["www/static/html"]}.
5 | {["css", '*'], fileserver_static_resource, ["www/static/css"]}.
6 | {["js", '*'], fileserver_static_resource, ["www/static/js"]}.
7 | {["img", '*'], fileserver_static_resource, ["www/static/img"]}.
8 | {["upload", '*'], fileserver_fileuploader_resource, ["/tmp"]}.
9 |
10 |
11 |
--------------------------------------------------------------------------------
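
The dispatch table above routes /upload to fileserver_fileuploader_resource, which accepts POST for uploads and DELETE for removals and, per its documentation, assumes files arrive in parts. A hedged client-side sketch matching those routes follows; it would belong in priv/www/static/html/index.html, and the element id is an assumption.

// Hedged sketch; assumes the Fine Uploader js/css builds have been copied under priv/www/static/.
var uploader = new qq.FineUploader({
    element: document.getElementById('fine-uploader'),
    request: {
        endpoint: '/upload'          // mapped to fileserver_fileuploader_resource above
    },
    chunking: {
        enabled: true                // the resource expects qqpartindex/qqtotalparts fields
    },
    deleteFile: {
        enabled: true,
        endpoint: '/upload',         // DELETE /upload/<uuid> is handled by delete_resource/2
        method: 'DELETE'
    }
});
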
/erlang/fileserver/priv/www/static/html/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Fine Uploader Demo
6 |
7 |
8 |
9 |
10 |
11 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/erlang/fileserver/rebar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/erlang/fileserver/rebar
--------------------------------------------------------------------------------
/erlang/fileserver/rebar.config:
--------------------------------------------------------------------------------
1 | %%-*- mode: erlang -*-
2 |
3 | {deps, [{webmachine, "1.9.*", {git, "git://github.com/basho/webmachine", "master"}}]}.
4 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver.app.src:
--------------------------------------------------------------------------------
1 | %%-*- mode: erlang -*-
2 | {application, fileserver,
3 | [
4 | {description, "fileserver"},
5 | {vsn, "1"},
6 | {modules, []},
7 | {registered, []},
8 | {applications, [
9 | kernel,
10 | stdlib,
11 | inets,
12 | crypto,
13 | mochiweb,
14 | webmachine
15 | ]},
16 | {mod, { fileserver_app, []}},
17 | {env, []}
18 | ]}.
19 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver.erl:
--------------------------------------------------------------------------------
1 | %% @author author
2 | %% @copyright YYYY author.
3 |
4 | %% @doc fileserver startup code
5 |
6 | -module(fileserver).
7 | -author('author ').
8 | -export([start/0, start_link/0, stop/0]).
9 |
10 | ensure_started(App) ->
11 | case application:start(App) of
12 | ok ->
13 | ok;
14 | {error, {already_started, App}} ->
15 | ok
16 | end.
17 |
18 | %% @spec start_link() -> {ok,Pid::pid()}
19 | %% @doc Starts the app for inclusion in a supervisor tree
20 | start_link() ->
21 | ensure_started(inets),
22 | ensure_started(crypto),
23 | ensure_started(mochiweb),
24 | application:set_env(webmachine, webmachine_logger_module,
25 | webmachine_logger),
26 | ensure_started(webmachine),
27 | fileserver_sup:start_link().
28 |
29 | %% @spec start() -> ok
30 | %% @doc Start the fileserver server.
31 | start() ->
32 | ensure_started(inets),
33 | ensure_started(crypto),
34 | ensure_started(mochiweb),
35 | application:set_env(webmachine, webmachine_logger_module,
36 | webmachine_logger),
37 | ensure_started(webmachine),
38 | application:start(fileserver).
39 |
40 | %% @spec stop() -> ok
41 | %% @doc Stop the fileserver server.
42 | stop() ->
43 | Res = application:stop(fileserver),
44 | application:stop(webmachine),
45 | application:stop(mochiweb),
46 | application:stop(crypto),
47 | application:stop(inets),
48 | Res.
49 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver_app.erl:
--------------------------------------------------------------------------------
1 | %% @author author
2 | %% @copyright YYYY author.
3 |
4 | %% @doc Callbacks for the fileserver application.
5 |
6 | -module(fileserver_app).
7 | -author('author ').
8 |
9 | -behaviour(application).
10 | -export([start/2,stop/1]).
11 |
12 |
13 | %% @spec start(_Type, _StartArgs) -> ServerRet
14 | %% @doc application start callback for fileserver.
15 | start(_Type, _StartArgs) ->
16 | fileserver_sup:start_link().
17 |
18 | %% @spec stop(_State) -> ServerRet
19 | %% @doc application stop callback for fileserver.
20 | stop(_State) ->
21 | ok.
22 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver_fileuploader_resource.erl:
--------------------------------------------------------------------------------
1 | %% @author Dipesh Patel
2 | %% @copyright 2013 Dipesh Patel
3 |
4 | %%
5 | %% Licensed under the Apache License, Version 2.0 (the "License");
6 | %% you may not use this file except in compliance with the License.
7 | %% You may obtain a copy of the License at
8 | %%
9 | %% http://www.apache.org/licenses/LICENSE-2.0
10 | %%
11 | %% Unless required by applicable law or agreed to in writing, software
12 | %% distributed under the License is distributed on an "AS IS" BASIS,
13 | %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | %% See the License for the specific language governing permissions and
15 | %% limitations under the License.
16 |
17 | %% @doc Webmachine resource file for use with Fine Uploader
18 | %% @reference See file-uploader
19 | %% and webmachine
20 | %% for more information.
21 |
22 | -module(fileserver_fileuploader_resource).
23 |
24 | %% ------------------------------------------------------------------
25 | %% API Function Exports
26 | %% ------------------------------------------------------------------
27 |
28 | -export([init/1,
29 | allowed_methods/2,
30 | delete_resource/2,
31 | process_post/2]).
32 |
33 | -include_lib("webmachine/include/webmachine.hrl").
34 |
35 | %% @type context() = Record :: #context{ outdir = filelib:dirname() }.
36 |
37 | -record(context, {outdir}).
38 |
39 | %% @todo tests, check delete works, debug mode to log file.
40 |
41 | %% ------------------------------------------------------------------
42 | %% API Function Definitions
43 | %% ------------------------------------------------------------------
44 |
45 | %% @spec init([OutputDir]) -> {ok, context()}
46 | %% OutputDir = filelib:dirname()
47 | %% @doc Initialise resource.
48 | init([OutputDir]) -> {ok, #context{outdir=OutputDir}}.
49 |
50 | %% @spec allowed_methods(wrq:reqdata(), context()) -> {[atom()], wrq:reqdata(), context()}
51 | %% @doc Only allow POST and DELETE
52 | allowed_methods(ReqData, Context) -> {['POST', 'DELETE'], ReqData, Context}.
53 |
54 | %% @spec delete_resource(wrq:reqdata(), context()) -> {boolean(), wrq:reqdata(), context()}
55 | %% @doc Delete the file given by the uuid in the dispatch path
56 | delete_resource(ReqData, Context) ->
57 | Dirname = file_path(wrq:disp_path(ReqData), Context),
58 | Files = find_all_files(Dirname++"/*"),
59 | case delete_files(Files) of
60 | ok -> case file:del_dir(Dirname) of
61 | ok -> {true, ReqData, Context};
62 | _ -> {false, ReqData, Context}
63 | end;
64 | _ -> {false, ReqData, Context}
65 | end.
66 |
67 | %% @spec process_post(wrq:reqdata(), context()) -> {boolean(), wrq:reqdata(), context()}
68 | %% @doc process a file post request. Assumes files are sent in parts.
69 | process_post(ReqData, Context) ->
70 | try
71 | Boundary = webmachine_multipart:find_boundary(ReqData),
72 | %%io:format("Boundary ~p~n",[Boundary]),
73 | Parts = accumulate_stream_parts(webmachine_multipart:stream_parts(
74 | wrq:stream_req_body(ReqData, 1024), Boundary
75 | ),[]),
76 | %%io:format("Parts ~p~n", [Parts]),
77 | case write_to_disk(Parts, Context) of
78 | ok -> {true, success(ReqData), Context};
79 | {error, Msg} -> {true, failure(Msg, ReqData), Context};
80 | {prevent_retry, Msg} -> {true, prevent_retry(Msg, ReqData), Context};
81 | {reset, Msg} -> {true, reset(Msg, ReqData), Context}
82 | end
83 | of
84 | Ret -> Ret
85 | catch
86 | Exception:Reason ->
87 | io:format("Caught Exception ~p~n", [{Exception, Reason}]),
88 | {true,
89 | prevent_retry(
90 | "Caught Exception",
91 | ReqData
92 | ),
93 | Context}
94 | %%after
95 | %%Maybe clean out file system?
96 | end.
97 |
98 | %% ------------------------------------------------------------------
99 | %% Internal Function Definitions
100 | %% ------------------------------------------------------------------
101 |
102 | %% @spec accumulate_stream_parts('done_parts' | {fpart(), function()}, [fpart()]) -> [fpart()]
103 | %% @doc Return all multipart streams as list
104 | accumulate_stream_parts(done_parts, Acc) ->
105 | %%io:format("RECEIVED ~p~n",[done_parts]),
106 | lists:reverse(Acc);
107 | accumulate_stream_parts({Hunk,Next},Acc) ->
108 | %%io:format("RECEIVED ~p~n",[Hunk]),
109 | accumulate_stream_parts(Next(),[Hunk|Acc]).
110 |
111 | %% @spec write_to_disk([fpart()], context()) -> ok | {error,Error}
112 | %% @doc Write part to disk, consolidate parts to one file if receive final part.
113 | write_to_disk(Parts, Context) ->
114 | Path = file_path(qquuid(Parts), Context),
115 | case filelib:ensure_dir(Path++"/") of
116 | ok -> %%io:format("Path ~p~n",[Path]),
117 | case write_to_disk(Parts, Context, qqpartindex(Parts), qqtotalparts(Parts)-1) of
118 | ok -> ok;
119 | %% TODO:handle more specific errors here.
120 | {error, Error} -> {error,Error}
121 | end;
122 | {error,Error} -> {error,Error}
123 | end.
124 |
125 | %% @spec write_to_disk([fpart()], context(), integer(), integer()) -> ok | {error,Error}
126 | %% @doc Write a part to disk, consolidate parts to one file if part is last one.
127 | write_to_disk(Parts, Context, Last, Last) ->
128 | case write_to_disk(Parts, Context, Last, Last+1) of
129 | ok ->
130 | Path = file_path(qquuid(Parts), Context),
131 | Filename = filename:join(Path, qqfilename(Parts)),
132 | case write_combined_parts(Filename) of
133 | ok -> case check_file_size(
134 | filelib:file_size(Filename),
135 | qqtotalfilesize(Parts)) of
136 | ok -> ok;
137 | {error, Error} -> {error, Error} %%return error and delete file?
138 | end;
139 | {error, Error} -> {error, Error}
140 | end;
141 | {error,Error} -> {error,Error}
142 | end;
143 | write_to_disk(Parts, Context, Index, _TotalIndex) ->
144 | Path = file_path(qquuid(Parts), Context),
145 | Filename = filename:join(Path, qqfilename(Parts)++"_"++integer_to_list(Index)),
146 | %%io:format("Filename ~p~n",[Filename]),
147 | Bytes = qqfile(Parts),
148 | case file:write_file(Filename, Bytes) of
149 | ok -> ok; %%TODO check filesize
150 | {error, Reason} -> {error, Reason}
151 | end.
152 |
153 |
154 | %% @spec write_combined_parts(file:filename()) -> ok | {error,Error}
155 | %% @doc Write parts to one file
156 | write_combined_parts(Filename) when is_list(Filename) ->
157 | Files = find_all_files(Filename++"_*"),
158 | %%io:format("Com files ~p~n",[Files]),
159 | case file:open(Filename, [append]) of
160 | {ok, Handle} ->
161 | case write_combined_parts(Handle, Files) of
162 | ok -> delete_files(Files),
163 | ok;
164 | {error, Reason} ->
165 | delete_files([Filename]),
166 | {error, Reason}
167 | end;
168 | {error, Reason} -> {error, Reason}
169 | end.
170 |
171 | %% @spec write_combined_parts(file:iodata(), [file:filename()]) -> ok | {error,Error}
172 | %% @doc Write parts to one file
173 | write_combined_parts(Handle,[]) ->
174 | file:close(Handle);
175 | write_combined_parts(Handle, [H|T]) ->
176 | %%io:format("Head in com ~p~n",[H]),
177 | case file:read_file(H) of
178 | {ok, Binary} ->
179 | case file:write(Handle, Binary) of
180 | ok -> write_combined_parts(Handle, T);
181 | {error, Reason} -> {error, Reason}
182 | end;
183 | {error, Reason} -> {error, Reason}
184 | end.
185 |
186 | %%params from body
187 | qqfilename(Parts) ->
188 | binary_to_list(get_param("qqfilename", Parts)).
189 |
190 | qqtotalparts(Parts) ->
191 | binary_to_int(get_param("qqtotalparts", Parts)).
192 |
193 | qqtotalfilesize(Parts) ->
194 | binary_to_int(get_param("qqtotalfilesize", Parts)).
195 |
196 | qqpartindex(Parts) ->
197 | binary_to_int(get_param("qqpartindex", Parts)).
198 |
199 | qquuid(Parts) ->
200 | binary_to_list(get_param("qquuid", Parts)).
201 |
202 | qqfile(Parts) ->
203 | get_param("qqfile", Parts).
204 |
205 | get_param(Name, Parts) ->
206 | {Name, _, Val} = proplists:lookup(Name, Parts),
207 | Val.
208 |
209 | %%responses
210 | success(ReqData) ->
211 | success("true", ReqData).
212 |
213 | success(BooleanStr, ReqData) ->
214 | build_response([{success, BooleanStr}], ReqData).
215 |
216 | reset(Msg, ReqData) ->
217 | build_response([{success, "false"}, {error, Msg}, {reset, "true"}], ReqData).
218 |
219 | prevent_retry(Msg, ReqData) ->
220 | build_response([{success, "false"}, {error, Msg}, {preventRetry, "true"}], ReqData).
221 |
222 | failure(Msg, ReqData) ->
223 | build_response([{error, Msg}], ReqData).
224 |
225 | build_response(Status, ReqData)->
226 | wrq:set_resp_header(
227 | "Content-type", "text/plain",
228 | response_body(Status, ReqData)
229 | ).
230 |
231 | response_body(Status, ReqData) ->
232 | wrq:set_resp_body(
233 | mochijson:encode(
234 | {struct, Status}
235 | ), ReqData
236 | ).
237 |
238 | %% @spec binary_to_int(binary()) -> integer()
239 | binary_to_int(N) ->
240 | list_to_integer(binary_to_list(N)).
241 |
242 | %% @spec check_file_size(integer(), integer()) -> ok | {error, string()}
243 | %% @doc Check that the two sizes given match
244 | check_file_size(_Same, _Same) -> ok;
245 | check_file_size(FileSize, Expected) ->
246 |     {error, "Expected "++integer_to_list(Expected)++" got "++integer_to_list(FileSize)}.
247 |
248 | %% @spec find_all_files(file:filename()) -> [file:filename()]
249 | %% @doc find all files of the given wildcard
250 | find_all_files(Wildcard) ->
251 | lists:sort(filelib:wildcard(Wildcard)).
252 |
253 | %% @spec delete_files([file:filename()]) -> ok | {error,Error}
254 | %% @doc delete files from given list
255 | delete_files([]) -> ok;
256 | delete_files([H|T]) ->
257 | case file:delete(H) of
258 | ok -> delete_files(T);
259 | {error, Reason} -> {error, Reason}
260 | end.
261 |
262 | %% @spec file_path(string(), context()) -> false | file:filename()
263 | %% @doc Removes any beginning "/" from the Name.
264 | file_path([], _Context) ->
265 | false;
266 | file_path(Name, Context) ->
267 | RelName = case hd(Name) of
268 | "/" -> tl(Name);
269 | _ -> Name
270 | end,
271 | filename:join([Context#context.outdir, RelName]).
272 |
273 |
274 |
275 | %% ------------------------------------------------------------------
276 | %% EUnit Tests
277 | %% ------------------------------------------------------------------
278 | -ifdef(TEST).
279 |
280 | -include_lib("eunit/include/eunit.hrl").
281 |
282 | setup() ->
283 | [{"qqfilename", some_stuff, <<"test.txt">>},
284 | {"qqtotalparts", some_stuff, <<"2">>},
285 | {"qqtotalfilesize", some_stuff, <<"10">>},
286 | {"qqpartindex", some_stuff, <<"1">>},
287 | {"qquuid", some_stuff, <<"er3-rere-dc">>},
288 | {"qqfile", some_stuff, <<"binary data for the win">>}
289 | ].
290 |
291 | cleanup(_SetupData) ->
292 | ok.
293 |
294 | params_test_() ->
295 | {setup,
296 | fun setup/0,
297 | fun cleanup/1,
298 | fun(SetupData) ->
299 | ?_assertEqual("test.txt", qqfilename(SetupData)),
300 | ?_assertEqual(2, qqtotalparts(SetupData)),
301 | ?_assertEqual(10, qqtotalfilesize(SetupData)),
302 | ?_assertEqual(1, qqpartindex(SetupData)),
303 | ?_assertEqual("er3-rere-dc", qquuid(SetupData)),
304 | ?_assertEqual(<<"binary data for the win">>, qqfile(SetupData))
305 | end
306 | }.
307 |
308 | -endif.
309 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver_static_resource.erl:
--------------------------------------------------------------------------------
1 | %% Credit where credit due taken from webmachine demo
2 | %% @author Bryan Fink
3 | %% @author Andy Gross
4 | %% @author Justin Sheehy
5 | %% @copyright 2008-2009 Basho Technologies, Inc.
6 |
7 | -module(fileserver_static_resource).
8 | -export([init/1]).
9 | -export([allowed_methods/2,
10 | resource_exists/2,
11 | content_types_provided/2,
12 | provide_content/2]).
13 |
14 | -record(context, {root,response_body=undefined,metadata=[]}).
15 |
16 | -include_lib("webmachine/include/webmachine.hrl").
17 |
18 | init([ContentDir]) ->
19 | {ok, App}= application:get_application(),
20 | PrivDir = code:priv_dir(App),
21 | SourceDir = filename:join([PrivDir, ContentDir]),
22 | {ok, #context{root=SourceDir}}.
23 |
24 | allowed_methods(ReqData, Context) ->
25 | {['HEAD', 'GET'], ReqData, Context}.
26 |
27 | content_types_provided(ReqData, Context) ->
28 | %%CT = webmachine_util:guess_mime(wrq:disp_path(ReqData)),
29 | CT = webmachine_util:guess_mime(
30 | get_full_path(Context, wrq:disp_path(ReqData))
31 | ),
32 | {[{CT, provide_content}], ReqData,
33 | Context#context{metadata=[{'content-type', CT}|Context#context.metadata]}}.
34 |
35 | get_full_path(Context, Path) ->
36 | Root = Context#context.root,
37 | Result = case mochiweb_util:safe_relative_path(Path) of
38 | undefined ->
39 | undefined;
40 | RelPath ->
41 | FullPath = filename:join([Root, RelPath]),
42 | case filelib:is_dir(FullPath) of
43 | true ->
44 | filename:join([FullPath, "index.html"]);
45 | false ->
46 | FullPath
47 | end
48 | end,
49 | Result.
50 |
51 | file_exists(Context, Name) ->
52 | NamePath = get_full_path(Context, Name),
53 | case filelib:is_regular(NamePath) of
54 | true ->
55 | {true, NamePath};
56 | false ->
57 | false
58 | end.
59 |
60 | resource_exists(ReqData, Context) ->
61 | Path = wrq:disp_path(ReqData),
62 | case file_exists(Context, Path) of
63 | {true, _} ->
64 | {true, ReqData, Context};
65 | _ ->
66 | case Path of
67 | "p" -> {true, ReqData, Context};
68 | _ -> {false, ReqData, Context}
69 | end
70 | end.
71 |
72 | maybe_fetch_object(Context, Path) ->
73 | % if returns {true, NewContext} then NewContext has response_body
74 | case Context#context.response_body of
75 | undefined ->
76 | case file_exists(Context, Path) of
77 | {true, FullPath} ->
78 | {ok, Value} = file:read_file(FullPath),
79 | {true, Context#context{response_body=Value}};
80 | false ->
81 | {false, Context}
82 | end;
83 | _Body ->
84 | {true, Context}
85 | end.
86 |
87 | provide_content(ReqData, Context) ->
88 | case maybe_fetch_object(Context, wrq:disp_path(ReqData)) of
89 | {true, NewContext} ->
90 | Body = NewContext#context.response_body,
91 | {Body, ReqData, Context};
92 | {false, NewContext} ->
93 | {error, ReqData, NewContext}
94 | end.
95 |
--------------------------------------------------------------------------------
/erlang/fileserver/src/fileserver_sup.erl:
--------------------------------------------------------------------------------
1 | %% @author author
2 | %% @copyright YYYY author.
3 |
4 | %% @doc Supervisor for the fileserver application.
5 |
6 | -module(fileserver_sup).
7 | -author('author ').
8 |
9 | -behaviour(supervisor).
10 |
11 | %% External exports
12 | -export([start_link/0, upgrade/0]).
13 |
14 | %% supervisor callbacks
15 | -export([init/1]).
16 |
17 | %% @spec start_link() -> ServerRet
18 | %% @doc API for starting the supervisor.
19 | start_link() ->
20 | supervisor:start_link({local, ?MODULE}, ?MODULE, []).
21 |
22 | %% @spec upgrade() -> ok
23 | %% @doc Add processes if necessary.
24 | upgrade() ->
25 | {ok, {_, Specs}} = init([]),
26 |
27 | Old = sets:from_list(
28 | [Name || {Name, _, _, _} <- supervisor:which_children(?MODULE)]),
29 | New = sets:from_list([Name || {Name, _, _, _, _, _} <- Specs]),
30 | Kill = sets:subtract(Old, New),
31 |
32 | sets:fold(fun (Id, ok) ->
33 | supervisor:terminate_child(?MODULE, Id),
34 | supervisor:delete_child(?MODULE, Id),
35 | ok
36 | end, ok, Kill),
37 |
38 | [supervisor:start_child(?MODULE, Spec) || Spec <- Specs],
39 | ok.
40 |
41 | %% @spec init([]) -> SupervisorTree
42 | %% @doc supervisor callback.
43 | init([]) ->
44 | Ip = case os:getenv("WEBMACHINE_IP") of false -> "0.0.0.0"; Any -> Any end,
45 | {ok, App} = application:get_application(?MODULE),
46 | {ok, Dispatch} = file:consult(filename:join([priv_dir(App),
47 | "dispatch.conf"])),
48 | Port = case os:getenv("WEBMACHINE_PORT") of
49 | false -> 8000;
50 |                AnyPort -> list_to_integer(AnyPort) % os:getenv/1 returns a string
51 | end,
52 | WebConfig = [
53 | {ip, Ip},
54 | {port, Port},
55 | {log_dir, "priv/log"},
56 | {dispatch, Dispatch}],
57 | Web = {webmachine_mochiweb,
58 | {webmachine_mochiweb, start, [WebConfig]},
59 | permanent, 5000, worker, [mochiweb_socket_server]},
60 | Processes = [Web],
61 | {ok, { {one_for_one, 10, 10}, Processes} }.
62 |
63 | %%
64 | %% @doc return the priv dir
65 | priv_dir(Mod) ->
66 | case code:priv_dir(Mod) of
67 | {error, bad_name} ->
68 | Ebin = filename:dirname(code:which(Mod)),
69 | filename:join(filename:dirname(Ebin), "priv");
70 | PrivDir ->
71 | PrivDir
72 | end.
73 |
--------------------------------------------------------------------------------
/erlang/fileserver/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | cd `dirname $0`
3 | exec erl -pa $PWD/ebin $PWD/deps/*/ebin -boot start_sasl -s reloader -s fileserver
4 |
--------------------------------------------------------------------------------
/java/MultipartUploadParser.java:
--------------------------------------------------------------------------------
1 | package fineuploader;
2 |
3 | import org.apache.commons.fileupload.FileItem;
4 | import org.apache.commons.fileupload.FileUploadException;
5 | import org.apache.commons.fileupload.disk.DiskFileItemFactory;
6 | import org.apache.commons.fileupload.servlet.FileCleanerCleanup;
7 | import org.apache.commons.fileupload.servlet.ServletFileUpload;
8 | import org.apache.commons.io.FileCleaningTracker;
9 | import org.apache.commons.lang3.StringUtils;
10 | import org.slf4j.Logger;
11 | import org.slf4j.LoggerFactory;
12 |
13 | import javax.servlet.ServletContext;
14 | import javax.servlet.http.HttpServletRequest;
15 | import java.io.File;
16 | import java.io.IOException;
17 | import java.io.UnsupportedEncodingException;
18 | import java.util.*;
19 |
20 | public class MultipartUploadParser
21 | {
22 | final Logger log = LoggerFactory.getLogger(MultipartUploadParser.class);
23 |
 24 |     private Map<String, String> params = new HashMap<String, String>();
 25 |
 26 |     private List<FileItem> files = new ArrayList<FileItem>();
27 |
28 | // fileItemsFactory is a field (even though it's scoped to the constructor) to prevent the
29 | // org.apache.commons.fileupload.servlet.FileCleanerCleanup thread from attempting to delete the
 30 |     // temp file while it is still being used.
31 | //
32 | // FileCleanerCleanup uses a java.lang.ref.ReferenceQueue to delete the temp file when the FileItemsFactory marker object is GCed
33 | private DiskFileItemFactory fileItemsFactory;
34 |
35 | public MultipartUploadParser(HttpServletRequest request, File repository, ServletContext context) throws Exception
36 | {
37 | if (!repository.exists() && !repository.mkdirs())
38 | {
39 | throw new IOException("Unable to mkdirs to " + repository.getAbsolutePath());
40 | }
41 |
42 | fileItemsFactory = setupFileItemFactory(repository, context);
43 |
44 | ServletFileUpload upload = new ServletFileUpload(fileItemsFactory);
 45 |         List<FileItem> formFileItems = upload.parseRequest(request);
46 |
47 | parseFormFields(formFileItems);
48 |
49 | if (files.isEmpty())
50 | {
 51 |             log.warn("No files were found when processing the request. Debugging info follows.");
52 |
53 | writeDebugInfo(request);
54 |
 55 |             throw new FileUploadException("No files were found when processing the request.");
56 | }
57 | else
58 | {
59 | if (log.isDebugEnabled())
60 | {
61 | writeDebugInfo(request);
62 | }
63 | }
64 | }
65 |
66 | private DiskFileItemFactory setupFileItemFactory(File repository, ServletContext context)
67 | {
68 | DiskFileItemFactory factory = new DiskFileItemFactory();
69 | factory.setSizeThreshold(DiskFileItemFactory.DEFAULT_SIZE_THRESHOLD);
70 | factory.setRepository(repository);
71 |
72 | FileCleaningTracker pTracker = FileCleanerCleanup.getFileCleaningTracker(context);
73 | factory.setFileCleaningTracker(pTracker);
74 |
75 | return factory;
76 | }
77 |
78 | private void writeDebugInfo(HttpServletRequest request)
79 | {
80 | log.debug("-- POST HEADERS --");
81 | for (String header : Collections.list(request.getHeaderNames()))
82 | {
83 | log.debug("{}: {}", header, request.getHeader(header));
84 | }
85 |
86 | log.debug("-- POST PARAMS --");
87 | for (String key : params.keySet())
88 | {
89 | log.debug("{}: {}", key, params.get(key));
90 | }
91 | }
92 |
 93 |     private void parseFormFields(List<FileItem> items) throws UnsupportedEncodingException
94 | {
95 | for (FileItem item : items)
96 | {
97 | if (item.isFormField())
98 | {
99 | String key = item.getFieldName();
100 | String value = item.getString("UTF-8");
101 | if (StringUtils.isNotBlank(key))
102 | {
103 | params.put(key, StringUtils.defaultString(value));
104 | }
105 | }
106 | else
107 | {
108 | files.add(item);
109 | }
110 | }
111 | }
112 |
113 |     public Map<String, String> getParams()
114 | {
115 | return params;
116 | }
117 |
118 |     public List<FileItem> getFiles()
119 | {
120 | if (files.isEmpty())
121 | {
122 | throw new RuntimeException("No FileItems exist.");
123 | }
124 |
125 | return files;
126 | }
127 |
128 | public FileItem getFirstFile()
129 | {
130 | if (files.isEmpty())
131 | {
132 | throw new RuntimeException("No FileItems exist.");
133 | }
134 |
135 | return files.iterator().next();
136 | }
137 | }
138 |
--------------------------------------------------------------------------------
/java/RequestParser.java:
--------------------------------------------------------------------------------
1 | package fineuploader;
2 |
3 | import org.apache.commons.fileupload.FileItem;
4 |
5 | import javax.servlet.http.HttpServletRequest;
6 | import java.io.BufferedReader;
7 | import java.net.URLDecoder;
8 | import java.util.Enumeration;
9 | import java.util.HashMap;
10 | import java.util.Iterator;
11 | import java.util.Map;
12 |
13 | public class RequestParser
14 | {
15 | private static String FILENAME_PARAM = "qqfile";
16 | private static String PART_INDEX_PARAM = "qqpartindex";
17 | private static String FILE_SIZE_PARAM = "qqtotalfilesize";
18 | private static String TOTAL_PARTS_PARAM = "qqtotalparts";
19 | private static String UUID_PARAM = "qquuid";
20 | private static String PART_FILENAME_PARAM = "qqfilename";
21 | private static String METHOD_PARAM = "_method";
22 |
23 | private static String GENERATE_ERROR_PARAM = "generateError";
24 |
25 | private String filename;
26 | private FileItem uploadItem;
27 | private boolean generateError;
28 |
29 | private int partIndex = -1;
30 | private long totalFileSize;
31 | private int totalParts;
32 | private String uuid;
33 | private String originalFilename;
34 | private String method;
35 |
 36 |     private Map<String, String> customParams = new HashMap<>();
37 |
38 |
39 | private RequestParser()
40 | {
41 | }
42 |
43 | //2nd param is null unless a MPFR
44 | static RequestParser getInstance(HttpServletRequest request, MultipartUploadParser multipartUploadParser) throws Exception
45 | {
46 | RequestParser requestParser = new RequestParser();
47 |
48 | if (multipartUploadParser == null)
49 | {
50 | if (request.getMethod().equals("POST") && request.getContentType() == null)
51 | {
52 | parseXdrPostParams(request, requestParser);
53 | }
54 | else
55 | {
56 | requestParser.filename = request.getParameter(FILENAME_PARAM);
57 | parseQueryStringParams(requestParser, request);
58 | }
59 | }
60 | else
61 | {
62 | requestParser.uploadItem = multipartUploadParser.getFirstFile();
63 | requestParser.filename = multipartUploadParser.getFirstFile().getName();
64 |
65 | //params could be in body or query string, depending on Fine Uploader request option properties
66 | parseRequestBodyParams(requestParser, multipartUploadParser);
67 | parseQueryStringParams(requestParser, request);
68 | }
69 |
70 | removeQqParams(requestParser.customParams);
71 |
72 | return requestParser;
73 | }
74 |
75 | public String getFilename()
76 | {
77 | return originalFilename != null ? originalFilename : filename;
78 | }
79 |
80 | //only non-null for MPFRs
81 | public FileItem getUploadItem()
82 | {
83 | return uploadItem;
84 | }
85 |
86 | public boolean generateError()
87 | {
88 | return generateError;
89 | }
90 |
91 | public int getPartIndex()
92 | {
93 | return partIndex;
94 | }
95 |
96 | public long getTotalFileSize()
97 | {
98 | return totalFileSize;
99 | }
100 |
101 | public int getTotalParts()
102 | {
103 | return totalParts;
104 | }
105 |
106 | public String getUuid()
107 | {
108 | return uuid;
109 | }
110 |
111 | public String getOriginalFilename()
112 | {
113 | return originalFilename;
114 | }
115 |
116 | public String getMethod()
117 | {
118 | return method;
119 | }
120 |
121 |     public Map<String, String> getCustomParams()
122 | {
123 | return customParams;
124 | }
125 |
126 | private static void parseRequestBodyParams(RequestParser requestParser, MultipartUploadParser multipartUploadParser) throws Exception
127 | {
128 | if (multipartUploadParser.getParams().get(GENERATE_ERROR_PARAM) != null)
129 | {
130 | requestParser.generateError = Boolean.parseBoolean(multipartUploadParser.getParams().get(GENERATE_ERROR_PARAM));
131 | }
132 |
133 | String partNumStr = multipartUploadParser.getParams().get(PART_INDEX_PARAM);
134 | if (partNumStr != null)
135 | {
136 | requestParser.partIndex = Integer.parseInt(partNumStr);
137 |
138 | requestParser.totalFileSize = Long.parseLong(multipartUploadParser.getParams().get(FILE_SIZE_PARAM));
139 | requestParser.totalParts = Integer.parseInt(multipartUploadParser.getParams().get(TOTAL_PARTS_PARAM));
140 | }
141 |
142 |         for (Map.Entry<String, String> paramEntry : multipartUploadParser.getParams().entrySet())
143 | {
144 | requestParser.customParams.put(paramEntry.getKey(), paramEntry.getValue());
145 | }
146 |
147 | if (requestParser.uuid == null)
148 | {
149 | requestParser.uuid = multipartUploadParser.getParams().get(UUID_PARAM);
150 | }
151 |
152 | if (requestParser.originalFilename == null)
153 | {
154 | requestParser.originalFilename = multipartUploadParser.getParams().get(PART_FILENAME_PARAM);
155 | }
156 | }
157 |
158 | private static void parseQueryStringParams(RequestParser requestParser, HttpServletRequest req)
159 | {
160 | if (req.getParameter(GENERATE_ERROR_PARAM) != null)
161 | {
162 | requestParser.generateError = Boolean.parseBoolean(req.getParameter(GENERATE_ERROR_PARAM));
163 | }
164 |
165 | String partNumStr = req.getParameter(PART_INDEX_PARAM);
166 | if (partNumStr != null)
167 | {
168 | requestParser.partIndex = Integer.parseInt(partNumStr);
169 | requestParser.totalFileSize = Long.parseLong(req.getParameter(FILE_SIZE_PARAM));
170 | requestParser.totalParts = Integer.parseInt(req.getParameter(TOTAL_PARTS_PARAM));
171 | }
172 |
173 |         Enumeration<String> paramNames = req.getParameterNames();
174 | while (paramNames.hasMoreElements())
175 | {
176 | String paramName = paramNames.nextElement();
177 | requestParser.customParams.put(paramName, req.getParameter(paramName));
178 | }
179 |
180 | if (requestParser.uuid == null)
181 | {
182 | requestParser.uuid = req.getParameter(UUID_PARAM);
183 | }
184 |
185 | if (requestParser.method == null)
186 | {
187 | requestParser.method = req.getParameter(METHOD_PARAM);
188 | }
189 |
190 | if (requestParser.originalFilename == null)
191 | {
192 | requestParser.originalFilename = req.getParameter(PART_FILENAME_PARAM);
193 | }
194 | }
195 |
196 |     private static void removeQqParams(Map<String, String> customParams)
197 | {
198 |         Iterator<Map.Entry<String, String>> paramIterator = customParams.entrySet().iterator();
199 |
200 | while (paramIterator.hasNext())
201 | {
202 |             Map.Entry<String, String> paramEntry = paramIterator.next();
203 | if (paramEntry.getKey().startsWith("qq"))
204 | {
205 | paramIterator.remove();
206 | }
207 | }
208 | }
209 |
210 | private static void parseXdrPostParams(HttpServletRequest request, RequestParser requestParser) throws Exception
211 | {
212 | String queryString = getQueryStringFromRequestBody(request);
213 | String[] queryParams = queryString.split("&");
214 |
215 | for (String queryParam : queryParams)
216 | {
217 | String[] keyAndVal = queryParam.split("=");
218 | String key = URLDecoder.decode(keyAndVal[0], "UTF-8");
219 | String value = URLDecoder.decode(keyAndVal[1], "UTF-8");
220 |
221 | if (key.equals(UUID_PARAM))
222 | {
223 | requestParser.uuid = value;
224 | }
225 | else if (key.equals(METHOD_PARAM))
226 | {
227 | requestParser.method = value;
228 | }
229 | else
230 | {
231 | requestParser.customParams.put(key, value);
232 | }
233 | }
234 | }
235 |
236 | private static String getQueryStringFromRequestBody(HttpServletRequest request) throws Exception
237 | {
238 | StringBuilder content = new StringBuilder();
239 | BufferedReader reader = null;
240 |
241 | try
242 | {
243 | reader = request.getReader();
244 | char[] chars = new char[128];
245 | int bytesRead;
246 | while ( (bytesRead = reader.read(chars)) != -1 )
247 | {
248 | content.append(chars, 0, bytesRead);
249 | }
250 | }
251 | finally
252 | {
253 | if (reader != null)
254 | {
255 | reader.close();
256 | }
257 | }
258 |
259 | return content.toString();
260 | }
261 | }
262 |
--------------------------------------------------------------------------------
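
RequestParser above reads the chunking fields (qqpartindex, qqtotalparts, qqtotalfilesize), the uuid and original filename, and a _method override used for POST-based deletes, and treats everything else as custom parameters. Below is a hedged sketch of a client configuration that exercises those paths; the /upload servlet mapping and the element id are assumptions.

// Hedged sketch; assumes UploadReceiver is mapped to /upload in web.xml.
var uploader = new qq.FineUploader({
    element: document.getElementById('fine-uploader'),
    request: {
        endpoint: '/upload',
        params: { generateError: false }    // read by RequestParser as a custom/debug parameter
    },
    chunking: {
        enabled: true                       // sends qqpartindex, qqtotalparts, qqtotalfilesize
    },
    deleteFile: {
        enabled: true,
        endpoint: '/upload',
        method: 'POST'                      // sends _method=DELETE, surfaced via getMethod()
    }
});
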
/java/UploadReceiver.java:
--------------------------------------------------------------------------------
1 | package fineuploader;
2 |
3 | import org.apache.commons.fileupload.servlet.ServletFileUpload;
4 | import org.apache.commons.io.FileUtils;
5 | import org.apache.commons.io.IOUtils;
6 | import org.slf4j.Logger;
7 | import org.slf4j.LoggerFactory;
8 |
9 | import javax.servlet.ServletException;
10 | import javax.servlet.http.HttpServlet;
11 | import javax.servlet.http.HttpServletRequest;
12 | import javax.servlet.http.HttpServletResponse;
13 | import java.io.*;
14 | import java.util.Arrays;
15 | import java.util.regex.Pattern;
16 |
17 | //commented code blocks are only used for CORS environments
18 | public class UploadReceiver extends HttpServlet
19 | {
20 | private static final File UPLOAD_DIR = new File("test/uploads");
21 | private static File TEMP_DIR = new File("test/uploadsTemp");
22 |
23 | private static String CONTENT_LENGTH = "Content-Length";
24 | private static int SUCCESS_RESPONSE_CODE = 200;
25 |
26 | final Logger log = LoggerFactory.getLogger(UploadReceiver.class);
27 |
28 |
29 | @Override
30 | public void init() throws ServletException
31 | {
32 | UPLOAD_DIR.mkdirs();
33 | }
34 |
35 | @Override
36 | public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException
37 | {
38 | String uuid = req.getPathInfo().replaceAll("/", "");
39 |
40 | handleDeleteFileRequest(uuid, resp);
41 | }
42 |
43 | private void handleDeleteFileRequest(String uuid, HttpServletResponse resp) throws IOException
44 | {
45 | FileUtils.deleteDirectory(new File(UPLOAD_DIR, uuid));
46 |
47 | if (new File(UPLOAD_DIR, uuid).exists())
48 | {
49 | log.warn("couldn't find or delete " + uuid);
50 | }
51 | else
52 | {
53 | log.info("deleted " + uuid);
54 | }
55 |
56 | resp.setStatus(SUCCESS_RESPONSE_CODE);
57 | // resp.addHeader("Access-Control-Allow-Origin", "*");
58 | }
59 |
60 | @Override
61 | public void doOptions(HttpServletRequest req, HttpServletResponse resp)
62 | {
63 | resp.setStatus(SUCCESS_RESPONSE_CODE);
64 | resp.addHeader("Access-Control-Allow-Origin", "http://192.168.130.118:8080");
65 | // resp.addHeader("Access-Control-Allow-Credentials", "true");
66 | resp.addHeader("Access-Control-Allow-Methods", "POST, DELETE");
67 | resp.addHeader("Access-Control-Allow-Headers", "x-requested-with, cache-control, content-type");
68 | }
69 |
70 | @Override
71 | public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException
72 | {
73 | RequestParser requestParser = null;
74 |
75 | boolean isIframe = req.getHeader("X-Requested-With") == null || !req.getHeader("X-Requested-With").equals("XMLHttpRequest");
76 |
77 | try
78 | {
79 | // resp.setContentType(isIframe ? "text/html" : "text/plain");
80 | resp.setContentType("text/plain");
81 | resp.setStatus(SUCCESS_RESPONSE_CODE);
82 |
83 | // resp.addHeader("Access-Control-Allow-Origin", "http://192.168.130.118:8080");
84 | // resp.addHeader("Access-Control-Allow-Credentials", "true");
85 | // resp.addHeader("Access-Control-Allow-Origin", "*");
86 |
87 | if (ServletFileUpload.isMultipartContent(req))
88 | {
89 | MultipartUploadParser multipartUploadParser = new MultipartUploadParser(req, TEMP_DIR, getServletContext());
90 | requestParser = RequestParser.getInstance(req, multipartUploadParser);
91 | writeFileForMultipartRequest(requestParser);
92 | writeResponse(resp.getWriter(), requestParser.generateError() ? "Generated error" : null, isIframe, false, requestParser);
93 | }
94 | else
95 | {
96 | requestParser = RequestParser.getInstance(req, null);
97 |
98 | //handle POST delete file request
99 | if (requestParser.getMethod() != null
100 | && requestParser.getMethod().equalsIgnoreCase("DELETE"))
101 | {
102 | String uuid = requestParser.getUuid();
103 | handleDeleteFileRequest(uuid, resp);
104 | }
105 | else
106 | {
107 | writeFileForNonMultipartRequest(req, requestParser);
108 | writeResponse(resp.getWriter(), requestParser.generateError() ? "Generated error" : null, isIframe, false, requestParser);
109 | }
110 | }
111 | } catch (Exception e)
112 | {
113 | log.error("Problem handling upload request", e);
114 | if (e instanceof MergePartsException)
115 | {
116 | writeResponse(resp.getWriter(), e.getMessage(), isIframe, true, requestParser);
117 | }
118 | else
119 | {
120 | writeResponse(resp.getWriter(), e.getMessage(), isIframe, false, requestParser);
121 | }
122 | }
123 | }
124 |
125 | private void writeFileForNonMultipartRequest(HttpServletRequest req, RequestParser requestParser) throws Exception
126 | {
127 | File dir = new File(UPLOAD_DIR, requestParser.getUuid());
128 | dir.mkdirs();
129 |
130 | String contentLengthHeader = req.getHeader(CONTENT_LENGTH);
131 | long expectedFileSize = Long.parseLong(contentLengthHeader);
132 |
133 | if (requestParser.getPartIndex() >= 0)
134 | {
135 | writeFile(req.getInputStream(), new File(dir, requestParser.getUuid() + "_" + String.format("%05d", requestParser.getPartIndex())), null);
136 |
137 | if (requestParser.getTotalParts()-1 == requestParser.getPartIndex())
138 | {
139 | File[] parts = getPartitionFiles(dir, requestParser.getUuid());
140 | File outputFile = new File(dir, requestParser.getFilename());
141 | for (File part : parts)
142 | {
143 | mergeFiles(outputFile, part);
144 | }
145 |
146 | assertCombinedFileIsValid(requestParser.getTotalFileSize(), outputFile, requestParser.getUuid());
147 | deletePartitionFiles(dir, requestParser.getUuid());
148 | }
149 | }
150 | else
151 | {
152 | writeFile(req.getInputStream(), new File(dir, requestParser.getFilename()), expectedFileSize);
153 | }
154 | }
155 |
156 |
157 | private void writeFileForMultipartRequest(RequestParser requestParser) throws Exception
158 | {
159 | File dir = new File(UPLOAD_DIR, requestParser.getUuid());
160 | dir.mkdirs();
161 |
162 | if (requestParser.getPartIndex() >= 0)
163 | {
164 | writeFile(requestParser.getUploadItem().getInputStream(), new File(dir, requestParser.getUuid() + "_" + String.format("%05d", requestParser.getPartIndex())), null);
165 |
166 | if (requestParser.getTotalParts()-1 == requestParser.getPartIndex())
167 | {
168 | File[] parts = getPartitionFiles(dir, requestParser.getUuid());
169 | File outputFile = new File(dir, requestParser.getOriginalFilename());
170 | for (File part : parts)
171 | {
172 | mergeFiles(outputFile, part);
173 | }
174 |
175 | assertCombinedFileIsValid(requestParser.getTotalFileSize(), outputFile, requestParser.getUuid());
176 | deletePartitionFiles(dir, requestParser.getUuid());
177 | }
178 | }
179 | else
180 | {
181 | writeFile(requestParser.getUploadItem().getInputStream(), new File(dir, requestParser.getFilename()), null);
182 | }
183 | }
184 |
185 | private void assertCombinedFileIsValid(long totalFileSize, File outputFile, String uuid) throws MergePartsException
186 | {
187 | if (totalFileSize != outputFile.length())
188 | {
189 | deletePartitionFiles(UPLOAD_DIR, uuid);
190 | outputFile.delete();
191 | throw new MergePartsException("Incorrect combined file size!");
192 | }
193 |
194 | }
195 |
196 |
197 | private static class PartitionFilesFilter implements FilenameFilter
198 | {
199 | private String filename;
200 | PartitionFilesFilter(String filename)
201 | {
202 | this.filename = filename;
203 | }
204 |
205 | @Override
206 | public boolean accept(File file, String s)
207 | {
208 | return s.matches(Pattern.quote(filename) + "_\\d+");
209 | }
210 | }
211 |
212 | private static File[] getPartitionFiles(File directory, String filename)
213 | {
214 | File[] files = directory.listFiles(new PartitionFilesFilter(filename));
215 | Arrays.sort(files);
216 | return files;
217 | }
218 |
219 | private static void deletePartitionFiles(File directory, String filename)
220 | {
221 | File[] partFiles = getPartitionFiles(directory, filename);
222 | for (File partFile : partFiles)
223 | {
224 | partFile.delete();
225 | }
226 | }
227 |
228 | private File mergeFiles(File outputFile, File partFile) throws IOException
229 | {
230 | FileOutputStream fos = new FileOutputStream(outputFile, true);
231 |
232 | try
233 | {
234 | FileInputStream fis = new FileInputStream(partFile);
235 |
236 | try
237 | {
238 | IOUtils.copy(fis, fos);
239 | }
240 | finally
241 | {
242 | IOUtils.closeQuietly(fis);
243 | }
244 | }
245 | finally
246 | {
247 | IOUtils.closeQuietly(fos);
248 | }
249 |
250 | return outputFile;
251 | }
252 |
253 | private File writeFile(InputStream in, File out, Long expectedFileSize) throws IOException
254 | {
255 | FileOutputStream fos = null;
256 |
257 | try
258 | {
259 | fos = new FileOutputStream(out);
260 |
261 | IOUtils.copy(in, fos);
262 |
263 | if (expectedFileSize != null)
264 | {
265 | Long bytesWrittenToDisk = out.length();
266 | if (!expectedFileSize.equals(bytesWrittenToDisk))
267 | {
268 | log.warn("Expected file {} to be {} bytes; file on disk is {} bytes", new Object[] { out.getAbsolutePath(), expectedFileSize, bytesWrittenToDisk });
269 | out.delete();
270 | throw new IOException(String.format("Unexpected file size mismatch. Actual bytes %s. Expected bytes %s.", bytesWrittenToDisk, expectedFileSize));
271 | }
272 | }
273 |
274 | return out;
275 | }
276 | catch (Exception e)
277 | {
278 | throw new IOException(e);
279 | }
280 | finally
281 | {
282 | IOUtils.closeQuietly(fos);
283 | }
284 | }
285 |
286 | private void writeResponse(PrintWriter writer, String failureReason, boolean isIframe, boolean restartChunking, RequestParser requestParser)
287 | {
288 | if (failureReason == null)
289 | {
290 | // if (isIframe)
291 | // {
292 | // writer.print("{\"success\": true, \"uuid\": \"" + requestParser.getUuid() + "\"}");
293 | // }
294 | // else
295 | // {
296 | writer.print("{\"success\": true}");
297 | // }
298 | }
299 | else
300 | {
301 | if (restartChunking)
302 | {
303 | writer.print("{\"error\": \"" + failureReason + "\", \"reset\": true}");
304 | }
305 | else
306 | {
307 | // if (isIframe)
308 | // {
309 | // writer.print("{\"error\": \"" + failureReason + "\", \"uuid\": \"" + requestParser.getUuid() + "\"}");
310 | // }
311 | // else
312 | // {
313 |
314 | writer.print("{\"error\": \"" + failureReason + "\"}");
315 | // }
316 | }
317 | }
318 | }
319 |
320 | private class MergePartsException extends Exception
321 | {
322 | MergePartsException(String message)
323 | {
324 | super(message);
325 | }
326 | }
327 | }
328 |
--------------------------------------------------------------------------------
/java/s3/S3Uploads.java:
--------------------------------------------------------------------------------
1 | package fineuploader.s3;
2 |
3 | import com.amazonaws.auth.AWSCredentials;
4 | import com.amazonaws.auth.BasicAWSCredentials;
5 | import com.amazonaws.services.s3.AmazonS3;
6 | import com.amazonaws.services.s3.AmazonS3Client;
7 | import com.amazonaws.util.BinaryUtils;
8 | import com.google.gson.JsonArray;
9 | import com.google.gson.JsonElement;
10 | import com.google.gson.JsonObject;
11 | import com.google.gson.JsonParser;
12 | import org.apache.commons.codec.binary.Hex;
13 |
14 | import javax.crypto.Mac;
15 | import javax.crypto.spec.SecretKeySpec;
16 | import javax.servlet.http.HttpServlet;
17 | import javax.servlet.http.HttpServletRequest;
18 | import javax.servlet.http.HttpServletResponse;
19 | import java.io.IOException;
20 | import java.io.UnsupportedEncodingException;
21 | import java.security.InvalidKeyException;
22 | import java.security.MessageDigest;
23 | import java.security.NoSuchAlgorithmException;
24 | import java.util.regex.Matcher;
25 | import java.util.regex.Pattern;
26 |
27 | /**
28 | * Java Server-Side Example for Fine Uploader S3.
29 | * Maintained by Widen Enterprises.
30 | *
31 | * This example:
32 | * - handles non-CORS environments
33 | * - handles delete file requests via the DELETE method
34 | * - signs policy documents (simple uploads) and REST requests
35 | * (chunked/multipart uploads)
36 | * - handles both version 2 and version 4 signatures
37 | *
38 | * Requirements:
39 | * - Java 1.5 or newer
40 | * - Google GSon
41 | * - Amazon Java SDK (only if utilizing the delete file feature)
42 | *
43 | * If you need to install the AWS SDK for Java, see https://aws.amazon.com/sdk-for-java/.
44 | */
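// A minimal deployment sketch (not part of the original example): doPost branches on
// getServletPath() ending in "s3/signature" or "s3/success", so one way to wire the
// servlet up is with exact-match url-patterns in web.xml. The servlet name and paths
// below are assumptions; adjust them to match your client-side configuration, and add
// a mapping for the delete endpoint if you enable that feature.
//
//   <servlet>
//     <servlet-name>s3Uploads</servlet-name>
//     <servlet-class>fineuploader.s3.S3Uploads</servlet-class>
//   </servlet>
//   <servlet-mapping>
//     <servlet-name>s3Uploads</servlet-name>
//     <url-pattern>/s3/signature</url-pattern>
//   </servlet-mapping>
//   <servlet-mapping>
//     <servlet-name>s3Uploads</servlet-name>
//     <url-pattern>/s3/success</url-pattern>
//   </servlet-mapping>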
45 | public class S3Uploads extends HttpServlet
46 | {
47 | // This assumes your secret key is available in an environment variable.
48 | // It is needed to sign policy documents.
49 | final static String AWS_SECRET_KEY = System.getenv("AWS_SECRET_KEY");
50 |
51 | // You will need to use your own public key here.
52 | final static String AWS_PUBLIC_KEY = "AKIAJLRYC5FTY3VRRTDA";
53 |
54 |
55 | // Main entry point for POST requests from Fine Uploader. This currently assumes delete file requests use the
56 | // default method of DELETE, but that can be adjusted.
57 | public void doPost(HttpServletRequest req, HttpServletResponse resp) throws IOException
58 | {
59 | if (req.getServletPath().endsWith("s3/signature"))
60 | {
61 | handleSignatureRequest(req, resp);
62 | }
63 | else if (req.getServletPath().endsWith("s3/success"))
64 | {
65 | handleUploadSuccessRequest(req, resp);
66 | }
67 | }
68 |
69 | // Main entry point for DELETE requests sent by Fine Uploader.
70 | @Override
71 | public void doDelete(HttpServletRequest req, HttpServletResponse resp) throws IOException
72 | {
73 | String key = req.getParameter("key");
74 | String bucket = req.getParameter("bucket");
75 |
76 | resp.setStatus(200);
77 |
78 | AWSCredentials myCredentials = new BasicAWSCredentials(AWS_PUBLIC_KEY, AWS_SECRET_KEY);
79 | AmazonS3 s3Client = new AmazonS3Client(myCredentials);
80 | s3Client.deleteObject(bucket, key);
81 | }
82 |
83 | // Called by the main POST request handler if Fine Uploader has asked for an item to be signed. The item may be a
84 | // policy document or a string that represents multipart upload request headers.
85 | private void handleSignatureRequest(HttpServletRequest req, HttpServletResponse resp) throws IOException
86 | {
87 | resp.setStatus(200);
88 |
89 | JsonParser jsonParser = new JsonParser();
90 | JsonElement contentJson = jsonParser.parse(req.getReader());
91 | JsonObject jsonObject = contentJson.getAsJsonObject();
92 |
93 | if (req.getQueryString() != null && req.getQueryString().contains("v4=true")) {
94 | handleV4SignatureRequest(jsonObject, contentJson, req, resp);
95 | }
96 | else {
97 | handleV2SignatureRequest(jsonObject, contentJson, req, resp);
98 | }
99 |
100 | resp.setStatus(200);
101 | }
102 |
103 | private void handleV2SignatureRequest(JsonObject payload, JsonElement contentJson, HttpServletRequest req, HttpServletResponse resp) throws IOException{
104 | String signature;
105 | JsonElement headers = payload.get("headers");
106 | JsonObject response = new JsonObject();
107 |
108 | try
109 | {
110 | // If this is not a multipart upload-related request, Fine Uploader will send a policy document
111 | // as the value of a "policy" property in the request. In that case, we must base-64 encode
112 | // the policy document and then sign it. The response will include the base-64 encoded policy and its signature.
113 | if (headers == null)
114 | {
115 | String base64Policy = base64EncodePolicy(contentJson);
116 | signature = sign(base64Policy);
117 |
118 | // Validate the policy document to ensure the client hasn't tampered with it.
119 | // If it has been tampered with, set this property on the response and set the status to a non-200 value.
120 | // response.addProperty("invalid", true);
121 |
122 | response.addProperty("policy", base64Policy);
123 | }
124 |
125 | // If this is a request to sign a multipart upload-related request, we only need to sign the headers,
126 | // which are passed as the value of a "headers" property from Fine Uploader. In this case,
127 | // we only need to return the signed value.
128 | else
129 | {
130 | signature = sign(headers.getAsString());
131 | }
132 |
133 | response.addProperty("signature", signature);
134 | resp.getWriter().write(response.toString());
135 | }
136 | catch (Exception e)
137 | {
138 | resp.setStatus(500);
139 | }
140 | }
141 |
142 | private void handleV4SignatureRequest(JsonObject payload, JsonElement contentJson, HttpServletRequest req, HttpServletResponse resp) throws IOException{
143 | String signature = null;
144 | JsonElement headers = payload.get("headers");
145 | JsonObject response = new JsonObject();
146 |
147 | try
148 | {
149 | // If this is not a multipart upload-related request, Fine Uploader will send a policy document
150 | // as the value of a "policy" property in the request. In that case, we must base-64 encode
151 | // the policy document and then sign it. The response will include the base-64 encoded policy and its signature.
152 | if (headers == null)
153 | {
154 | String base64Policy = base64EncodePolicy(contentJson);
155 | JsonArray conditions = payload.getAsJsonArray("conditions");
156 | String credentialCondition = null;
157 | for (int i = 0; i < conditions.size(); i++) {
158 | JsonObject condition = conditions.get(i).getAsJsonObject();
159 | JsonElement value = condition.get("x-amz-credential");
160 | if (value != null) {
161 | credentialCondition = value.getAsString();
162 | break;
163 | }
164 | }
165 |
166 | // Validate the policy document to ensure the client hasn't tampered with it.
167 | // If it has been tampered with, set this property on the response and set the status to a non-200 value.
168 | // response.addProperty("invalid", true);
169 |
170 |
171 | Pattern pattern = Pattern.compile(".+\\/(.+)\\/(.+)\\/s3\\/aws4_request");
172 | Matcher matcher = pattern.matcher(credentialCondition);
173 | matcher.matches();
174 | signature = getV4Signature(matcher.group(1), matcher.group(2), base64Policy);
175 |
176 | response.addProperty("policy", base64Policy);
177 | }
178 |
179 | // If this is a request to sign a multipart upload-related request, we only need to sign the headers,
180 | // which are passed as the value of a "headers" property from Fine Uploader. In this case,
181 | // we only need to return the signed value.
182 | else
183 | {
184 | Pattern pattern = Pattern.compile(".+\\n.+\\n(\\d+)\\/(.+)\\/s3\\/aws4_request\\n(.+)", Pattern.DOTALL);
185 | Matcher matcher = pattern.matcher(headers.getAsString());
186 | matcher.matches();
187 | String canonicalRequest = matcher.group(3);
188 | String hashedCanonicalRequest = hash256(canonicalRequest);
189 | String stringToSign = headers.getAsString().replaceAll("(?s)(.+s3\\/aws4_request\\n).+", "$1" + hashedCanonicalRequest);
190 |
191 | // Validate the policy document to ensure the client hasn't tampered with it.
192 | // If it has been tampered with, set this property on the response and set the status to a non-200 value.
193 | // response.addProperty("invalid", true);
194 |
195 | signature = getV4Signature(matcher.group(1), matcher.group(2), stringToSign);
196 | }
197 |
198 | response.addProperty("signature", signature);
199 | resp.getWriter().write(response.toString());
200 | }
201 | catch (Exception e)
202 | {
203 | resp.setStatus(500);
204 | }
205 | }
206 |
207 | // Called by the main POST request handler if Fine Uploader has indicated that the file has been
208 | // successfully sent to S3. You have the opportunity here to examine the file in S3 and "fail" the upload
209 | // if something is not correct.
210 | private void handleUploadSuccessRequest(HttpServletRequest req, HttpServletResponse resp)
211 | {
212 | String key = req.getParameter("key");
213 | String uuid = req.getParameter("uuid");
214 | String bucket = req.getParameter("bucket");
215 | String name = req.getParameter("name");
216 |
217 | resp.setStatus(200);
218 |
219 | System.out.println(String.format("Upload successfully sent to S3! Bucket: %s, Key: %s, UUID: %s, Filename: %s",
220 | bucket, key, uuid, name));
221 | }
222 |
223 | private String getV4Signature(String date, String region, String stringToSign) throws Exception {
224 | byte[] kSecret = ("AWS4" + AWS_SECRET_KEY).getBytes("UTF8");
225 | byte[] kDate = sha256Encode(date, kSecret);
226 | byte[] kRegion = sha256Encode(region, kDate);
227 | byte[] kService = sha256Encode("s3", kRegion);
228 | byte[] kSigning = sha256Encode("aws4_request", kService);
229 | byte[] kSignature = sha256Encode(stringToSign, kSigning);
230 |
231 | return Hex.encodeHexString(kSignature);
232 | }
233 |
234 | private byte[] sha256Encode(String data, byte[] key) throws Exception {
235 | String algorithm="HmacSHA256";
236 | Mac mac = Mac.getInstance(algorithm);
237 | mac.init(new SecretKeySpec(key, algorithm));
238 | return mac.doFinal(data.getBytes("UTF8"));
239 | }
240 |
241 | private String hash256(String data) throws NoSuchAlgorithmException {
242 | MessageDigest md = MessageDigest.getInstance("SHA-256");
243 | md.update(data.getBytes());
244 | return bytesToHex(md.digest());
245 | }
246 |
247 | private String bytesToHex(byte[] bytes) {
248 | StringBuffer result = new StringBuffer();
249 | for (byte byt : bytes) result.append(Integer.toString((byt & 0xff) + 0x100, 16).substring(1));
250 | return result.toString();
251 | }
252 |
253 | private String base64EncodePolicy(JsonElement jsonElement) throws UnsupportedEncodingException
254 | {
255 | String policyJsonStr = jsonElement.toString();
256 | String base64Encoded = BinaryUtils.toBase64 (policyJsonStr.getBytes("UTF-8"));
257 |
258 | return base64Encoded;
259 | }
260 |
261 | private String sign(String toSign) throws UnsupportedEncodingException, NoSuchAlgorithmException, InvalidKeyException
262 | {
263 | Mac hmac = Mac.getInstance("HmacSHA1");
264 | hmac.init(new SecretKeySpec(AWS_SECRET_KEY.getBytes("UTF-8"), "HmacSHA1"));
265 | String signature = BinaryUtils.toBase64 (hmac.doFinal(toSign.getBytes("UTF-8")));
266 |
267 | return signature;
268 | }
269 | }
270 |
--------------------------------------------------------------------------------
/license.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2013-present, Widen Enterprises, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/nodejs/nodejs.js:
--------------------------------------------------------------------------------
1 | /**
2 | * NodeJs Server-Side Example for Fine Uploader (traditional endpoints).
3 | * Maintained by Widen Enterprises.
4 | *
5 | * This example:
6 | * - handles non-CORS environments
7 | * - handles delete file requests assuming the method is DELETE
8 | * - Ensures the file size does not exceed the max
9 | * - Handles chunked upload requests
10 | *
11 | * Requirements:
12 | * - express (for handling requests)
13 | * - rimraf (for "rm -rf" support)
14 | * - multiparty (for parsing request payloads)
15 | * - mkdirp (for "mkdir -p" support)
16 | */
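// A hedged usage note (not part of the original header): all of the constants below are
// driven by environment variables, so a local run might look like the line underneath.
// The directory values are only examples; note that UPLOADED_FILES_DIR is concatenated
// directly with each file's uuid, so it should include a trailing slash.
//
//   PUBLIC_DIR=./public NODE_MODULES_DIR=./node_modules UPLOADED_FILES_DIR=./uploads/ node nodejs.js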
17 |
18 | // Dependencies
19 | var express = require("express"),
20 | fs = require("fs"),
21 | rimraf = require("rimraf"),
22 | mkdirp = require("mkdirp"),
23 | multiparty = require('multiparty'),
24 | app = express(),
25 |
26 | // paths/constants
27 | fileInputName = process.env.FILE_INPUT_NAME || "qqfile",
28 | publicDir = process.env.PUBLIC_DIR,
29 | nodeModulesDir = process.env.NODE_MODULES_DIR,
30 | uploadedFilesPath = process.env.UPLOADED_FILES_DIR,
31 | chunkDirName = "chunks",
32 | port = process.env.SERVER_PORT || 8000,
33 | maxFileSize = process.env.MAX_FILE_SIZE || 0; // in bytes, 0 for unlimited
34 |
35 |
36 | app.listen(port);
37 |
38 | // routes
39 | app.use(express.static(publicDir));
40 | app.use("/node_modules", express.static(nodeModulesDir));
41 | app.post("/uploads", onUpload);
42 | app.delete("/uploads/:uuid", onDeleteFile);
43 |
44 |
45 | function onUpload(req, res) {
46 | var form = new multiparty.Form();
47 |
48 | form.parse(req, function(err, fields, files) {
49 | var partIndex = fields.qqpartindex;
50 |
51 | // text/plain is required to ensure support for IE9 and older
52 | res.set("Content-Type", "text/plain");
53 |
54 | if (partIndex == null) {
55 | onSimpleUpload(fields, files[fileInputName][0], res);
56 | }
57 | else {
58 | onChunkedUpload(fields, files[fileInputName][0], res);
59 | }
60 | });
61 | }
62 |
63 | function onSimpleUpload(fields, file, res) {
64 | var uuid = fields.qquuid,
65 | responseData = {
66 | success: false
67 | };
68 |
69 | file.name = fields.qqfilename;
70 |
71 | if (isValid(file.size)) {
72 | moveUploadedFile(file, uuid, function() {
73 | responseData.success = true;
74 | res.send(responseData);
75 | },
76 | function() {
77 | responseData.error = "Problem copying the file!";
78 | res.send(responseData);
79 | });
80 | }
81 | else {
82 | failWithTooBigFile(responseData, res);
83 | }
84 | }
85 |
86 | function onChunkedUpload(fields, file, res) {
87 | var size = parseInt(fields.qqtotalfilesize),
88 | uuid = fields.qquuid,
89 | index = fields.qqpartindex,
90 | totalParts = parseInt(fields.qqtotalparts),
91 | responseData = {
92 | success: false
93 | };
94 |
95 | file.name = fields.qqfilename;
96 |
97 | if (isValid(size)) {
98 | storeChunk(file, uuid, index, totalParts, function() {
99 | if (index < totalParts - 1) {
100 | responseData.success = true;
101 | res.send(responseData);
102 | }
103 | else {
104 | combineChunks(file, uuid, function() {
105 | responseData.success = true;
106 | res.send(responseData);
107 | },
108 | function() {
109 | responseData.error = "Problem combining the chunks!";
110 | res.send(responseData);
111 | });
112 | }
113 | },
114 | function(reset) {
115 | responseData.error = "Problem storing the chunk!";
116 | res.send(responseData);
117 | });
118 | }
119 | else {
120 | failWithTooBigFile(responseData, res);
121 | }
122 | }
123 |
124 | function failWithTooBigFile(responseData, res) {
125 | responseData.error = "Too big!";
126 | responseData.preventRetry = true;
127 | res.send(responseData);
128 | }
129 |
130 | function onDeleteFile(req, res) {
131 | var uuid = req.params.uuid,
132 | dirToDelete = uploadedFilesPath + uuid;
133 |
134 | rimraf(dirToDelete, function(error) {
135 | if (error) {
136 | console.error("Problem deleting file! " + error);
137 | res.status(500);
138 | }
139 |
140 | res.send();
141 | });
142 | }
143 |
144 | function isValid(size) {
145 | return maxFileSize === 0 || size < maxFileSize;
146 | }
147 |
148 | function moveFile(destinationDir, sourceFile, destinationFile, success, failure) {
149 | mkdirp(destinationDir, function(error) {
150 | var sourceStream, destStream;
151 |
152 | if (error) {
153 | console.error("Problem creating directory " + destinationDir + ": " + error);
154 | failure();
155 | }
156 | else {
157 | sourceStream = fs.createReadStream(sourceFile);
158 | destStream = fs.createWriteStream(destinationFile);
159 |
160 | sourceStream
161 | .on("error", function(error) {
162 | console.error("Problem copying file: " + error.stack);
163 | destStream.end();
164 | failure();
165 | })
166 | .on("end", function(){
167 | destStream.end();
168 | success();
169 | })
170 | .pipe(destStream);
171 | }
172 | });
173 | }
174 |
175 | function moveUploadedFile(file, uuid, success, failure) {
176 | var destinationDir = uploadedFilesPath + uuid + "/",
177 | fileDestination = destinationDir + file.name;
178 |
179 | moveFile(destinationDir, file.path, fileDestination, success, failure);
180 | }
181 |
182 | function storeChunk(file, uuid, index, numChunks, success, failure) {
183 | var destinationDir = uploadedFilesPath + uuid + "/" + chunkDirName + "/",
184 | chunkFilename = getChunkFilename(index, numChunks),
185 | fileDestination = destinationDir + chunkFilename;
186 |
187 | moveFile(destinationDir, file.path, fileDestination, success, failure);
188 | }
189 |
190 | function combineChunks(file, uuid, success, failure) {
191 | var chunksDir = uploadedFilesPath + uuid + "/" + chunkDirName + "/",
192 | destinationDir = uploadedFilesPath + uuid + "/",
193 | fileDestination = destinationDir + file.name;
194 |
195 |
196 | fs.readdir(chunksDir, function(err, fileNames) {
197 | var destFileStream;
198 |
199 | if (err) {
200 | console.error("Problem listing chunks! " + err);
201 | failure();
202 | }
203 | else {
204 | fileNames.sort();
205 | destFileStream = fs.createWriteStream(fileDestination, {flags: "a"});
206 |
207 | appendToStream(destFileStream, chunksDir, fileNames, 0, function() {
208 | rimraf(chunksDir, function(rimrafError) {
209 | if (rimrafError) {
210 | console.log("Problem deleting chunks dir! " + rimrafError);
211 | }
212 | });
213 | success();
214 | },
215 | failure);
216 | }
217 | });
218 | }
219 |
220 | function appendToStream(destStream, srcDir, srcFilesnames, index, success, failure) {
221 | if (index < srcFilesnames.length) {
222 | fs.createReadStream(srcDir + srcFilesnames[index])
223 | .on("end", function() {
224 | appendToStream(destStream, srcDir, srcFilesnames, index + 1, success, failure);
225 | })
226 | .on("error", function(error) {
227 | console.error("Problem appending chunk! " + error);
228 | destStream.end();
229 | failure();
230 | })
231 | .pipe(destStream, {end: false});
232 | }
233 | else {
234 | destStream.end();
235 | success();
236 | }
237 | }
238 |
239 | function getChunkFilename(index, count) {
240 | var digits = new String(count).length,
241 | zeros = new Array(digits + 1).join("0");
242 |
243 | return (zeros + index).slice(-digits);
244 | }
245 |
--------------------------------------------------------------------------------
/nodejs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name" : "fine-uploader-traditional-server",
3 | "description" : "Fine Uploader NodeJS example server for traditional server environments",
4 | "dependencies" : {
5 | "body-parser": "1.14.2",
6 | "express": "4.13.3",
7 | "mkdirp": "0.5.1",
8 | "multiparty": "4.1.2",
9 | "rimraf": "2.5.0"
10 | },
11 | "files": ["nodejs.js"],
12 | "version" : "3.0.3"
13 | }
14 |
--------------------------------------------------------------------------------
/nodejs/s3/s3handler.js:
--------------------------------------------------------------------------------
1 | /**
2 | * NodeJs Server-Side Example for Fine Uploader S3.
3 | * Maintained by Widen Enterprises.
4 | *
5 | * This example:
6 | * - handles non-CORS environments
7 | * - handles delete file requests assuming the method is DELETE
8 | * - re-checks that the file size does not exceed the max (after the file is in S3)
9 | * - signs policy documents (simple uploads) and REST requests
10 | * (chunked/multipart uploads)
11 | * - supports version 2 and version 4 signatures
12 | *
13 | * Requirements:
14 | * - express 3.3.5+ (for handling requests)
15 | * - crypto-js 3.1.5+ (for signing requests)
16 | * - aws-sdk 2.1.10+ (only if utilizing the AWS SDK for deleting files or otherwise examining them)
17 | *
18 | * Notes:
19 | *
20 | * Change `expectedMinSize` and `expectedMaxSize` from `null` to integers
21 | * to enable policy doc verification on size. The `validation.minSizeLimit`
22 | * and `validation.maxSizeLimit` options **must** be set client-side and
23 | * match the values you set below.
24 | *
25 | */
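// A minimal client-side sketch (assumed, not part of this example) showing how the size
// limits discussed above would be mirrored in the Fine Uploader S3 options. The element,
// endpoints, access key, and bucket URL are placeholders; the point being illustrated is
// that validation.minSizeLimit / validation.maxSizeLimit must match expectedMinSize /
// expectedMaxSize below.
//
//   var uploader = new qq.s3.FineUploader({
//       element: document.getElementById("uploader"),
//       request: {
//           endpoint: "https://fineuploadertest.s3.amazonaws.com", // your bucket URL
//           accessKey: "YOUR_AWS_PUBLIC_KEY"
//       },
//       signature: { endpoint: "/s3handler" },
//       uploadSuccess: { endpoint: "/s3handler?success" },
//       validation: {
//           minSizeLimit: 0,        // must equal expectedMinSize
//           maxSizeLimit: 15000000  // must equal expectedMaxSize
//       }
//   });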
26 |
27 | var express = require("express"),
28 | CryptoJS = require("crypto-js"),
29 | aws = require("aws-sdk"),
30 | app = express(),
31 | clientSecretKey = process.env.CLIENT_SECRET_KEY,
32 |
33 | // These two keys are only needed if you plan on using the AWS SDK
34 | serverPublicKey = process.env.SERVER_PUBLIC_KEY,
35 | serverSecretKey = process.env.SERVER_SECRET_KEY,
36 |
37 | // Set these two values to match your environment
38 | expectedBucket = "fineuploadertest",
39 | expectedHostname = "fineuploadertest.s3.amazonaws.com",
40 |
41 | // CHANGE TO INTEGERS TO ENABLE POLICY DOCUMENT VERIFICATION ON FILE SIZE
42 | // (recommended)
43 | expectedMinSize = null,
44 | expectedMaxSize = null,
45 | // EXAMPLES DIRECTLY BELOW:
46 | //expectedMinSize = 0,
47 | //expectedMaxSize = 15000000,
48 |
49 | s3;
50 |
51 |
52 | // Init S3, given your server-side keys. Only needed if using the AWS SDK.
53 | aws.config.update({
54 | accessKeyId: serverPublicKey,
55 | secretAccessKey: serverSecretKey
56 | });
57 | s3 = new aws.S3();
58 |
59 |
60 | app.use(express.bodyParser());
61 | app.use(express.static(__dirname)); //only needed if serving static content as well
62 | app.listen(8000);
63 |
64 | // Handles all signature requests and the success request FU S3 sends after the file is in S3
65 | // You will need to adjust these paths/conditions based on your setup.
66 | app.post("/s3handler", function(req, res) {
67 | if (typeof req.query.success !== "undefined") {
68 | verifyFileInS3(req, res);
69 | }
70 | else {
71 | signRequest(req, res);
72 | }
73 | });
74 |
75 | // Handles the standard DELETE (file) request sent by Fine Uploader S3.
76 | // Omit if you don't want to support this feature.
77 | app.delete("/s3handler/*", function(req, res) {
78 | deleteFile(req.query.bucket, req.query.key, function(err) {
79 | if (err) {
80 | console.log("Problem deleting file: " + err);
81 | res.status(500);
82 | }
83 |
84 | res.end();
85 | });
86 | });
87 |
88 | // Signs any requests. Delegate to a more specific signer based on type of request.
89 | function signRequest(req, res) {
90 | if (req.body.headers) {
91 | signRestRequest(req, res);
92 | }
93 | else {
94 | signPolicy(req, res);
95 | }
96 | }
97 |
98 | // Signs multipart (chunked) requests. Omit if you don't want to support chunking.
99 | function signRestRequest(req, res) {
100 | var version = req.query.v4 ? 4 : 2,
101 | stringToSign = req.body.headers,
102 | signature = version === 4 ? signV4RestRequest(stringToSign) : signV2RestRequest(stringToSign);
103 |
104 | var jsonResponse = {
105 | signature: signature
106 | };
107 |
108 | res.setHeader("Content-Type", "application/json");
109 |
110 | if (isValidRestRequest(stringToSign, version)) {
111 | res.end(JSON.stringify(jsonResponse));
112 | }
113 | else {
114 | res.status(400);
115 | res.end(JSON.stringify({invalid: true}));
116 | }
117 | }
118 |
119 | function signV2RestRequest(headersStr) {
120 | return getV2SignatureKey(clientSecretKey, headersStr);
121 | }
122 |
123 | function signV4RestRequest(headersStr) {
124 | var matches = /.+\n.+\n(\d+)\/(.+)\/s3\/aws4_request\n([\s\S]+)/.exec(headersStr),
125 | hashedCanonicalRequest = CryptoJS.SHA256(matches[3]),
126 | stringToSign = headersStr.replace(/(.+s3\/aws4_request\n)[\s\S]+/, '$1' + hashedCanonicalRequest);
127 |
128 | return getV4SignatureKey(clientSecretKey, matches[1], matches[2], "s3", stringToSign);
129 | }
130 |
131 | // Signs "simple" (non-chunked) upload requests.
132 | function signPolicy(req, res) {
133 | var policy = req.body,
134 | base64Policy = new Buffer(JSON.stringify(policy)).toString("base64"),
135 | signature = req.query.v4 ? signV4Policy(policy, base64Policy) : signV2Policy(base64Policy);
136 |
137 | var jsonResponse = {
138 | policy: base64Policy,
139 | signature: signature
140 | };
141 |
142 | res.setHeader("Content-Type", "application/json");
143 |
144 | if (isPolicyValid(req.body)) {
145 | res.end(JSON.stringify(jsonResponse));
146 | }
147 | else {
148 | res.status(400);
149 | res.end(JSON.stringify({invalid: true}));
150 | }
151 | }
152 |
153 | function signV2Policy(base64Policy) {
154 | return getV2SignatureKey(clientSecretKey, base64Policy);
155 | }
156 |
157 | function signV4Policy(policy, base64Policy) {
158 | var conditions = policy.conditions,
159 | credentialCondition;
160 |
161 | for (var i = 0; i < conditions.length; i++) {
162 | credentialCondition = conditions[i]["x-amz-credential"];
163 | if (credentialCondition != null) {
164 | break;
165 | }
166 | }
167 |
168 | var matches = /.+\/(.+)\/(.+)\/s3\/aws4_request/.exec(credentialCondition);
169 | return getV4SignatureKey(clientSecretKey, matches[1], matches[2], "s3", base64Policy);
170 | }
171 |
172 | // Ensures the REST request is targeting the correct bucket.
173 | // Omit if you don't want to support chunking.
174 | function isValidRestRequest(headerStr, version) {
175 | if (version === 4) {
176 | return new RegExp("host:" + expectedHostname).exec(headerStr) != null;
177 | }
178 |
179 | return new RegExp("\/" + expectedBucket + "\/.+$").exec(headerStr) != null;
180 | }
181 |
182 | // Ensures the policy document associated with a "simple" (non-chunked) request is
183 | // targeting the correct bucket and the min/max-size is as expected.
184 | // Comment out the expectedMaxSize and expectedMinSize variables near
185 | // the top of this file to disable size validation on the policy document.
186 | function isPolicyValid(policy) {
187 | var bucket, parsedMaxSize, parsedMinSize, isValid;
188 |
189 | policy.conditions.forEach(function(condition) {
190 | if (condition.bucket) {
191 | bucket = condition.bucket;
192 | }
193 | else if (condition instanceof Array && condition[0] === "content-length-range") {
194 | parsedMinSize = condition[1];
195 | parsedMaxSize = condition[2];
196 | }
197 | });
198 |
199 | isValid = bucket === expectedBucket;
200 |
201 | // If expectedMinSize and expectedMax size are not null (see above), then
202 | // ensure that the client and server have agreed upon the exact same
203 | // values.
204 | if (expectedMinSize != null && expectedMaxSize != null) {
205 | isValid = isValid && (parsedMinSize === expectedMinSize.toString())
206 | && (parsedMaxSize === expectedMaxSize.toString());
207 | }
208 |
209 | return isValid;
210 | }
211 |
212 | // After the file is in S3, make sure it isn't too big.
213 | // Omit if you don't have a max file size, or add more logic as required.
214 | function verifyFileInS3(req, res) {
215 | function headReceived(err, data) {
216 | if (err) {
217 | res.status(500);
218 | console.log(err);
219 | res.end(JSON.stringify({error: "Problem querying S3!"}));
220 | }
221 | else if (expectedMaxSize != null && data.ContentLength > expectedMaxSize) {
222 | res.status(400);
223 | res.write(JSON.stringify({error: "Too big!"}));
224 | deleteFile(req.body.bucket, req.body.key, function(err) {
225 | if (err) {
226 | console.log("Couldn't delete invalid file!");
227 | }
228 |
229 | res.end();
230 | });
231 | }
232 | else {
233 | res.end();
234 | }
235 | }
236 |
237 | callS3("head", {
238 | bucket: req.body.bucket,
239 | key: req.body.key
240 | }, headReceived);
241 | }
242 |
243 | function getV2SignatureKey(key, stringToSign) {
244 | var words = CryptoJS.HmacSHA1(stringToSign, key);
245 | return CryptoJS.enc.Base64.stringify(words);
246 | }
247 |
248 | function getV4SignatureKey(key, dateStamp, regionName, serviceName, stringToSign) {
249 | var kDate = CryptoJS.HmacSHA256(dateStamp, "AWS4" + key),
250 | kRegion = CryptoJS.HmacSHA256(regionName, kDate),
251 | kService = CryptoJS.HmacSHA256(serviceName, kRegion),
252 | kSigning = CryptoJS.HmacSHA256("aws4_request", kService);
253 |
254 | return CryptoJS.HmacSHA256(stringToSign, kSigning).toString();
255 | }
256 |
257 | function deleteFile(bucket, key, callback) {
258 | callS3("delete", {
259 | bucket: bucket,
260 | key: key
261 | }, callback);
262 | }
263 |
264 | function callS3(type, spec, callback) {
265 | s3[type + "Object"]({
266 | Bucket: spec.bucket,
267 | Key: spec.key
268 | }, callback);
269 | }
270 |
--------------------------------------------------------------------------------
/perl.cgi:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 |
3 | use strict;
4 | use CGI::Carp qw(fatalsToBrowser);
5 |
6 | use Digest::MD5;
7 |
8 | my $uploaddir = '/folder/to/save/in/ajax_upload/tmp_uploads';
9 |
10 | my $maxFileSize = 0.5 * 1024 * 1024; # 1/2mb max file size...
11 |
12 | use CGI;
13 | my $IN = new CGI;
14 |
15 | my $file;
16 | if ($IN->param('POSTDATA')) {
17 | $file = $IN->param('POSTDATA');
18 | } else {
19 | $file = $IN->upload('qqfile');
20 | }
21 |
22 | my $temp_id = $IN->param('temp_id');
23 |
24 | # make a random filename, and we guess the file type later on...
25 | my $name = Digest::MD5::md5_base64( rand );
26 | $name =~ s/\+/_/g;
27 | $name =~ s/\//_/g;
28 |
29 | my $type;
30 | if ($file =~ /^GIF/) {
31 | $type = "gif";
32 | } elsif ($file =~ /PNG/) {
33 | $type = "png";
34 | } elsif ($file =~ /JFIF/) {
35 | $type = "jpg";
36 | }
37 |
38 | if (!$type) {
39 | print $IN->header(); # send the header before emitting the JSON error
40 | print qq|{ "success": false, "error": "Invalid file type..." }|;
41 | print STDERR "file has NOT been uploaded... \n"; exit;
42 | }
43 | print STDERR "Making dir: $uploaddir/$temp_id \n";
44 |
45 | mkdir("$uploaddir/$temp_id");
46 |
47 | open(WRITEIT, '>', "$uploaddir/$name.$type") or die "Can't write to $uploaddir/$name.$type. Reason: $!";
48 | binmode(WRITEIT); # binmode must come after the handle is opened
49 | if ($IN->param('POSTDATA')) {
50 | print WRITEIT $file;
51 | } else {
52 | while (<$file>) {
53 | print WRITEIT;
54 | }
55 | }
56 | close(WRITEIT);
57 |
58 | my $check_size = -s "$uploaddir/$name.$type";
59 |
60 | print STDERR qq|Main filesize: $check_size Max Filesize: $maxFileSize \n\n|;
61 |
62 | print $IN->header();
63 | if ($check_size < 1) {
64 | print STDERR "ooops, it's empty - gonna get rid of it!\n";
65 | unlink("$uploaddir/$name.$type");
66 | print qq|{ "success": false, "error": "File is empty..." }|; print STDERR "file has NOT been uploaded... \n";
67 | } elsif ($check_size > $maxFileSize) {
68 | print STDERR "ooops, it's too large - gonna get rid of it!\n";
69 | unlink("$uploaddir/$name.$type");
70 | print qq|{ "success": false, "error": "File is too large..." }|; print STDERR "file has NOT been uploaded... \n";
71 | } else {
72 | print qq|{ "success": true }|;
73 |
74 | print STDERR "file has been successfully uploaded... thank you.\n";
75 | }
76 |
--------------------------------------------------------------------------------
/php/s3/README.md:
--------------------------------------------------------------------------------
1 | The S3 PHP server example has [moved to a new repository](https://github.com/FineUploader/php-s3-server).
2 | The server code is also available on packagist via composer under the name `fineuploader/php-s3-server`.
3 |
--------------------------------------------------------------------------------
/php/s3/s3demo-cors.php:
--------------------------------------------------------------------------------
1 | The S3 PHP server example has moved to a new repository: https://github.com/FineUploader/php-s3-server.
2 | The server code is also available on packagist via composer under the name "fineuploader/php-s3-server".
3 |
--------------------------------------------------------------------------------
/php/s3/s3demo.php:
--------------------------------------------------------------------------------
1 | The S3 PHP server example has moved to a new repository: https://github.com/FineUploader/php-s3-server.
2 | The server code is also available on packagist via composer under the name "fineuploader/php-s3-server".
3 |
--------------------------------------------------------------------------------
/php/traditional/README.md:
--------------------------------------------------------------------------------
1 | The traditional PHP server example has [moved to a new repository](https://github.com/FineUploader/php-traditional-server).
2 | The server code is also available on packagist via composer under the name `fineuploader/php-traditional-server`.
3 |
--------------------------------------------------------------------------------
/php/traditional/chunks/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/php/traditional/chunks/.gitignore
--------------------------------------------------------------------------------
/php/traditional/endpoint-cors.php:
--------------------------------------------------------------------------------
1 | The traditional PHP server example has moved to a new repository: https://github.com/FineUploader/php-traditional-server.
2 | The server code is also available on packagist via composer under the name "fineuploader/php-traditional-server".
3 |
--------------------------------------------------------------------------------
/php/traditional/endpoint.php:
--------------------------------------------------------------------------------
1 | The traditional PHP server example has moved to a new repository: https://github.com/FineUploader/php-traditional-server.
2 | The server code is also available on packagist via composer under the name "fineuploader/php-traditional-server".
3 |
--------------------------------------------------------------------------------
/php/traditional/files/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/php/traditional/files/.gitignore
--------------------------------------------------------------------------------
/php/traditional/handler.php:
--------------------------------------------------------------------------------
1 | The traditional PHP server example has moved to a new repository: https://github.com/FineUploader/php-traditional-server.
2 | The server code is also available on packagist via composer under the name "fineuploader/php-traditional-server".
3 |
--------------------------------------------------------------------------------
/php/traditional/li3/controllers/FineUploadController.php:
--------------------------------------------------------------------------------
1 | <?php
2 | // NOTE: the original opening of this file (use statements, the class declaration,
3 | // and the start of the upload() action) was lost during extraction; the wrapper
4 | // below is a minimal, assumed reconstruction of the Lithium controller described
5 | // in the readme.
6 | class FineUploadController extends \lithium\action\Controller {
7 |
8 | public function upload() {
17 | $this->_render['layout'] = false; // no layout
18 | $this->_render['type'] = 'json';
19 |
20 | $tempfilepath = tempnam(sys_get_temp_dir(), 'qqfu'); // tempnam() requires a prefix; 'qqfu' is an arbitrary choice
21 |
22 | if ($this->request->is('ajax')) {
23 | // i.e. do HTML5 streaming upload
24 | $pathinfo = pathinfo($_GET['qqfile']);
25 | $filename = $pathinfo['filename'];
26 | $ext = @$pathinfo['extension'];
27 | $ext = ($ext == '') ? $ext : '.' . $ext;
28 | $uploadname = $filename . $ext;
29 | $input = fopen('php://input', 'r');
30 | $temp = fopen($tempfilepath, 'w');
31 | $realsize = stream_copy_to_stream($input, $temp); // write stream to temp file
32 | @chmod($tempfilepath, 0644);
33 | fclose($input); fclose($temp); // also close the temp stream, which was left open in the original
34 | if ($realsize != (int)$_SERVER['CONTENT_LENGTH']) {
35 | $results = array('error' => 'Could not save upload file.');
36 | } else {
37 | $results = array('success' => true);
38 | }
39 | }
40 | else
41 | {
42 | // else do regular POST upload (i.e. for old non-HTML5 browsers)
43 | $size = $_FILES['qqfile']['size'];
44 | if ($size == 0) {
45 | return array('error' => 'File is empty.');
46 | }
47 | $pathinfo = pathinfo($_FILES['qqfile']['name']);
48 | $filename = $pathinfo['filename'];
49 | $ext = @$pathinfo['extension'];
50 | $ext = ($ext == '') ? $ext : '.' . $ext;
51 | $uploadname = $filename . $ext;
52 | if (!move_uploaded_file($_FILES['qqfile']['tmp_name'], $tempfilepath)) {
53 | $results = array('error' => 'Could not save upload file.');
54 | } else {
55 | @chmod($tempfilepath, 0644);
56 | $results = array('success' => true);
57 | }
58 | }
59 |
60 | return $results; // returns JSON
61 | }
62 |
63 | }
64 | ?>
65 |
--------------------------------------------------------------------------------
/php/traditional/li3/readme.md:
--------------------------------------------------------------------------------
1 | # Integration with the Lithium PHP framework #
2 |
3 | Ajai Khattri
4 |
5 | Here is an example Lithium controller implementing a basic upload action.
6 |
7 | This is a very basic uploader that relies mostly on defaults, so there are no features such as handling extra parameters (in the form or as query strings), chunking, or resume. The code performs a streaming upload for HTML5 browsers when the request is an AJAX call (I used the jQuery wrapper); otherwise the upload is handled as a regular HTTP POST. Any developer using the Lithium framework should find it easy to extend this example with extra features (feel free to fork and improve upon it :-). A hedged client-side sketch appears at the end of this readme.
8 |
9 | NB: This has only been tested in the last few releases of Firefox (up to Firefox 18), Chrome (23.0) and Safari 6. I do not have a means of testing with any versions of IE or older versions of the above browsers.
10 |
11 |
12 |
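Below is a hedged client-side sketch (not part of the original contribution) of how this controller might be driven from the browser. The endpoint path and element id are assumptions that depend entirely on your Lithium routes and markup; only the general shape of a Fine Uploader initialization is shown.

```javascript
// Assumed endpoint: adjust to whatever route maps to FineUploadController::upload()
var uploader = new qq.FineUploader({
    element: document.getElementById("fine-uploader"),
    request: {
        endpoint: "/fine_upload/upload" // hypothetical route
    }
});
```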
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/AUTHORS:
--------------------------------------------------------------------------------
1 | Mark Feltner - mfeltner@widen.com
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/CHANGELOG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader-s3/CHANGELOG
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/README.md:
--------------------------------------------------------------------------------
1 | # Python + Django + Amazon S3 + Fine Uploader
2 | = crazy delicious
3 |
4 | Install dependencies
5 |
6 | ```bash
7 | pip install -r requirements.txt
8 | ```
9 |
10 | Set environment variables
11 |
12 | ```bash
13 | export AWS_CLIENT_SECRET_KEY='keep me secret!'
14 | export AWS_SERVER_PUBLIC_KEY='who cares if i am secret'
15 | export AWS_SERVER_SECRET_KEY='keep me secret!'
16 | ```
17 |
18 | Modify yo' settings (if needed).
19 |
20 | ```bash
21 | vim settings.py
22 | ```
23 |
24 | Run yo' server
25 |
26 | ```bash
27 | python manage.py runserver
28 | ```
29 |
30 | Upload some things.
31 | Relax and bask in glory.
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader-s3/__init__.py
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 | try:
5 | import settings
6 | except ImportError:
7 | import sys
8 | sys.stderr.write("Error: could not find the file `settings.py`")
9 | sys.exit(1)
10 |
11 | if __name__ == "__main__":
12 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
13 |
14 | from django.core.management import execute_from_command_line
15 |
16 | execute_from_command_line(sys.argv)
17 |
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/requirements.txt:
--------------------------------------------------------------------------------
1 | Django==1.5.2
2 | boto==2.10.0
3 | wsgiref==0.1.2
4 |
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/settings.py:
--------------------------------------------------------------------------------
1 | import os.path
2 | # Django settings for python_django_s3 project.
3 |
4 | # Amazon variables. Be wary and don't hard-code your secret keys here. Rather,
5 | # set them as environment variables, or read them from a file somehow.
6 | AWS_CLIENT_SECRET_KEY = os.getenv("AWS_CLIENT_SECRET_KEY")
7 | AWS_SERVER_PUBLIC_KEY = os.getenv("AWS_SERVER_PUBLIC_KEY")
8 | AWS_SERVER_SECRET_KEY = os.getenv("AWS_SERVER_SECRET_KEY")
9 |
10 | AWS_EXPECTED_BUCKET = 'fineuploadertest'
11 | AWS_MAX_SIZE = 15000000
12 |
13 | DEBUG = True
14 | TEMPLATE_DEBUG = DEBUG
15 |
16 | ADMINS = (
17 | # ('Your Name', 'your_email@example.com'),
18 | )
19 |
20 | MANAGERS = ADMINS
21 |
22 | DATABASES = {
23 | 'default': {
24 | 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
25 | 'NAME': '', # Or path to database file if using sqlite3.
26 | # The following settings are not used with sqlite3:
27 | 'USER': '',
28 | 'PASSWORD': '',
29 | 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
30 | 'PORT': '', # Set to empty string for default.
31 | }
32 | }
33 |
34 | # Hosts/domain names that are valid for this site; required if DEBUG is False
35 | # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
36 | ALLOWED_HOSTS = []
37 |
38 | # Local time zone for this installation. Choices can be found here:
39 | # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
40 | # although not all choices may be available on all operating systems.
41 | # In a Windows environment this must be set to your system time zone.
42 | TIME_ZONE = 'America/Chicago'
43 |
44 | # Language code for this installation. All choices can be found here:
45 | # http://www.i18nguy.com/unicode/language-identifiers.html
46 | LANGUAGE_CODE = 'en-us'
47 |
48 | SITE_ID = 1
49 |
50 | # If you set this to False, Django will make some optimizations so as not
51 | # to load the internationalization machinery.
52 | USE_I18N = True
53 |
54 | # If you set this to False, Django will not format dates, numbers and
55 | # calendars according to the current locale.
56 | USE_L10N = True
57 |
58 | # If you set this to False, Django will not use timezone-aware datetimes.
59 | USE_TZ = True
60 |
61 | # Absolute filesystem path to the directory that will hold user-uploaded files.
62 | # Example: "/var/www/example.com/media/"
63 | MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media")
64 |
65 | # URL that handles the media served from MEDIA_ROOT. Make sure to use a
66 | # trailing slash.
67 | # Examples: "http://example.com/media/", "http://media.example.com/"
68 | MEDIA_URL = '/media/'
69 |
70 | # Absolute path to the directory static files should be collected to.
71 | # Don't put anything in this directory yourself; store your static files
72 | # in apps' "static/" subdirectories and in STATICFILES_DIRS.
73 | # Example: "/var/www/example.com/static/"
74 | STATIC_ROOT = ''
75 |
76 | # URL prefix for static files.
77 | # Example: "http://example.com/static/", "http://static.example.com/"
78 | STATIC_URL = '/static/'
79 |
80 | # Additional locations of static files
81 | STATICFILES_DIRS = (
82 | # Put strings here, like "/home/html/static" or "C:/www/django/static".
83 | # Always use forward slashes, even on Windows.
84 | # Don't forget to use absolute paths, not relative paths.
85 | os.path.join(os.path.dirname(__file__), "static"),
86 | )
87 |
88 | # List of finder classes that know how to find static files in
89 | # various locations.
90 | STATICFILES_FINDERS = (
91 | 'django.contrib.staticfiles.finders.FileSystemFinder',
92 | 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
93 | # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
94 | )
95 |
96 | # Make this unique, and don't share it with anybody.
97 | SECRET_KEY = 'vu9^u*8gj5o_dg4%lejn2*)lfv)u4m49ydqw7n8%=(=uh8hmqr'
98 |
99 | # List of callables that know how to import templates from various sources.
100 | TEMPLATE_LOADERS = (
101 | 'django.template.loaders.filesystem.Loader',
102 | 'django.template.loaders.app_directories.Loader',
103 | # 'django.template.loaders.eggs.Loader',
104 | )
105 |
106 | MIDDLEWARE_CLASSES = (
107 | 'django.middleware.common.CommonMiddleware',
108 | 'django.contrib.sessions.middleware.SessionMiddleware',
109 | 'django.middleware.csrf.CsrfViewMiddleware',
110 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
111 | 'django.contrib.messages.middleware.MessageMiddleware',
112 | # Uncomment the next line for simple clickjacking protection:
113 | # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
114 | )
115 |
116 | ROOT_URLCONF = 'python_django_s3.urls'
117 |
118 | # Python dotted path to the WSGI application used by Django's runserver.
119 | WSGI_APPLICATION = 'python_django_s3.wsgi.application'
120 |
121 | TEMPLATE_DIRS = (
122 | # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
123 | # Always use forward slashes, even on Windows.
124 | # Don't forget to use absolute paths, not relative paths.
125 | os.path.join(os.path.dirname(__file__), "templates")
126 | )
127 |
128 | INSTALLED_APPS = (
129 | 'django.contrib.auth',
130 | 'django.contrib.contenttypes',
131 | 'django.contrib.sessions',
132 | 'django.contrib.sites',
133 | 'django.contrib.messages',
134 | 'django.contrib.staticfiles',
135 | # Uncomment the next line to enable the admin:
136 | # 'django.contrib.admin',
137 | # Uncomment the next line to enable admin documentation:
138 | # 'django.contrib.admindocs',
139 | )
140 |
141 | # A sample logging configuration. The only tangible logging
142 | # performed by this configuration is to send an email to
143 | # the site admins on every HTTP 500 error when DEBUG=False.
144 | # See http://docs.djangoproject.com/en/dev/topics/logging for
145 | # more details on how to customize your logging configuration.
146 | LOGGING = {
147 | 'version': 1,
148 | 'disable_existing_loggers': False,
149 | 'filters': {
150 | 'require_debug_false': {
151 | '()': 'django.utils.log.RequireDebugFalse'
152 | }
153 | },
154 | 'handlers': {
155 | 'mail_admins': {
156 | 'level': 'ERROR',
157 | 'filters': ['require_debug_false'],
158 | 'class': 'django.utils.log.AdminEmailHandler'
159 | }
160 | },
161 | 'loggers': {
162 | 'django.request': {
163 | 'handlers': ['mail_admins'],
164 | 'level': 'ERROR',
165 | 'propagate': True,
166 | },
167 | }
168 | }
169 |
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/urls.py:
--------------------------------------------------------------------------------
1 | from django.conf.urls import patterns, include, url
2 |
3 | # Uncomment the next two lines to enable the admin:
4 | # from django.contrib import admin
5 | # admin.autodiscover()
6 |
7 | urlpatterns = patterns('',
8 | # Examples:
9 | # url(r'^$', 'python_django_s3.views.home', name='home'),
10 | # url(r'^python_django_s3/', include('python_django_s3.foo.urls')),
11 |
12 | # Uncomment the admin/doc line below to enable admin documentation:
13 | # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
14 |
15 | # Uncomment the next line to enable the admin:
16 | # url(r'^admin/', include(admin.site.urls)),
17 |
18 | url(r'^$', 'views.home', name='home'),
19 | url(r'^s3/signature', 'views.handle_s3', name="s3_signee"),
20 | url(r'^s3/delete', 'views.handle_s3', name='s3_delete'),
21 |     url(r'^s3/success', 'views.success_redirect_endpoint', name="s3_success_endpoint")
22 | )
23 |
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/views.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 | from django.http import HttpResponse, HttpRequest
3 | from django.shortcuts import render
4 | from django.views.decorators.http import require_http_methods
5 | from django.views.decorators.csrf import csrf_exempt
6 |
7 | import base64, hmac, hashlib, json, sys
8 |
9 | try:
10 | import boto
11 | from boto.s3.connection import Key, S3Connection
12 | boto.set_stream_logger('boto')
13 | S3 = S3Connection(settings.AWS_SERVER_PUBLIC_KEY, settings.AWS_SERVER_SECRET_KEY)
14 | except ImportError:
15 |     boto = None
16 |     print("Could not import boto, the Amazon SDK for Python.")
17 |     print("Deleting files will not work.")
18 |     print("Install boto with: $ pip install boto")
19 |
20 |
21 | def home(request):
22 | """ The 'home' page. Returns an HTML page with Fine Uploader code
23 | ready to upload to S3.
24 | """
25 | return render(request, "index.html")
26 |
27 | @csrf_exempt
28 | def success_redirect_endpoint(request):
29 |     """ This is where the uploader will send a POST request after the
30 |     file has been stored in S3.
31 | """
32 | return make_response(200)
33 |
34 | @csrf_exempt
35 | def handle_s3(request):
36 | """ View which handles all POST and DELETE requests sent by Fine Uploader
37 | S3. You will need to adjust these paths/conditions based on your setup.
38 | """
39 | if request.method == "POST":
40 | return handle_POST(request)
41 | elif request.method == "DELETE":
42 | return handle_DELETE(request)
43 | else:
44 | return HttpResponse(status=405)
45 |
46 | def handle_POST(request):
47 | """ Handle S3 uploader POST requests here. For files <=5MiB this is a simple
48 | request to sign the policy document. For files >5MiB this is a request
49 | to sign the headers to start a multipart encoded request.
50 | """
51 | if request.POST.get('success', None):
52 | return make_response(200)
53 | else:
54 | request_payload = json.loads(request.body)
55 | headers = request_payload.get('headers', None)
56 | if headers:
57 | # The presence of the 'headers' property in the request payload
58 | # means this is a request to sign a REST/multipart request
59 | # and NOT a policy document
60 | response_data = sign_headers(headers)
61 | else:
62 | if not is_valid_policy(request_payload):
63 | return make_response(400, {'invalid': True})
64 | response_data = sign_policy_document(request_payload)
65 | response_payload = json.dumps(response_data)
66 | return make_response(200, response_payload)
67 |
68 | def handle_DELETE(request):
69 | """ Handle file deletion requests. For this, we use the Amazon Python SDK,
70 | boto.
71 | """
72 | if boto:
73 | bucket_name = request.REQUEST.get('bucket')
74 | key_name = request.REQUEST.get('key')
75 | aws_bucket = S3.get_bucket(bucket_name, validate=False)
76 | aws_key = Key(aws_bucket, key_name)
77 | aws_key.delete()
78 | return make_response(200)
79 | else:
80 | return make_response(500)
81 |
82 | def make_response(status=200, content=None):
83 | """ Construct an HTTP response. Fine Uploader expects 'application/json'.
84 | """
85 | response = HttpResponse()
86 | response.status_code = status
87 | response['Content-Type'] = "application/json"
88 | response.content = content
89 | return response
90 |
91 | def is_valid_policy(policy_document):
92 | """ Verify the policy document has not been tampered with client-side
93 | before sending it off.
94 | """
95 | #bucket = settings.AWS_EXPECTED_BUCKET
96 | #parsed_max_size = settings.AWS_MAX_SIZE
97 | bucket = ''
98 | parsed_max_size = 0
99 |
100 | for condition in policy_document['conditions']:
101 | if isinstance(condition, list) and condition[0] == 'content-length-range':
102 | parsed_max_size = condition[2]
103 | else:
104 | if condition.get('bucket', None):
105 | bucket = condition['bucket']
106 |
107 | return bucket == settings.AWS_EXPECTED_BUCKET and parsed_max_size == settings.AWS_MAX_SIZE
108 |
109 | def sign_policy_document(policy_document):
110 |     """ Sign and return the policy document for a simple upload.
111 | http://aws.amazon.com/articles/1434/#signyours3postform
112 | """
113 | policy = base64.b64encode(json.dumps(policy_document))
114 | signature = base64.b64encode(hmac.new(settings.AWS_CLIENT_SECRET_KEY, policy, hashlib.sha1).digest())
115 | return {
116 | 'policy': policy,
117 | 'signature': signature
118 | }
119 |
120 | def sign_headers(headers):
121 | """ Sign and return the headers for a chunked upload. """
122 | return {
123 | 'signature': base64.b64encode(hmac.new(settings.AWS_CLIENT_SECRET_KEY, headers, hashlib.sha1).digest())
124 | }
125 |
--------------------------------------------------------------------------------
/python/django-fine-uploader-s3/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for python_django_s3 project.
3 |
4 | This module contains the WSGI application used by Django's development server
5 | and any production WSGI deployments. It should expose a module-level variable
6 | named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
7 | this application via the ``WSGI_APPLICATION`` setting.
8 |
9 | Usually you will have the standard Django WSGI application here, but it also
10 | might make sense to replace the whole Django WSGI application with a custom one
11 | that later delegates to the Django one. For example, you could introduce WSGI
12 | middleware here, or combine a Django application with an application of another
13 | framework.
14 |
15 | """
16 | import os
17 |
18 | # We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
19 | # if running multiple sites in the same mod_wsgi process. To fix this, use
20 | # mod_wsgi daemon mode with each site in its own daemon process, or use
21 | # os.environ["DJANGO_SETTINGS_MODULE"] = "python_django_s3.settings"
22 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "python_django_s3.settings")
23 |
24 | # This application object is used by any WSGI server configured to use this
25 | # file. This includes Django's development server, if the WSGI_APPLICATION
26 | # setting points here.
27 | from django.core.wsgi import get_wsgi_application
28 | application = get_wsgi_application()
29 |
30 | # Apply WSGI middleware here.
31 | # from helloworld.wsgi import HelloWorldApplication
32 | # application = HelloWorldApplication(application)
33 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Distribution / packaging
9 | bin/
10 | build/
11 | develop-eggs/
12 | dist/
13 | eggs/
14 | lib/
15 | lib64/
16 | parts/
17 | sdist/
18 | var/
19 | *.egg-info/
20 | .installed.cfg
21 | *.egg
22 |
23 | # Installer logs
24 | pip-log.txt
25 | pip-delete-this-directory.txt
26 |
27 | # Unit test / coverage reports
28 | .tox/
29 | .coverage
30 | .cache
31 | nosetests.xml
32 | coverage.xml
33 |
34 | # Translations
35 | *.mo
36 |
37 | # Mr Developer
38 | .mr.developer.cfg
39 | .project
40 | .pydevproject
41 |
42 | # Rope
43 | .ropeproject
44 |
45 | # Django stuff:
46 | *.log
47 | *.pot
48 |
49 | # Sphinx documentation
50 | docs/_build/
51 |
52 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/AUTHORS:
--------------------------------------------------------------------------------
1 | Mark Feltner - mfeltner@widen.com
2 | Ferdinand E. Silva - ferdinandsilva@ferdinandsilva.com
--------------------------------------------------------------------------------
/python/django-fine-uploader/CHANGELOG:
--------------------------------------------------------------------------------
1 | # 0.1.0
2 |
3 | - First release
4 | - Delete
5 | - Chunking
6 | - Auto-resume/retry
7 | - MPE Encoded requests only
8 | - Traditional endpoint only
9 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/python/django-fine-uploader/README.md:
--------------------------------------------------------------------------------
1 | # Fine Uploader Django Traditional Example
2 |
3 |
4 | ## Supported Features
5 | - Chunking
6 | - Auto-resume
7 | - Retrying
8 | - Delete
9 | - Handles multipart-encoded requests
10 | - Handles a traditional endpoint
11 |
12 |
13 | ## How to
14 |
15 | 1. Install dependencies
16 |
17 | ```
18 | pip install -r requirements.txt
19 | ```
20 |
21 | 2. Copy Fine Uploader sources into static files:
22 |
23 | ```
24 | cp fine_uploader/* static/fine_uploader
25 | ```
26 |
27 | 3. Run development server with:
28 |
29 | ```
30 | python manage.py runserver
31 | ```
32 |
33 | Uploads are stored in `./media/uploads`.
34 | This can be changed by editing `settings.py`.
35 |
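36 | For a quick sanity check without wiring up the client, you can POST directly to the
37 | `/upload` route with the same multipart fields that `fine_uploader/forms.py` expects.
38 | The sketch below is only illustrative and is not part of this project: it assumes the
39 | dev server is running on `localhost:8000`, that the `requests` package is installed,
40 | and that `example.txt` exists in the current directory.
41 | 
42 | ```python
43 | # smoke_test_upload.py -- hypothetical helper script, not shipped with this example.
44 | import uuid
45 | 
46 | import requests  # assumed: pip install requests
47 | 
48 | ENDPOINT = "http://localhost:8000/upload"  # route defined in urls.py
49 | 
50 | def upload(path):
51 |     """Send one non-chunked upload using Fine Uploader's field names."""
52 |     file_uuid = str(uuid.uuid4())
53 |     with open(path, "rb") as fh:
54 |         response = requests.post(
55 |             ENDPOINT,
56 |             data={"qquuid": file_uuid, "qqfilename": path},
57 |             files={"qqfile": fh},
58 |         )
59 |     # The view answers with {"success": true} and a text/plain content type.
60 |     print(response.status_code, response.text)
61 |     return file_uuid
62 | 
63 | if __name__ == "__main__":
64 |     upload("example.txt")
65 | ```
66 | 
67 | If everything is wired up, the file should appear under `./media/uploads/<uuid>/example.txt`.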
--------------------------------------------------------------------------------
/python/django-fine-uploader/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/__init__.py
--------------------------------------------------------------------------------
/python/django-fine-uploader/fine_uploader/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/fine_uploader/__init__.py
--------------------------------------------------------------------------------
/python/django-fine-uploader/fine_uploader/forms.py:
--------------------------------------------------------------------------------
1 | from django import forms
2 |
3 | class UploadFileForm(forms.Form):
4 | """ This form represents a basic request from Fine Uploader.
5 | The required fields will **always** be sent, the other fields are optional
6 | based on your setup.
7 |
8 | Edit this if you want to add custom parameters in the body of the POST
9 | request.
10 | """
11 | qqfile = forms.FileField()
12 | qquuid = forms.CharField()
13 | qqfilename = forms.CharField()
14 | qqpartindex = forms.IntegerField(required=False)
15 | qqchunksize = forms.IntegerField(required=False)
16 | qqpartbyteoffset = forms.IntegerField(required=False)
17 | qqtotalfilesize = forms.IntegerField(required=False)
18 | qqtotalparts = forms.IntegerField(required=False)
19 |
20 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/fine_uploader/utils.py:
--------------------------------------------------------------------------------
1 | import os, os.path, shutil
2 |
3 | def combine_chunks(total_parts, total_size, source_folder, dest):
4 |     """ Combine a chunked file into a whole file again. Goes through each part,
5 |     in order, and appends that part's bytes to another destination file.
6 |
7 | Chunks are stored in media/chunks
8 | Uploads are saved in media/uploads
9 | """
10 |
11 | if not os.path.exists(os.path.dirname(dest)):
12 | os.makedirs(os.path.dirname(dest))
13 |
14 | with open(dest, 'wb+') as destination:
15 | for i in xrange(total_parts):
16 | part = os.path.join(source_folder, str(i))
17 | with open(part, 'rb') as source:
18 | destination.write(source.read())
19 |
20 |
21 | def save_upload(f, path):
22 | """ Save an upload. Django will automatically "chunk" incoming files
23 | (even when previously chunked by fine-uploader) to prevent large files
24 | from taking up your server's memory. If Django has chunked the file, then
25 | write the chunks, otherwise, save as you would normally save a file in
26 | Python.
27 |
28 | Uploads are stored in media/uploads
29 | """
30 | if not os.path.exists(os.path.dirname(path)):
31 | os.makedirs(os.path.dirname(path))
32 | with open(path, 'wb+') as destination:
33 | if hasattr(f, 'multiple_chunks') and f.multiple_chunks():
34 | for chunk in f.chunks():
35 | destination.write(chunk)
36 | else:
37 | destination.write(f.read())
38 |
39 |
40 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/fine_uploader/views.py:
--------------------------------------------------------------------------------
1 | import json
2 | import logging
3 | import os
4 | import os.path
5 | import shutil
6 |
7 | from django.conf import settings
8 | from django.http import HttpResponse, HttpRequest
9 | from django.shortcuts import render
10 | from django.views.decorators.csrf import csrf_exempt
11 | from django.views.generic import View
12 |
13 | from fine_uploader.forms import UploadFileForm
14 | from fine_uploader import utils
15 |
16 | logger = logging.getLogger('django')
17 |
18 | ##
19 | # Utils
20 | ##
21 | def make_response(status=200, content_type='text/plain', content=None):
22 | """ Construct a response to an upload request.
23 | Success is indicated by a status of 200 and { "success": true }
24 | contained in the content.
25 |
26 | Also, content-type is text/plain by default since IE9 and below chokes
27 | on application/json. For CORS environments and IE9 and below, the
28 | content-type needs to be text/html.
29 | """
30 | response = HttpResponse()
31 | response.status_code = status
32 | response['Content-Type'] = content_type
33 | response.content = content
34 | return response
35 |
36 |
37 | ##
38 | # Views
39 | ##
40 | def home(request):
41 | """ The 'home' page. Returns an HTML page with Fine Uploader code
42 | ready to upload. This HTML page should contain your client-side code
43 |     for instantiating and modifying Fine Uploader.
44 | """
45 | return render(request, 'fine_uploader/index.html')
46 |
47 |
48 | class UploadView(View):
49 | """ View which will handle all upload requests sent by Fine Uploader.
50 | See: https://docs.djangoproject.com/en/dev/topics/security/#user-uploaded-content-security
51 |
52 | Handles POST and DELETE requests.
53 | """
54 |
55 | @csrf_exempt
56 | def dispatch(self, *args, **kwargs):
57 | return super(UploadView, self).dispatch(*args, **kwargs)
58 |
59 | def post(self, request, *args, **kwargs):
60 | """A POST request. Validate the form and then handle the upload
61 |         based on the POSTed data. Does not handle extra parameters yet.
62 | """
63 | form = UploadFileForm(request.POST, request.FILES)
64 | if form.is_valid():
65 | handle_upload(request.FILES['qqfile'], form.cleaned_data)
66 | return make_response(content=json.dumps({ 'success': True }))
67 | else:
68 | return make_response(status=400,
69 | content=json.dumps({
70 | 'success': False,
71 | 'error': '%s' % repr(form.errors)
72 | }))
73 |
74 | def delete(self, request, *args, **kwargs):
75 | """A DELETE request. If found, deletes a file with the corresponding
76 | UUID from the server's filesystem.
77 | """
78 | qquuid = kwargs.get('qquuid', '')
79 | if qquuid:
80 | try:
81 | handle_deleted_file(qquuid)
82 | return make_response(content=json.dumps({ 'success': True }))
83 | except Exception, e:
84 | return make_response(status=400,
85 | content=json.dumps({
86 | 'success': False,
87 | 'error': '%s' % repr(e)
88 | }))
89 | return make_response(status=404,
90 | content=json.dumps({
91 | 'success': False,
92 | 'error': 'File not present'
93 | }))
94 |
95 | def handle_upload(f, fileattrs):
96 | """ Handle a chunked or non-chunked upload.
97 | """
98 | logger.info(fileattrs)
99 |
100 | chunked = False
101 | dest_folder = os.path.join(settings.UPLOAD_DIRECTORY, fileattrs['qquuid'])
102 | dest = os.path.join(dest_folder, fileattrs['qqfilename'])
103 |
104 | # Chunked
105 | if fileattrs.get('qqtotalparts') and int(fileattrs['qqtotalparts']) > 1:
106 | chunked = True
107 | dest_folder = os.path.join(settings.CHUNKS_DIRECTORY, fileattrs['qquuid'])
108 | dest = os.path.join(dest_folder, fileattrs['qqfilename'], str(fileattrs['qqpartindex']))
109 | logger.info('Chunked upload received')
110 |
111 | utils.save_upload(f, dest)
112 | logger.info('Upload saved: %s' % dest)
113 |
114 | # If the last chunk has been sent, combine the parts.
115 | if chunked and (fileattrs['qqtotalparts'] - 1 == fileattrs['qqpartindex']):
116 |
117 | logger.info('Combining chunks: %s' % os.path.dirname(dest))
118 | utils.combine_chunks(fileattrs['qqtotalparts'],
119 | fileattrs['qqtotalfilesize'],
120 | source_folder=os.path.dirname(dest),
121 | dest=os.path.join(settings.UPLOAD_DIRECTORY, fileattrs['qquuid'], fileattrs['qqfilename']))
122 | logger.info('Combined: %s' % dest)
123 |
124 | shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
125 |
126 | def handle_deleted_file(uuid):
127 | """ Handles a filesystem delete based on UUID."""
128 | logger.info(uuid)
129 |
130 | loc = os.path.join(settings.UPLOAD_DIRECTORY, uuid)
131 | shutil.rmtree(loc)
132 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 |
5 | if __name__ == "__main__":
6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
7 |
8 | from django.core.management import execute_from_command_line
9 |
10 | execute_from_command_line(sys.argv)
11 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/media/chunks/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/media/chunks/.gitignore
--------------------------------------------------------------------------------
/python/django-fine-uploader/media/uploads/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/media/uploads/.gitignore
--------------------------------------------------------------------------------
/python/django-fine-uploader/requirements.txt:
--------------------------------------------------------------------------------
1 | Django==1.6.1
--------------------------------------------------------------------------------
/python/django-fine-uploader/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for project.
3 |
4 | For more information on this file, see
5 | https://docs.djangoproject.com/en/1.6/topics/settings/
6 |
7 | For the full list of settings and their values, see
8 | https://docs.djangoproject.com/en/1.6/ref/settings/
9 | """
10 |
11 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
12 | import os
13 | BASE_DIR = os.path.dirname(__file__)
14 |
15 | # Static files (CSS, JavaScript, Images)
16 | # https://docs.djangoproject.com/en/1.6/howto/static-files/
17 |
18 | STATIC_ROOT = ''
19 | STATIC_URL = '/static/'
20 | STATICFILES_DIRS = (
21 | os.path.join(BASE_DIR, 'static'),
22 | )
23 |
24 | # Templates
25 | # https://docs.djangoproject.com/en/1.6/ref/settings/#std:setting-TEMPLATE_DIRS
26 |
27 | TEMPLATE_DIRS = [
28 | os.path.join(BASE_DIR, 'templates'),
29 | ]
30 |
31 | # Media URL
32 | MEDIA_URL = '/media/'
33 | # Media root
34 | MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
35 |
36 | # Uploads Directory
37 | UPLOAD_DIRECTORY = os.path.join(MEDIA_ROOT, 'uploads')
38 | CHUNKS_DIRECTORY = os.path.join(MEDIA_ROOT, 'chunks')
39 |
40 | # Quick-start development settings - unsuitable for production
41 | # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
42 |
43 | # SECURITY WARNING: keep the secret key used in production secret!
44 | SECRET_KEY = '-xiygt6+uao=#%2@vf89k)@jhljdu1-af834k@1!lzbs*5_und'
45 |
46 | # SECURITY WARNING: don't run with debug turned on in production!
47 | DEBUG = True
48 |
49 | TEMPLATE_DEBUG = True
50 |
51 | ALLOWED_HOSTS = ['0.0.0.0', 'localhost']
52 |
53 | # Application definition
54 |
55 | INSTALLED_APPS = (
56 | 'django.contrib.admin',
57 | 'django.contrib.auth',
58 | 'django.contrib.contenttypes',
59 | 'django.contrib.sessions',
60 | 'django.contrib.messages',
61 | 'django.contrib.staticfiles',
62 | )
63 |
64 | MIDDLEWARE_CLASSES = (
65 | 'django.contrib.sessions.middleware.SessionMiddleware',
66 | 'django.middleware.common.CommonMiddleware',
67 | 'django.middleware.csrf.CsrfViewMiddleware',
68 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
69 | 'django.contrib.messages.middleware.MessageMiddleware',
70 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
71 | )
72 |
73 | ROOT_URLCONF = 'urls'
74 |
75 | WSGI_APPLICATION = 'wsgi.application'
76 |
77 |
78 | # Database
79 | # https://docs.djangoproject.com/en/1.6/ref/settings/#databases
80 |
81 | #DATABASES = {
82 | # 'default': {
83 | # 'ENGINE': 'django.db.backends.sqlite3',
84 | # 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
85 | # }
86 | #}
87 |
88 | # Internationalization
89 | # https://docs.djangoproject.com/en/1.6/topics/i18n/
90 |
91 | LANGUAGE_CODE = 'en-us'
92 |
93 | TIME_ZONE = 'UTC'
94 |
95 | USE_I18N = True
96 |
97 | USE_L10N = True
98 |
99 | USE_TZ = True
100 |
101 |
102 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/static/fine_uploader/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/static/fine_uploader/.gitignore
--------------------------------------------------------------------------------
/python/django-fine-uploader/static/main.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 |
3 | /** instantiate Fine Uploader here */
4 |
5 | });
6 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/templates/fine_uploader/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/templates/fine_uploader/.gitignore
--------------------------------------------------------------------------------
/python/django-fine-uploader/templates/fine_uploader/index.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/django-fine-uploader/templates/fine_uploader/index.html
--------------------------------------------------------------------------------
/python/django-fine-uploader/urls.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 | from django.conf.urls import include, patterns, static, url
3 |
4 | from fine_uploader.views import UploadView
5 |
6 | urlpatterns = patterns('',
7 | url(r'^$', 'fine_uploader.views.home', name='home'),
8 |     url(r'^upload(?:/(?P<qquuid>\S+))?', UploadView.as_view(), name='upload'),
9 | )
10 | urlpatterns += static.static('/browse/', document_root=settings.UPLOAD_DIRECTORY)
11 |
12 |
--------------------------------------------------------------------------------
/python/django-fine-uploader/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for fine_uploader_django project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
12 |
13 | from django.core.wsgi import get_wsgi_application
14 | application = get_wsgi_application()
15 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/AUTHORS:
--------------------------------------------------------------------------------
1 | Mark Feltner - mfeltner@widen.com
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/CHANGELOG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/flask-fine-uploader-s3/CHANGELOG
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/README.md:
--------------------------------------------------------------------------------
1 | # Python + Flask + Amazon S3 + Fine Uploader
2 | = crazy delicious
3 |
4 | > client-side code not included.
5 |
6 | ## Instructions
7 | 1. Install dependencies
8 |
9 | `pip install -r requirements.txt`
10 |
11 | 2. Set environment variables
12 |
13 | ```
14 | export AWS_CLIENT_SECRET_KEY='keep me secret!'
15 | export AWS_SERVER_PUBLIC_KEY='who cares if i am secret'
16 | export AWS_SERVER_SECRET_KEY='keep me secret!'
17 | ```
18 |
19 | 3. Get the static files
20 |
21 | http://fineuploader.com/downloads
22 |
23 | 4. Make your template
24 |
25 | ```bash
26 | mkdir templates
27 | touch templates/index.html
28 | vim templates/index.html
29 | ```
30 |
31 | 5. [Enable Fine Uploader](http://docs.fineuploader.com)
32 | 6. Run the server
33 |
34 | `python app.py`
35 |
36 | 7. Enjoy!
37 |
38 | YMMV.
39 |
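40 | To check that the signing route works before involving the browser at all, you can POST a
41 | policy document to `/s3/sign` yourself. The snippet below is just a sketch (not part of this
42 | example): it assumes the environment variables from step 2 are exported, the app is running
43 | on the default port 5000, and the `requests` package is installed.
44 | 
45 | ```python
46 | # check_signing.py -- hypothetical smoke test for the /s3/sign route.
47 | import json
48 | 
49 | import requests  # assumed: pip install requests
50 | 
51 | # A minimal policy of the same shape Fine Uploader sends; the bucket and size
52 | # limit mirror AWS_EXPECTED_BUCKET and AWS_MAX_SIZE in app.py.
53 | policy = {
54 |     "expiration": "2030-01-01T00:00:00Z",
55 |     "conditions": [
56 |         {"bucket": "fineuploadertest"},
57 |         ["content-length-range", "0", "15000000"],
58 |     ],
59 | }
60 | 
61 | resp = requests.post(
62 |     "http://localhost:5000/s3/sign",
63 |     data=json.dumps(policy),
64 |     headers={"Content-Type": "application/json"},
65 | )
66 | # Expect a JSON body with base64 'policy' and 'signature' fields.
67 | print(resp.json())
68 | ```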
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # app.py
4 | #
5 | # by: Mark Feltner
6 | #
7 | # Server-side S3 upload example for Fine Uploader
8 | #
9 | # Features:
10 | # * Upload to S3
11 | # * Delete from S3
12 | # * Sign Policy documents (simple uploads) and REST requests (chunked/multipart)
13 | # uploads
14 | # * non-CORS environment
15 |
16 | import base64, hmac, hashlib, os, sys
17 |
18 | from flask import (Flask, abort, json, jsonify, make_response, render_template,
19 |                    request)
20 |
21 | AWS_CLIENT_SECRET_KEY = os.getenv('AWS_CLIENT_SECRET_KEY')
22 | AWS_SERVER_PUBLIC_KEY = os.getenv('AWS_SERVER_PUBLIC_KEY')
23 | AWS_SERVER_SECRET_KEY = os.getenv('AWS_SERVER_SECRET_KEY')
24 | AWS_EXPECTED_BUCKET = 'fineuploadertest'
25 | AWS_MAX_SIZE = 15000000
26 |
27 | app = Flask(__name__)
28 | app.config.from_object(__name__)
29 |
30 | def sign_policy(policy):
31 | """ Sign and return the policy document for a simple upload.
32 | http://aws.amazon.com/articles/1434/#signyours3postform """
33 | signed_policy = base64.b64encode(policy)
34 | signature = base64.b64encode(hmac.new(
35 | app.config.get('AWS_CLIENT_SECRET_KEY'), signed_policy, hashlib.sha1).
36 | digest())
37 | return { 'policy': signed_policy, 'signature': signature }
38 |
39 | def sign_headers(headers):
40 | """ Sign and return the headers for a chunked upload. """
41 | headers = bytearray(headers, 'utf-8') # hmac doesn't want unicode
42 | return {
43 | 'signature': base64.b64encode(hmac.new(
44 | app.config.get('AWS_CLIENT_SECRET_KEY'), headers, hashlib.sha1).
45 | digest())
46 | }
47 |
48 | @app.route("/s3/sign", methods=['POST'])
49 | def s3_signature():
50 | """ Route for signing the policy document or REST headers. """
51 | request_payload = request.get_json()
52 | if request_payload.get('headers'):
53 | response_data = sign_headers(request_payload['headers'])
54 | else:
55 | response_data = sign_policy(request.data)
56 | return jsonify(response_data)
57 |
58 |
59 | @app.route("/s3/delete/<key>", methods=['POST', 'DELETE'])
60 | def s3_delete(key=None):
61 | """ Route for deleting files off S3. Uses the SDK. """
62 | try:
63 | from boto.s3.connection import Key, S3Connection
64 | S3 = S3Connection(app.config.get("AWS_SERVER_PUBLIC_KEY"),
65 | app.config.get("AWS_SERVER_SECRET_KEY"))
66 | request_payload = request.values
67 | bucket_name = request_payload.get('bucket')
68 | key_name = request_payload.get('key')
69 | aws_bucket = S3.get_bucket(bucket_name, validate=False)
70 | aws_key = Key(aws_bucket, key_name)
71 | aws_key.delete()
72 | return make_response('', 200)
73 | except ImportError:
74 | abort(500)
75 |
76 | @app.route("/s3/success", methods=['GET', 'POST'])
77 | def s3_success():
78 | """ Success redirect endpoint for <=IE9. """
79 | return make_response()
80 |
81 | @app.route("/")
82 | def index():
83 | return render_template("index.html")
84 |
85 | def main(argv=None):
86 |
87 | app.run('0.0.0.0')
88 |
89 | return 0 # success
90 |
91 | if __name__ == '__main__':
92 | status = main()
93 | sys.exit(status)
94 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader-s3/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==0.10.1
2 | boto==2.10.0
3 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader/AUTHORS:
--------------------------------------------------------------------------------
1 | Mark Feltner - mfeltner@widen.com
--------------------------------------------------------------------------------
/python/flask-fine-uploader/CHANGELOG:
--------------------------------------------------------------------------------
1 | # 0.1.0
2 |
3 | - First release
4 | - Delete
5 | - Chunking
6 | - Auto-resume/retry
7 | - MPE Encoded requests only
8 | - Traditional endpoint only
9 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) [year] [fullname]
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/python/flask-fine-uploader/README.md:
--------------------------------------------------------------------------------
1 | # Fine Uploader Flask Traditional Example
2 |
3 |
4 | ## Supported Features
5 | - Chunking
6 | - Auto-resume
7 | - Retrying
8 | - Delete
9 | - Handles multipart-encoded requests
10 | - Handles a traditional endpoint
11 |
12 |
13 | ## How to ...
14 |
15 | 1. Install dependencies
16 |
17 | ```
18 | pip install -r requirements.txt
19 | ```
20 |
21 | 2. Copy Fine Uploader sources into static files:
22 |
23 | ```
24 | cp fine-uploader/* static/fine_uploader
25 | ```
26 |
27 | 3. Run server with:
28 |
29 | ```
30 | python app.py
31 | ```
32 |
33 | Uploads are stored in `./media/upload`.
34 | This can be changed by editing the config values at the top of `app.py`.
35 |
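36 | To exercise the chunking path without a browser, you can split a file yourself and send each
37 | part with the `qq*` fields that `handle_upload()` reads. The script below is only a sketch
38 | (not part of this example): it assumes the app is running on port 5000, the `requests`
39 | package is installed, and `example.bin` exists locally.
40 | 
41 | ```python
42 | # chunked_upload_demo.py -- hypothetical client-side sketch, not shipped here.
43 | import os
44 | import uuid
45 | 
46 | import requests  # assumed: pip install requests
47 | 
48 | ENDPOINT = "http://localhost:5000/upload"
49 | CHUNK = 1024 * 1024  # 1 MiB parts, purely for the demo
50 | 
51 | def upload_chunked(path):
52 |     """Post a file in parts; the server reassembles it under media/upload/<uuid>/."""
53 |     file_uuid = str(uuid.uuid4())
54 |     total_size = os.path.getsize(path)
55 |     total_parts = max(1, -(-total_size // CHUNK))  # ceiling division
56 |     with open(path, "rb") as fh:
57 |         for index in range(total_parts):
58 |             part = fh.read(CHUNK)
59 |             data = {
60 |                 "qquuid": file_uuid,
61 |                 "qqfilename": os.path.basename(path),
62 |                 "qqtotalfilesize": total_size,
63 |                 "qqtotalparts": total_parts,
64 |                 "qqpartindex": index,
65 |                 "qqchunksize": len(part),
66 |             }
67 |             resp = requests.post(ENDPOINT, data=data, files={"qqfile": part})
68 |             resp.raise_for_status()
69 |     return file_uuid
70 | 
71 | if __name__ == "__main__":
72 |     print("uploaded as", upload_chunked("example.bin"))
73 | ```
74 | 
75 | Once the final part arrives, `combine_chunks()` stitches the parts back together and the
76 | chunk directory is removed.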
--------------------------------------------------------------------------------
/python/flask-fine-uploader/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import json
3 | import os
4 | import os.path
5 | import shutil
6 | import sys
7 |
8 | from flask import current_app, Flask, jsonify, render_template, request
9 | from flask.views import MethodView
10 |
11 | # Meta
12 | ##################
13 | __version__ = '0.1.0'
14 |
15 | # Config
16 | ##################
17 | DEBUG = True
18 | SECRET_KEY = 'development key'
19 |
20 | BASE_DIR = os.path.dirname(__file__)
21 |
22 | MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
23 | UPLOAD_DIRECTORY = os.path.join(MEDIA_ROOT, 'upload')
24 | CHUNKS_DIRECTORY = os.path.join(MEDIA_ROOT, 'chunks')
25 |
26 | app = Flask(__name__)
27 | app.config.from_object(__name__)
28 |
29 | # Utils
30 | ##################
31 | def make_response(status=200, content=None):
32 | """ Construct a response to an upload request.
33 | Success is indicated by a status of 200 and { "success": true }
34 | contained in the content.
35 |
36 | Also, content-type is text/plain by default since IE9 and below chokes
37 | on application/json. For CORS environments and IE9 and below, the
38 | content-type needs to be text/html.
39 | """
40 |     return current_app.response_class(json.dumps(content,
41 |         indent=None if request.is_xhr else 2), status=status, mimetype='text/plain')
42 |
43 |
44 | def validate(attrs):
45 | """ No-op function which will validate the client-side data.
46 | Werkzeug will throw an exception if you try to access an
47 | attribute that does not have a key for a MultiDict.
48 | """
49 | try:
50 | #required_attributes = ('qquuid', 'qqfilename')
51 | #[attrs.get(k) for k,v in attrs.items()]
52 | return True
53 | except Exception, e:
54 | return False
55 |
56 |
57 | def handle_delete(uuid):
58 | """ Handles a filesystem delete based on UUID."""
59 | location = os.path.join(app.config['UPLOAD_DIRECTORY'], uuid)
60 | print(uuid)
61 | print(location)
62 | shutil.rmtree(location)
63 |
64 | def handle_upload(f, attrs):
65 | """ Handle a chunked or non-chunked upload.
66 | """
67 |
68 | chunked = False
69 | dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])
70 | dest = os.path.join(dest_folder, attrs['qqfilename'])
71 |
72 |
73 | # Chunked
74 | if attrs.has_key('qqtotalparts') and int(attrs['qqtotalparts']) > 1:
75 | chunked = True
76 | dest_folder = os.path.join(app.config['CHUNKS_DIRECTORY'], attrs['qquuid'])
77 | dest = os.path.join(dest_folder, attrs['qqfilename'], str(attrs['qqpartindex']))
78 |
79 | save_upload(f, dest)
80 |
81 | if chunked and (int(attrs['qqtotalparts']) - 1 == int(attrs['qqpartindex'])):
82 |
83 | combine_chunks(attrs['qqtotalparts'],
84 | attrs['qqtotalfilesize'],
85 | source_folder=os.path.dirname(dest),
86 | dest=os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'],
87 | attrs['qqfilename']))
88 |
89 | shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
90 |
91 |
92 | def save_upload(f, path):
93 | """ Save an upload.
94 | Uploads are stored in media/uploads
95 | """
96 | if not os.path.exists(os.path.dirname(path)):
97 | os.makedirs(os.path.dirname(path))
98 | with open(path, 'wb+') as destination:
99 | destination.write(f.read())
100 |
101 |
102 | def combine_chunks(total_parts, total_size, source_folder, dest):
103 |     """ Combine a chunked file into a whole file again. Goes through each part,
104 |     in order, and appends that part's bytes to another destination file.
105 |
106 | Chunks are stored in media/chunks
107 | Uploads are saved in media/uploads
108 | """
109 |
110 | if not os.path.exists(os.path.dirname(dest)):
111 | os.makedirs(os.path.dirname(dest))
112 |
113 | with open(dest, 'wb+') as destination:
114 | for i in xrange(int(total_parts)):
115 | part = os.path.join(source_folder, str(i))
116 | with open(part, 'rb') as source:
117 | destination.write(source.read())
118 |
119 |
120 | # Views
121 | ##################
122 | @app.route("/")
123 | def index():
124 | """ The 'home' page. Returns an HTML page with Fine Uploader code
125 | ready to upload. This HTML page should contain your client-side code
126 |     for instantiating and modifying Fine Uploader.
127 | """
128 | return render_template('fine_uploader/index.html')
129 |
130 |
131 | class UploadAPI(MethodView):
132 | """ View which will handle all upload requests sent by Fine Uploader.
133 |
134 | Handles POST and DELETE requests.
135 | """
136 |
137 | def post(self):
138 | """A POST request. Validate the form and then handle the upload
139 |         based on the POSTed data. Does not handle extra parameters yet.
140 | """
141 | if validate(request.form):
142 | handle_upload(request.files['qqfile'], request.form)
143 | return make_response(200, { "success": True })
144 | else:
145 |             return make_response(400, { "error": "Invalid request" })
146 |
147 | def delete(self, uuid):
148 | """A DELETE request. If found, deletes a file with the corresponding
149 | UUID from the server's filesystem.
150 | """
151 | try:
152 | handle_delete(uuid)
153 | return make_response(200, { "success": True })
154 | except Exception, e:
155 | return make_response(400, { "success": False, "error": e.message })
156 |
157 | upload_view = UploadAPI.as_view('upload_view')
158 | app.add_url_rule('/upload', view_func=upload_view, methods=['POST',])
159 | app.add_url_rule('/upload/<uuid>', view_func=upload_view, methods=['DELETE',])
160 |
161 |
162 | # Main
163 | ##################
164 | def main():
165 | app.run('0.0.0.0')
166 | return 0
167 |
168 | if __name__ == '__main__':
169 | status = main()
170 | sys.exit(status)
171 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader/media/chunks/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/flask-fine-uploader/media/chunks/.gitignore
--------------------------------------------------------------------------------
/python/flask-fine-uploader/media/upload/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/flask-fine-uploader/media/upload/.gitignore
--------------------------------------------------------------------------------
/python/flask-fine-uploader/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==0.10.1
--------------------------------------------------------------------------------
/python/flask-fine-uploader/static/fine_uploader/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/flask-fine-uploader/static/fine_uploader/.gitignore
--------------------------------------------------------------------------------
/python/flask-fine-uploader/static/main.js:
--------------------------------------------------------------------------------
1 | $(document).ready(function() {
2 |
3 | /** instantiate Fine Uploader here. */
4 |
5 | });
6 |
--------------------------------------------------------------------------------
/python/flask-fine-uploader/templates/fine_uploader/index.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/flask-fine-uploader/templates/fine_uploader/index.html
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/.gitignore:
--------------------------------------------------------------------------------
1 | my_p3s3f.env
2 | my_client_conf.js
3 | static/
4 | __pycache__/
5 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/CHANGES.md:
--------------------------------------------------------------------------------
1 | # 1.0.1
2 | Fix internal links in readme
3 |
4 | # 1.0.0
5 | First version for sharing.
6 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2017 python3-flask-fine-uploader-s3
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/README.md:
--------------------------------------------------------------------------------
1 | # Python + Flask + Amazon S3 + Fine Uploader
2 | This has been constructed as a working starting point for S3 multipart uploads
3 | based on the [fine-uploader](https://fineuploader.com/) html/css/js and the
4 | [python flask-fine-uploader-s3](https://github.com/FineUploader/server-examples/tree/master/python/flask-fine-uploader-s3)
5 | signing server example.
6 |
7 | Significant changes have been made to the [flask-fine-uploader-s3](https://github.com/FineUploader/server-examples/tree/master/python/flask-fine-uploader-s3) code to convert it to Python 3 and to add simple examples of server-side hooks for checking whether requests should be accepted by the application in use.
8 | 
9 | Although the original examples in [fine-uploader](https://fineuploader.com/) are good, all elements that require modification for a proof of concept have been extracted down to three files; see [Setup](#setup) for more details.
10 |
11 | __Content:__
12 |
13 | * [Features](#features)
14 | * [Getting started](#getting-started)
15 | * [Prerequisites](#prerequisites)
16 | * [Setup](#setup)
17 | * [`client_conf.js`](#client_confjs)
18 | * [`basic_cors.xml`](#basic_corsxml)
19 | * [`p3s3f.env`](#p3s3fenv)
20 | * [Server side checks](#server-side-checks)
21 | * [Known issues](#known-issues)
22 | * [Browser extensions](#browser-extensions)
23 | * [Client side code](#client-side-code)
24 | * [CEPH](#ceph)
25 |
26 |
27 | # Features
28 |
29 | This is a functional example to upload files to S3 (specifically Ceph rados-gateway) via a web-browser with a python server to sign requests.
30 |
31 | * Uploads resumable following:
32 | * temporary network dropping (automatic for short breaks)
33 | * computer sleeps (retry button)
34 | * switching between wired <-> wireless (retry button)
35 |
36 | It works out of the box, but you should read the docs on how to run [flask apps](http://flask.pocoo.org/docs/latest/quickstart/#quickstart) if it will be external facing (the built-in development server is not suitable for production).
37 |
38 | # Getting started
39 |
40 | ## Prerequisites
41 |
42 | * python 3
43 | `sudo apt-get install python3`
44 | * pip 3
45 | `sudo apt-get install python3-pip`
46 |
47 | (or via Homebrew et al.)
48 |
49 | ## Setup
50 |
51 | Consider `$S3FLASK` as the folder containing this `README.md` file.
52 |
53 | 1. Clone/unpack this archive/repository and cd into `server-examples/python/python3-flask-fine-uploader-s3`
54 | (referred to as `$S3FLASK` after this point)
55 | 2. Add the client side code:
56 | ```bash
57 | mkdir static
58 | curl -sSL https://github.com/FineUploader/fine-uploader/archive/master.tar.gz | tar -zx
59 | cd fine-uploader-master
60 | npm install
61 | # See note below for CEPH
62 | make build
63 | cp _build/s3.*.js* ../static/.
64 | cp _build/fine-uploader-new.css ../static/.
65 | cp _build/*.gif ../static/.
66 | cd ..
67 | ```
68 | 3. Install the dependencies:
69 | `pip3 install -r requirements.txt [--user]`
70 | 4. Set environment variables in [`p3s3f.env`](#p3s3fenv), see link for details.
71 | 5. Configure the bucket with basic CORS rules, see [`basic_cors.xml`](basic_cors.xml):
72 | `s3cmd setcors basic_cors.xml s3://$AWS_DEST_BUCKET`
73 | 6. Update [`client_conf.js`](#client_confjs) with relevant values and link into `static`
74 | folder. See link for details.
75 | 7. Run the app (like this to ensure env doesn't leak):
76 | `bash -c 'source my_p3s3f.env && ./s3-sign-srv.py'`
77 | 8. Visit `http[s]://localhost:5000` (dependent on config of `p3s3f.env`)
78 |
79 | ### `client_conf.js`
80 |
81 | _You need to force the browser to reload from server not from cache
82 | if you modify this file after the initial page load, e.g. shift-ctrl-R._
83 |
84 | First:
85 |
86 | ```
87 | cp client_conf.js my_client_conf.js
88 | ln my_client_conf.js static/client_conf.js
89 | ```
90 |
91 | _This needs to be a hard link._ `static/client_conf.js` is ignored by the repo, so it will not be committed.
92 |
93 | In a production system this object would be created dynamically by your web application, setting variables as appropriate, e.g. depending on the logged-in user.
94 |
95 | Items to modify for testing are:
96 |
97 | * `objectProperties` - indicated as required for ceph
98 | * `bucket` - name of destination bucket
99 | * `host` - s3 gateway host name (no `http[s]://`)
100 | * `request`
101 | * `endpoint` - endpoint url including bucket prefix
102 |   * `accessKey` - `AWS_CLIENT_ACCESS_KEY` (checked server-side against the configured key for the bucket)
103 | * `params` - _OPTIONAL_, adds to header prefixed with `x-amz-meta-`
104 | * useful for server-side checking that a signature should be provided
105 |
106 | See [here](https://docs.fineuploader.com/branch/master/api/options-s3.html) for
107 | the full fine-uploader API docs.
108 |
109 | ### `basic_cors.xml`
110 |
111 | If you just want to see if this works you don't need to modify this. If using in a
112 | production environment you should review this file to restrict allowed actions and header
113 | fields to be appropriate for your application.
114 |
115 | Further information is included in the file as comments.
116 |
117 | ### `p3s3f.env`
118 |
119 | Recommend: `cp p3s3f.env my_p3s3f.env` (set permissions accordingly)
120 |
121 | Contains variables needed by the signing server. This file is annotated and is
122 | relatively self-explanatory, with the exception of `P3S3F_EXAMPLE_ALLOW_*`.
123 |
124 | Activating the `P3S3F_EXAMPLE_ALLOW_*` variables will result in signing requests
125 | being rejected if you attempt to upload files that are not expected. These are
126 | here to exercise the `challenge_is_good()` function of `s3-sign-srv.py`. This is
127 | the point at which you could do some form of server-side validation of headers or
128 | expected data files. Note that relying on the content-type or filename extension
129 | isn't strong validation.
130 |
131 | # Server side checks
132 |
133 | As indicated in the [`p3s3f.env`](#p3s3fenv) section, functions in `s3-sign-srv.py` can be
134 | augmented for application specific triggers and checks. Key locations would be:
135 |
136 | * `challenge_is_good()` - decides, from the request headers, whether the server should provide a signature (a sketch is included at the end of this README), e.g.
137 |   * Is the file expected (check against a DB)?
138 |   * Has the file already been fully uploaded under a different key?
139 | * `s3_success()` - Post completion hooks could go in this function, e.g.
140 | * Mark file as upload complete in DB (links back to `challenge_is_good()` checks)
141 | * Trigger downstream processing...
142 | * `s3_delete()` - hooks could be added here, but we would advise against exposing delete in the main upload interface.
143 | * Deletes are more likely to require an application specific implementation with a recover option.
144 | * Button can be disabled via `static/client_conf.js`
145 | * `DELETE` action can be disabled on bucket via `basic_cors.xml`
146 |
147 | # Known issues
148 |
149 | ## Browser extensions
150 |
151 | Some browser extensions will block the AJAX calls; if you have issues check the
152 | browser logs (developer tools) and add exceptions appropriately.
153 |
154 | # Client side code
155 |
156 | In a production system you would not normally include the client side code directly
157 | in the back-end server. Remove the `static` folder when building a real application
158 | using this.
159 |
160 | ## CEPH
161 |
162 | There is a [CEPH rados-gateway bug](http://tracker.ceph.com/issues/20201) which
163 | results in all files where the MIME type cannot be determined being rejected as
164 | they have no content-type.
165 |
166 | If the bug has not been resolved on your CEPH install (and you want to support files with no or unusual extensions) you can work around it with
167 | [this](https://github.com/FineUploader/fine-uploader/pull/1846/files) patch to the
168 | web-client.
169 |
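170 | ## Example server side check
171 | 
172 | As a purely illustrative follow-up to the [Server side checks](#server-side-checks) section,
173 | the sketch below shows one way `challenge_is_good()` could be rewritten once the
174 | `P3S3F_EXAMPLE_ALLOW_*` simulation is removed. `EXPECTED_UPLOADS` is a hypothetical stand-in
175 | for whatever database or service your application would really consult.
176 | 
177 | ```python
178 | import os
179 | 
180 | # Hypothetical allow-list keyed on (dataset, original filename); in a real
181 | # application this lookup would hit your own database or API instead.
182 | EXPECTED_UPLOADS = {
183 |     ("731db507-1240-44ab-a616-de95f02aeaa4", "1kb.txt"),
184 | }
185 | 
186 | def challenge_is_good(to_challenge):
187 |     """Only sign requests for uploads the application already knows about."""
188 |     try:
189 |         dataset = to_challenge['x-amz-meta-dataset']      # from request.params in client_conf.js
190 |         filename = to_challenge['x-amz-meta-qqfilename']  # original file name, added by Fine Uploader
191 |     except KeyError:
192 |         return False  # refuse to sign requests missing the expected metadata
193 |     if to_challenge['bucket'] != os.getenv('AWS_DEST_BUCKET'):
194 |         return False  # wrong bucket: never sign
195 |     return (dataset, filename) in EXPECTED_UPLOADS
196 | ```
197 | 
198 | Returning `False` causes the signing request to be rejected, so the upload is refused before anything is sent to S3.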
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/basic_cors.xml:
--------------------------------------------------------------------------------
1 | <CORSConfiguration>
2 |     <CORSRule>
3 |         <AllowedOrigin>*</AllowedOrigin>
4 |         <AllowedMethod>POST</AllowedMethod>
5 |         <AllowedMethod>PUT</AllowedMethod>
6 |         <AllowedMethod>DELETE</AllowedMethod>
7 |         <MaxAgeSeconds>3000</MaxAgeSeconds>
8 |         <ExposeHeader>ETag</ExposeHeader>
9 |         <AllowedHeader>*</AllowedHeader>
10 |     </CORSRule>
11 | </CORSConfiguration>
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/client_conf.js:
--------------------------------------------------------------------------------
1 | var fine_u_conf = {
2 | debug: true,
3 | element: document.getElementById('fine-uploader'),
4 | cors: {
5 | expected: true
6 | },
7 | objectProperties: {
8 | bucket: 'somebucket',
9 | host: 'some.s3.server.ac.uk'
10 | },
11 | request: {
12 | endpoint: 'https://somebucket.some.s3.server.ac.uk',
13 | accessKey: 'XXXXXXXXXXXXXXXXXXXX',
14 | params: { dataset: '731db507-1240-44ab-a616-de95f02aeaa4' }
15 | },
16 | signature: {
17 | endpoint: "/s3/sign"
18 | },
19 | uploadSuccess: {
20 | endpoint: "/s3/success"
21 | },
22 | deleteFile: {
23 | enabled: true,
24 | endpoint: "/s3/delete"
25 | },
26 | iframeSupport: {
27 | localBlankPagePath: "success.html"
28 | },
29 | chunking: {
30 | enabled: true,
31 | concurrent: {
32 | enabled: true
33 | }
34 | },
35 | resume: {
36 | enabled: true
37 | },
38 | retry: {
39 | enableAuto: false,
40 | preventRetryResponseProperty: "error"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/index.html:
--------------------------------------------------------------------------------
1 | <!-- Demo page markup not preserved in this dump. The page loads the built Fine
2 |      Uploader S3 bundle, fine-uploader-new.css, and static/client_conf.js, and
3 |      provides the #fine-uploader container element referenced by the config. -->
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/p3s3f.env:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # server startup variables:
4 | export P3S3F_HOST_NAME=127.0.0.1 # localhost
5 | export P3S3F_HOST_PORT=5000
6 | export P3S3F_THREADED=0
7 |
8 | # for ssl
9 | export P3S3F_USE_HTTPS=0 # set to 1 if following are configured
10 | export P3S3F_SRV_CRT=$HOME/ssl_cert/server.crt
11 | export P3S3F_SRV_KEY=$HOME/ssl_cert/server.key
12 |
13 | # s3 variables
14 | export AWS_CLIENT_ACCESS_KEY=XXXXXXXXXXXXXXXXXXXX
15 | export AWS_CLIENT_SECRET_KEY=XyXyXyXyXyXyXyXyXyXyXyXyXyXyXyXyXyXyXyXy
16 | export AWS_DEST_BUCKET=somebucket
17 | export AWS_ENDPOINT='https://some.s3.server.ac.uk'
18 |
19 | ### TO TEST SIMULATED accept/reject signing based on 'expected' data
20 | # the uuid in these = client_conf.js -> request.params.dataset
21 | #dd bs=1024 count=1 < /dev/zero > 1kb.txt
22 | #export P3S3F_EXAMPLE_ALLOW_SMALL=somebucket/731db507-1240-44ab-a616-de95f02aeaa4/1kb.txt
23 | #dd bs=1024 count=20000 < /dev/zero > 20mb.txt
24 | #export P3S3F_EXAMPLE_ALLOW_LARGE=somebucket/731db507-1240-44ab-a616-de95f02aeaa4/20mb.txt
25 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==0.12.2
2 | boto3==1.4.4
3 |
--------------------------------------------------------------------------------
/python/python3-flask-fine-uploader-s3/s3-sign-srv.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """
3 | s3-sign-srv.py
4 |
5 | Originally by: Mark Feltner (https://github.com/FineUploader/server-examples/tree/master/python/flask-fine-uploader-s3)
6 | Server-side S3 upload example for Fine Uploader
7 |
8 | Features:
9 | * Upload to S3
10 | * Delete from S3
11 | * Sign policy documents (simple uploads) and REST requests (chunked/multipart uploads)
12 | * non-CORS environment
13 |
14 | Enhanced by: Keiran Raine
15 | * Converted to python3
16 | * Added HTTPS
17 | * More configuration via environment
18 | * Indicate clear points for server side hooks
19 | * Standardised access to request data for server side hooks
20 | """
21 |
22 | import base64, hmac, hashlib, os, sys, re
23 |
24 | from flask import (Flask, json, jsonify, make_response, render_template,
25 | request, abort)
26 |
27 | AWS_CLIENT_SECRET_KEY = os.getenv('AWS_CLIENT_SECRET_KEY')
28 | AWS_CLIENT_ACCESS_KEY = os.getenv('AWS_CLIENT_ACCESS_KEY')
29 | AWS_ENDPOINT = os.getenv('AWS_ENDPOINT')
30 |
31 | app = Flask(__name__)
32 | app.config.from_object(__name__)
33 |
34 | def sign_policy(policy):
35 | """ Sign and return the policy document for a simple upload.
36 | http://aws.amazon.com/articles/1434/#signyours3postform """
37 | encoded_policy = base64.b64encode(policy)
38 | encoded_key = str(AWS_CLIENT_SECRET_KEY).encode()
39 | hmac_v = hmac.new(encoded_key,
40 | encoded_policy,
41 | hashlib.sha1)
42 | signature = base64.b64encode(hmac_v.digest())
43 | return {
44 | 'policy': encoded_policy.decode("utf-8"),
45 | 'signature': signature.decode("utf-8")
46 | }
47 |
48 | def sign_headers(headers):
49 | """ Sign and return the headers for a chunked upload. """
50 | encoded_key = str(AWS_CLIENT_SECRET_KEY).encode()
51 | hmac_v = hmac.new(encoded_key,
52 | bytearray(headers, 'utf-8'), # hmac doesn't want unicode
53 | hashlib.sha1)
54 | signature = base64.b64encode(hmac_v.digest())
55 | return {
56 | 'signature': signature.decode("utf-8")
57 | }
58 |
59 | def challenge_from_headers(headers):
60 | print(">>>>" + headers)
61 | patt = re.compile(r'(x-amz-meta-[^:]+):(.+)')
62 | for_challenge = {}
63 | for (key, value) in re.findall(patt, headers):
64 | for_challenge[key] = value
65 |
66 | # now figure out bucket key and uuid from request
67 | url_data = headers.split('\n')[-1].strip()
68 | (bucket, uuid, ext) = re.match(r'/([^/]+)/([^.]+)\.([^?]+)\?uploads', url_data).groups()
69 | for_challenge['bucket'] = bucket
70 | for_challenge['uuid'] = uuid
71 | for_challenge['key'] = uuid + '.' + ext
72 |
73 | return for_challenge
74 |
75 | def challenge_from_conditions(conditions):
76 | for_challenge = {}
77 | for item in conditions:
78 | for key, value in item.items():
79 | for_challenge[key] = value
80 | return for_challenge
81 |
82 | def challenge_is_good(to_challenge):
83 | """
84 | This is where you would run checks based on the 'x-amz-meta-' header elements
85 | set by Fine Uploader's JS.
86 | By default you get:
87 | key - name of the object in the bucket after upload
88 | uuid - the key's uuid, without the file extension
89 | name - original file name from the client (no path)
90 | bucket - destination bucket
91 | It is recommended that you augment this with additional request.params fields in the JS config.
92 | """
93 |
94 | transfer_req_for = '%s/%s/%s' % (to_challenge['bucket'],
95 | to_challenge['x-amz-meta-dataset'],
96 | to_challenge['x-amz-meta-qqfilename'])
97 |
98 | # this simulates signing rejection based on data being expected
99 | # REMOVE/REPLACE BLOCK IN PRODUCTION CODE
100 | if os.getenv('P3S3F_EXAMPLE_ALLOW_SMALL') is not None:
101 | if (transfer_req_for == os.getenv('P3S3F_EXAMPLE_ALLOW_SMALL') or
102 | transfer_req_for == os.getenv('P3S3F_EXAMPLE_ALLOW_LARGE')):
103 | return True
104 | return False
105 |
106 |
107 | return True
108 |
109 | def challenge_request(request):
110 | request_payload = request.get_json()
111 | response_data = None
112 | challenge_data = None
113 | if request_payload.get('headers'):
114 | # this is where you'd check against the back end that the upload is allowed
115 | # a POST without an 'uploadId' signifies the first request of a chunked upload
116 | if request_payload['headers'].startswith('POST') and 'uploadId' not in request_payload['headers']:
117 | print("\t**** Chunked signing request ****", file=sys.stderr)
118 | challenge_data = challenge_from_headers(request_payload['headers'])
119 | response_data = sign_headers(request_payload['headers'])
120 | else:
121 | # this is where you'd check against the back end that the upload is allowed
122 | print("\t**** Un-Chunked signing request ****", file=sys.stderr)
123 | challenge_data = challenge_from_conditions(request_payload['conditions'])
124 | response_data = sign_policy(request.data)
125 |
126 | # although we've already done the signing, now do the actual challenge
127 | if challenge_data is not None:
128 | print('\t' + str(challenge_data), file=sys.stderr)
129 | if challenge_is_good(challenge_data) is False:
130 | return None
131 |
132 | return response_data
133 |
134 | @app.route("/s3/sign", methods=['POST'])
135 | def s3_signature():
136 | """ Route for signing the policy document or REST headers. """
137 | response_data = challenge_request(request)
138 | if response_data is None:
139 | response_data = {'error': 'This file has not been approved for transfer; check that the upload targets the correct dataset.'}
140 | return jsonify(response_data)
141 |
142 |
143 | # Remove this route entirely on systems that shouldn't allow deletes
144 | @app.route("/s3/delete/", methods=['POST', 'DELETE'])
145 | def s3_delete(key=None):
146 | """ Route for deleting files off S3. Uses the SDK. """
147 |
148 | request_payload = request.values
149 |
150 | print("\t**** THIS DATA USED TO NOTIFY BACKEND OF DELETED DATA ****", file=sys.stderr)
151 | print("\tBucket: %s\n\tKey: %s" % (request_payload.get('bucket'), request_payload.get('key')), file=sys.stderr)
152 | print("\t**********************************************************", file=sys.stderr)
153 |
154 | try:
155 | import boto3
156 | from botocore.utils import fix_s3_host
157 |
158 | s3 = boto3.resource("s3",
159 | aws_access_key_id = AWS_CLIENT_ACCESS_KEY,
160 | aws_secret_access_key = AWS_CLIENT_SECRET_KEY,
161 | endpoint_url=AWS_ENDPOINT)
162 | s3.meta.client.meta.events.unregister('before-sign.s3', fix_s3_host)
163 |
164 | s3.meta.client.delete_object(Bucket=request_payload.get('bucket'),
165 | Key=request_payload.get('key'))
166 |
167 | return make_response('', 200)
168 | except ImportError:
169 | abort(500)
170 |
171 | @app.route("/s3/success", methods=['GET', 'POST'])
172 | def s3_success():
173 | """ Success redirect endpoint for <=IE9. """
174 |
175 | print("\t**** THIS DATA USED TO NOTIFY BACKEND OF COMPLETED DATA ****", file=sys.stderr)
176 | for key, value in request.form.items():
177 | # these don't have the 'x-amz-meta-' prefix
178 | print("\t%s : %s" % (key, value), file=sys.stderr)
179 | print("\t************************************************************", file=sys.stderr)
180 |
181 | return make_response()
182 |
183 | @app.route("/")
184 | def index():
185 | data = None
186 | with open('index.html', 'r') as myfile:
187 | data = myfile.read()
188 | return data
189 |
190 |
191 | def main(argv=None):
192 | print("\n#####\n!\tWARNING: This example is using app.run() please see:\n!\t\thttp://flask.pocoo.org/docs/latest/api/#flask.Flask.run\n#####\n", file=sys.stderr)
193 | threaded = False
194 | if os.getenv('P3S3F_THREADED') == '1':
195 | threaded = True
196 |
197 | if os.getenv('P3S3F_USE_HTTPS') == '1':
198 | print("Using TLS cert: %s" % os.getenv('P3S3F_SRV_CRT'), file=sys.stderr)
199 | context = (os.getenv('P3S3F_SRV_CRT'),
200 | os.getenv('P3S3F_SRV_KEY'))
201 | app.run(host=os.getenv('P3S3F_HOST_NAME'),
202 | port=int(os.getenv('P3S3F_HOST_PORT', '5000')),
203 | ssl_context=context,
204 | threaded=threaded) # debug=True
205 | else:
206 | app.run(host=os.getenv('P3S3F_HOST_NAME'),
207 | port=int(os.getenv('P3S3F_HOST_PORT', '5000')),
208 | threaded=threaded) # debug=True
209 | return 0 # success
210 |
211 | if __name__ == '__main__':
212 | status = main()
213 | sys.exit(status)
214 |
--------------------------------------------------------------------------------
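Once the server is running (see main() above), the simple-upload signing route can be smoke-tested without a browser. A sketch using only the standard library, assuming the default host/port from p3s3f.env; the conditions are the same hypothetical ones shown after client_conf.js, and the response is either the policy/signature pair or the error object, depending on challenge_is_good():

```python
# Smoke-test the /s3/sign route of s3-sign-srv.py with the standard library.
# The policy below is illustrative; real requests are generated by Fine Uploader.
import json
import urllib.request

policy = {
    "expiration": "2030-01-01T00:00:00Z",
    "conditions": [
        {"acl": "private"},
        {"bucket": "somebucket"},
        {"key": "731db507-1240-44ab-a616-de95f02aeaa4.txt"},
        {"x-amz-meta-dataset": "731db507-1240-44ab-a616-de95f02aeaa4"},
        {"x-amz-meta-qqfilename": "1kb.txt"},
    ],
}

req = urllib.request.Request(
    "http://127.0.0.1:5000/s3/sign",
    data=json.dumps(policy).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    # Either {"policy": ..., "signature": ...} or {"error": ...}
    print(json.loads(resp.read().decode("utf-8")))
```

Chunked uploads take the other branch in challenge_request() and post a 'headers' string to the same route instead of a policy document.

--------------------------------------------------------------------------------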
/python/python3-flask-fine-uploader-s3/templates/index.html:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/FineUploader/server-examples/3ada78700b8d913be5ad8c9ddf3183c9a23e7fe6/python/python3-flask-fine-uploader-s3/templates/index.html
--------------------------------------------------------------------------------
/rails.md:
--------------------------------------------------------------------------------
1 | Please have a look at [this wiki page](https://github.com/Widen/fine-uploader-server/wiki/Rails---CarrierWave) if you are using Rails
2 |
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
1 | # Server-Side Examples for the Widen Fine Uploader JavaScript Library #
2 | [](LICENSE)
3 |
4 | This repository contains server-side examples for users of Widen's Fine Uploader JavaScript library. Server-side examples
5 | for Fine Uploader S3 appear in an S3 directory under the language of choice. All other examples assume you are
6 | uploading files to your own server.
7 |
8 | For guidelines useful when integrating your server-side code with Fine Uploader, see the [documentation site](http://docs.fineuploader.com).
9 |
10 |
11 | #### Support ####
12 |
13 | ##### Traditional upload examples (upload to your own server)
14 | Widen supports the following traditional endpoint examples (all other examples were provided, and are supported, by third parties):
15 | * PHP
16 | * node.js
17 | * Java
18 | * Python (Flask & Django)
19 |
20 | ##### Fine Uploader S3 examples
21 | Widen supports upload-to-s3 server-side examples in the following languages (in the associated language's S3 subdirectory):
22 | * PHP
23 | * node.js
24 | * Java
25 | * Python
26 |
27 | ##### Fine Uploader Azure examples
28 | Widen supports upload-to-azure server-side examples in the following languages (in the associated language's azure subdirectory):
29 | * C#
30 |
31 | It is possible, and quite easy, to write your own server-side code for Fine Uploader S3 in another language. See
32 | the documentation for more details.
33 |
34 | #### License ####
35 | All server-side examples are MIT licensed. Please see [license.txt](license.txt) for details.
36 |
--------------------------------------------------------------------------------