├── .gitignore
├── api
│   ├── v1
│   │   ├── api.go
│   │   └── web.go
│   ├── v3
│   │   ├── api.go
│   │   ├── api_test.go
│   │   ├── file.go
│   │   └── store.go
│   └── v4
│       ├── api.go
│       ├── api_test.go
│       ├── file.go
│       ├── notes.md
│       └── store.go
├── cmd
│   ├── composer
│   │   └── main.go
│   ├── resumable-client-chucked
│   │   ├── main.go
│   │   └── main.py
│   ├── resumable-client
│   │   └── main.go
│   └── server
│       └── main.go
├── docker-compose.yml
├── go.mod
├── go.sum
├── next.md
├── notes.md
├── scripts
│   ├── grafana
│   │   └── provisioning
│   │       ├── dashboards
│   │       │   └── demo.yml
│   │       └── datasources
│   │           └── demo.yml
│   ├── locust
│   │   ├── .gitignore
│   │   ├── locustfiles
│   │   │   └── locustfile.py
│   │   └── requirements.txt
│   └── prometheus
│       └── prometheus.yml
└── server
    ├── log.go
    ├── opentelemetry.go
    └── server.go
/.gitignore:
--------------------------------------------------------------------------------
1 | testfile
--------------------------------------------------------------------------------
/api/v1/api.go:
--------------------------------------------------------------------------------
1 | package v1
2 |
3 | import (
4 | "io"
5 | "net/http"
6 | "os"
7 | "path/filepath"
8 |
9 | "github.com/rs/zerolog/log"
10 | )
11 |
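// FormUpload returns a handler for multipart/form-data uploads. The request body is
// capped at 10 MiB via http.MaxBytesReader; ParseMultipartForm keeps up to 5 MiB in
// memory and spills the remainder to disk, which RemoveAll cleans up afterwards. The
// uploaded part is copied into a temporary file that is removed when the handler returns.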
12 | func FormUpload() http.HandlerFunc {
13 | return func(w http.ResponseWriter, r *http.Request) {
14 | // log content type
15 | log.Debug().Str("content_type", r.Header.Get("Content-Type")).Msg("Request Content Type")
16 |
17 | // limit the size of the request body
18 | r.Body = http.MaxBytesReader(w, r.Body, 10<<20) //10MB
19 | // parse the form
20 | if err := r.ParseMultipartForm(5 << 20); err != nil {
21 | log.Error().Err(err).Msg("Error Parsing the Form")
22 | w.WriteHeader(http.StatusBadRequest)
23 | return
24 | }
25 | defer r.MultipartForm.RemoveAll()
26 |
27 | // get a handle to the file
28 | file, handler, err := r.FormFile("file")
29 | if err != nil {
30 | log.Error().Err(err).Msg("Error Retrieving the File")
31 | w.WriteHeader(http.StatusBadRequest)
32 | w.Write([]byte("Error Retrieving the File"))
33 | return
34 | }
35 | defer file.Close()
36 |
37 | // create a temporary file to hold the uploaded content
38 | f, err := os.CreateTemp("/tmp", "sample-")
39 | if err != nil {
40 | w.WriteHeader(http.StatusInternalServerError)
41 | w.Write([]byte("Error Creating the Temp File"))
42 | return
43 | }
44 |
45 | defer f.Close()
46 | defer os.Remove(f.Name())
47 |
48 | n, err := io.Copy(f, file)
49 | if err != nil {
50 | log.Error().Err(err).Msg("Error Copying the File") // note: the copy error is only logged; the handler still responds 200 OK
51 | }
52 |
53 | log.Info().Str("file_name", handler.Filename).
54 | Int64("file_size", handler.Size).
55 | Int64("written_size", n).
56 | Str("stored_file", f.Name()).
57 | Msg("File Uploaded")
58 |
59 | w.WriteHeader(http.StatusOK)
60 | }
61 | }
62 |
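// BinaryUpload returns a handler for raw binary uploads: the whole request body is the
// file content and the target name is taken from the X-Api-File-Name header. The body is
// capped at 10 MiB and the stored file is removed again when the handler returns.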
63 | func BinaryUpload() http.HandlerFunc {
64 | return func(w http.ResponseWriter, r *http.Request) {
65 | // limit the size of the request body
66 | r.Body = http.MaxBytesReader(w, r.Body, 10<<20) //10MB
67 |
68 | defer r.Body.Close()
69 | contentType := r.Header.Get("Content-Type")
70 | contentLength := r.Header.Get("Content-Length")
71 | fileName := r.Header.Get("X-Api-File-Name")
72 | log.Debug().
73 | Str("content_type", contentType).
74 | Str("content_length", contentLength).
75 | Str("file_name", fileName).
76 | Msg("received binary data")
77 |
78 | f, err := os.OpenFile(filepath.Join("/tmp", filepath.Base(fileName)), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) // filepath.Base strips any path components from the client-supplied name
79 | if err != nil {
80 | w.WriteHeader(http.StatusBadRequest)
81 | w.Write([]byte("Error Creating the File"))
82 | return
83 | }
84 | defer f.Close()
85 | defer os.Remove(f.Name())
86 | n, err := io.Copy(f, r.Body)
87 | if err != nil {
88 | log.Error().Err(err).Msg("Error Copying the File")
89 | }
90 |
91 | log.Info().
92 | Int64("written_size", n).
93 | Str("stored_file", f.Name()).
94 | Msg("File Uploaded")
95 |
96 | w.WriteHeader(http.StatusOK)
97 | }
98 | }
99 |
--------------------------------------------------------------------------------
/api/v1/web.go:
--------------------------------------------------------------------------------
1 | package v1
2 |
3 | import "net/http"
4 |
5 | func Web() http.HandlerFunc {
6 | return func(w http.ResponseWriter, r *http.Request) {
7 | html := `
8 |
9 |
10 |
11 | File Upload
12 |
20 |
21 |
22 |
31 |
32 |
65 |
66 | `
67 |
68 | w.Header().Set("Content-Type", "text/html")
69 | w.Write([]byte(html))
70 | }
71 | }
--------------------------------------------------------------------------------
/api/v3/api.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import (
4 | "crypto/md5"
5 | "crypto/sha1"
6 | "encoding/hex"
7 | "encoding/json"
8 | "errors"
9 | "fmt"
10 | "hash"
11 | "io"
12 | "net"
13 | "net/http"
14 | "os"
15 | "strconv"
16 | "strings"
17 | "time"
18 |
19 | "github.com/gorilla/mux"
20 | "github.com/rs/zerolog/log"
21 | )
22 |
23 | const (
24 | TusResumableHeader = "Tus-Resumable"
25 | TusExtensionHeader = "Tus-Extension"
26 | TusVersionHeader = "Tus-Version"
27 | TusMaxSizeHeader = "Tus-Max-Size"
28 | TusChecksumAlgorithmHeader = "Tus-Checksum-Algorithm"
29 |
30 | TusVersion = "1.0.0"
31 | UploadOffsetHeader = "Upload-Offset"
32 | UploadLengthHeader = "Upload-Length"
33 | UploadMetadataHeader = "Upload-Metadata"
34 | UploadDeferLengthHeader = "Upload-Defer-Length"
35 | UploadExpiresHeader = "Upload-Expires"
36 | UploadChecksumHeader = "Upload-Checksum"
37 | ContentTypeHeader = "Content-Type"
38 |
39 | UploadMaxDuration = 10 * time.Minute
40 | )
41 |
42 | type Extension string
43 |
44 | const (
45 | CreationExtension Extension = "creation"
46 | ExpirationExtension Extension = "expiration"
47 | ChecksumExtension Extension = "checksum"
48 | TerminationExtension Extension = "termination"
49 | ConcatenationExtension Extension = "concatenation"
50 | )
51 |
52 | type Extensions []Extension
53 |
54 | func (e Extensions) Enabled(ext Extension) bool {
55 | for _, v := range e {
56 | if v == ext {
57 | return true
58 | }
59 | }
60 | return false
61 | }
62 |
63 | func (e Extensions) String() string {
64 | var s []string
65 | for _, v := range e {
66 | s = append(s, string(v))
67 | }
68 | return strings.Join(s, ",")
69 | }
70 |
71 | var (
72 | defaultMaxSize = uint64(0)
73 | defaultSupportedExtensions = Extensions{
74 | CreationExtension,
75 | ExpirationExtension,
76 | ChecksumExtension,
77 | }
78 | SupportedTusVersion = []string{
79 | "0.2.0",
80 | "1.0.0",
81 | }
82 | SupportedChecksumAlgorithms = []string{
83 | "sha1",
84 | "md5",
85 | }
86 | )
87 |
88 | type Options struct {
89 | Extensions Extensions
90 | MaxSize uint64
91 | }
92 |
93 | type Option func(*Options)
94 |
95 | func WithExtensions(extensions Extensions) Option {
96 | return func(o *Options) {
97 | o.Extensions = extensions
98 | }
99 | }
100 |
101 | func WithMaxSize(size uint64) Option {
102 | return func(o *Options) {
103 | o.MaxSize = size
104 | }
105 | }
106 |
107 | func NewController(s Storage, opts ...Option) Controller {
108 | o := Options{
109 | Extensions: defaultSupportedExtensions,
110 | MaxSize: defaultMaxSize,
111 | }
112 | for _, opt := range opts {
113 | opt(&o)
114 | }
115 | return Controller{
116 | store: s,
117 | extensions: o.Extensions,
118 | maxSize: o.MaxSize,
119 | }
120 | }
121 |
122 | type Storage interface {
123 | Find(id string) (File, bool, error)
124 | Save(id string, f File)
125 | }
126 |
127 | type Controller struct {
128 | store Storage
129 | extensions Extensions
130 | maxSize uint64
131 | }
132 |
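// TusResumableHeaderCheck is middleware that rejects any non-OPTIONS request whose
// Tus-Resumable header is missing (400) or names an unsupported protocol version (412).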
133 | func TusResumableHeaderCheck(next http.Handler) http.Handler {
134 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
135 | if r.Method == http.MethodOptions {
136 | next.ServeHTTP(w, r)
137 | return
138 | }
139 |
140 | if r.Header.Get(TusResumableHeader) == "" {
141 | w.WriteHeader(http.StatusBadRequest)
142 | w.Write([]byte("Tus-Resumable header is missing"))
143 | return
144 | }
145 |
146 | tusVersion := r.Header.Get(TusResumableHeader)
147 | supported := false
148 | for _, version := range SupportedTusVersion {
149 | if tusVersion == version {
150 | supported = true
151 | break
152 | }
153 | }
154 | if !supported {
155 | w.WriteHeader(http.StatusPreconditionFailed)
156 | w.Write([]byte("Tus version not supported"))
157 | return
158 | }
159 | next.ServeHTTP(w, r)
160 | })
161 | }
162 |
163 | func TusResumableHeaderInjections(next http.Handler) http.Handler {
164 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
165 | if r.Method != http.MethodOptions {
166 | w.Header().Set(TusResumableHeader, TusVersion)
167 | }
168 | next.ServeHTTP(w, r)
169 | })
170 | }
171 |
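// GetConfig handles OPTIONS requests: it advertises the supported protocol versions,
// the enabled extensions, the optional maximum upload size and the supported checksum
// algorithms, then responds with 204 No Content.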
172 | func (c *Controller) GetConfig() http.HandlerFunc {
173 | return func(w http.ResponseWriter, r *http.Request) {
174 | w.Header().Add(TusVersionHeader, strings.Join(SupportedTusVersion, ","))
175 | if len(c.extensions) > 0 {
176 | w.Header().Add(TusExtensionHeader, c.extensions.String())
177 | }
178 | if c.maxSize != 0 {
179 | w.Header().Add(TusMaxSizeHeader, fmt.Sprint(c.maxSize))
180 | }
181 | if c.extensions.Enabled(ChecksumExtension) {
182 | w.Header().Add(TusChecksumAlgorithmHeader, strings.Join(SupportedChecksumAlgorithms, ","))
183 | }
184 | w.WriteHeader(http.StatusNoContent)
185 | }
186 | }
187 |
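// GetOffset handles HEAD requests: it reports the current Upload-Offset (and
// Upload-Length when the total size is known), disables caching with
// Cache-Control: no-store, and answers 410 Gone for uploads whose expiration has passed.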
188 | func (c *Controller) GetOffset() http.HandlerFunc {
189 | return func(w http.ResponseWriter, r *http.Request) {
190 | vars := mux.Vars(r)
191 | fileID := vars["file_id"]
192 | log.Debug().Str("file_id", fileID).Msg("Check request path and query")
193 | fm, ok, err := c.store.Find(fileID)
194 | if err != nil {
195 | writeError(w, http.StatusInternalServerError, err)
196 | return
197 | }
198 | if !ok {
199 | w.WriteHeader(http.StatusNotFound)
200 | w.Write([]byte("File not found"))
201 | return
202 | }
203 |
204 | w.Header().Add(UploadOffsetHeader, fmt.Sprint(fm.UploadedSize))
205 | if !fm.IsDeferLength {
206 | w.Header().Add(UploadLengthHeader, fmt.Sprint(fm.TotalSize))
207 | }
208 |
209 | w.Header().Add("Cache-Control", "no-store")
210 | if !fm.ExpiresAt.IsZero() {
211 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
212 | }
213 |
214 | if !fm.ExpiresAt.IsZero() && fm.ExpiresAt.Before(time.Now()) {
215 | log.Debug().Str("file_id", fileID).Msg("file expired")
216 | writeError(w, http.StatusGone, errors.New("file expired"))
217 | return
218 | }
219 |
220 | w.WriteHeader(http.StatusNoContent)
221 | }
222 | }
223 |
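// newChecksum parses an Upload-Checksum header of the form "<algorithm> <value>";
// only md5 and sha1 are accepted. An empty header yields a zero checksum.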
224 | func newChecksum(value string) (checksum, error) {
225 | if value == "" {
226 | return checksum{}, nil
227 | }
228 | d := strings.Split(value, " ")
229 | if len(d) != 2 {
230 | return checksum{}, fmt.Errorf("invalid checksum format")
231 | }
232 | if d[0] != "md5" && d[0] != "sha1" {
233 | return checksum{}, fmt.Errorf("unsupported checksum algorithm")
234 | }
235 | return checksum{
236 | Algorithm: d[0],
237 | Value: d[1],
238 | }, nil
239 | }
240 |
241 | type checksum struct {
242 | Algorithm string
243 | Value string
244 | }
245 |
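// ResumeUpload handles PATCH requests: it validates the Content-Type, the optional
// Upload-Checksum header and the Upload-Offset against the stored offset, appends the
// request body to the file on disk (verifying the checksum when one was supplied),
// persists the new offset and replies 204 No Content with the updated Upload-Offset.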
246 | func (c *Controller) ResumeUpload() http.HandlerFunc {
247 | return func(w http.ResponseWriter, r *http.Request) {
248 | r.Body = http.MaxBytesReader(w, r.Body, 64<<20) //64MB
249 | doneCh := make(chan struct{})
250 | defer close(doneCh)
251 |
252 | go func() {
253 | select {
254 | case <-doneCh:
255 | log.Info().Msg("Upload completed")
256 | return
257 | case <-r.Context().Done():
258 | log.Warn().Err(r.Context().Err()).Msg("Upload canceled")
259 | return
260 | }
261 | }()
262 |
263 | // r.Body = http.MaxBytesReader(w, r.Body, 10<<20) //10MB
264 | vars := mux.Vars(r)
265 | fileID := vars["file_id"]
266 |
267 | contentType := r.Header.Get(ContentTypeHeader)
268 | if contentType != "application/offset+octet-stream" {
269 | log.Debug().Str("content_type", contentType).Msg("Invalid Content-Type")
270 | writeError(w, http.StatusUnsupportedMediaType, errors.New("invalid Content-Type header: expected application/offset+octet-stream"))
271 | return
272 | }
273 |
274 | var checksum checksum
275 | if c.extensions.Enabled(ChecksumExtension) {
276 | var err error
277 | checksum, err = newChecksum(r.Header.Get(UploadChecksumHeader))
278 | if err != nil {
279 | log.Debug().Err(err).Msg("Invalid checksum header")
280 | writeError(w, http.StatusBadRequest, err)
281 | return
282 | }
283 | }
284 |
285 | fm, ok, err := c.store.Find(fileID)
286 | if err != nil {
287 | writeError(w, http.StatusInternalServerError, err)
288 | return
289 | }
290 | if !ok {
291 | log.Debug().Str("file_id", fileID).Msg("file not found")
292 | writeError(w, http.StatusNotFound, errors.New("file not found"))
293 | return
294 | }
295 |
296 | if c.extensions.Enabled(ExpirationExtension) && !fm.ExpiresAt.IsZero() && fm.ExpiresAt.Before(time.Now()) {
297 | log.Debug().Str("file_id", fileID).Msg("file expired")
298 | writeError(w, http.StatusGone, errors.New("file expired"))
299 | return
300 | }
301 |
302 | uploadOffset := r.Header.Get(UploadOffsetHeader)
303 | offset, err := strconv.ParseUint(uploadOffset, 10, 64)
304 | if err != nil {
305 | log.Debug().Err(err).
306 | Str("upload_offset", uploadOffset).
307 | Msg("Invalid Upload-Offset header: not a number")
308 | writeError(w, http.StatusBadRequest, errors.New("invalid Upload-Offset header: not a number"))
309 | return
310 | }
311 |
312 | log.Debug().Uint64("offset_request", offset).
313 | Uint64("uploaded_size", fm.UploadedSize).
314 | Msg("Check size")
315 |
316 | if offset != fm.UploadedSize {
317 | log.Warn().Msg("upload-Offset header does not match the current offset")
318 | writeError(w, http.StatusConflict, errors.New("upload-Offset header does not match the current offset"))
319 | return
320 | }
321 |
322 | f, err := os.OpenFile(fm.Path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
323 | if err != nil {
324 | log.Error().Err(err).Msg("error opening the file")
325 | writeError(w, http.StatusBadRequest, errors.New("error opening the file"))
326 | return
327 | }
328 | defer f.Close()
329 | log.Debug().Str("stored_file", f.Name()).Msg("File Opened")
330 |
331 | // Store the current position before writing
332 | originalPos, err := f.Seek(0, io.SeekEnd)
333 | if err != nil {
334 | log.Error().Err(err).Msg("error getting file position")
335 | writeError(w, http.StatusInternalServerError, errors.New("error preparing file"))
336 | return
337 | }
338 |
339 | var n int64
340 | if c.extensions.Enabled(ChecksumExtension) && checksum.Algorithm != "" {
341 | var hash hash.Hash
342 | switch checksum.Algorithm {
343 | case "md5":
344 | hash = md5.New()
345 | case "sha1":
346 | hash = sha1.New()
347 | default:
348 | writeError(w, http.StatusBadRequest, errors.New("unsupported checksum algorithm"))
349 | return
350 | }
351 |
352 | log.Debug().Msg("write the data to the file")
353 |
354 | reader := io.TeeReader(r.Body, hash)
355 | n, err = io.Copy(f, reader)
356 | if err != nil {
357 | // Revert to original position on error
358 | f.Seek(originalPos, io.SeekStart)
359 | f.Truncate(originalPos) // Ensure file is truncated to original size
360 |
361 | log.Error().Err(err).Msg("error writing file")
362 | writeError(w, http.StatusInternalServerError, errors.New("error writing file"))
363 | return
364 | }
365 |
366 | cur, _ := f.Seek(0, io.SeekCurrent)
367 |
368 | log.Debug().
369 | Int64("written_size", n).
370 | Int64("cur", cur).
371 | Msg("temporary data has been written, but not flushed")
372 |
373 | log.Debug().Msg("validate the checksum")
374 |
375 | calculatedHash := hex.EncodeToString(hash.Sum(nil))
376 | if calculatedHash != checksum.Value {
377 | // Revert to original position if checksum fails
378 | f.Seek(originalPos, io.SeekStart)
379 | f.Truncate(originalPos) // Ensure file is truncated to original size
380 | log.Debug().Msg("Checksum mismatch")
381 | writeError(w, 460, errors.New("checksum mismatch"))
382 | return
383 | }
384 |
385 | fm.UploadedSize += uint64(n)
386 | c.store.Save(fm.ID, fm)
387 | } else {
388 | n, err = io.Copy(f, r.Body)
389 | if err != nil {
390 |
391 | fm.UploadedSize += uint64(n)
392 | c.store.Save(fm.ID, fm)
393 |
394 | log.Info().
395 | Int64("written_size", n).
396 | Msg("partial message is written")
397 |
398 | var netErr net.Error
399 | if errors.As(err, &netErr) && netErr.Timeout() {
400 | log.Warn().Err(err).Msg("network timeout while writing file")
401 | writeError(w, http.StatusRequestTimeout, fmt.Errorf("network timeout: %w", err))
402 | return
403 | }
404 |
405 | log.Error().Err(err).Msg("error writing the file")
406 | writeError(w, http.StatusInternalServerError, fmt.Errorf("error writing the file: %w", err))
407 | return
408 | }
409 | fm.UploadedSize += uint64(n)
410 | c.store.Save(fm.ID, fm)
411 | }
412 |
413 | log.Debug().
414 | Int64("written_size", n).
415 | Str("stored_file", f.Name()).
416 | Msg("File Uploaded")
417 |
418 | log.Debug().Msg("prepare the response header")
419 | w.Header().Add(UploadOffsetHeader, fmt.Sprint(fm.UploadedSize))
420 | if !fm.ExpiresAt.IsZero() {
421 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
422 | }
423 | w.WriteHeader(http.StatusNoContent)
424 | }
425 | }
426 |
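// CreateUpload implements the tus creation extension: it reads Upload-Length (or
// Upload-Defer-Length), parses Upload-Metadata, stores the new file's metadata with an
// expiration time, and returns 201 Created with a Location and Upload-Expires header.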
427 | func (c *Controller) CreateUpload() http.HandlerFunc {
428 | return func(w http.ResponseWriter, r *http.Request) {
429 | uploadDeferLength := r.Header.Get(UploadDeferLengthHeader)
430 | if uploadDeferLength != "" && uploadDeferLength != "1" {
431 | writeError(w, http.StatusBadRequest, errors.New("invalid Upload-Defer-Length header"))
432 | return
433 | }
434 |
435 | fm := NewFile()
436 | fm.ExpiresAt = time.Now().Add(UploadMaxDuration)
437 |
438 | isDeferLength := uploadDeferLength == "1"
439 | if !isDeferLength {
440 | totalLength := r.Header.Get(UploadLengthHeader)
441 | totalSize, err := strconv.ParseUint(totalLength, 10, 64)
442 | if err != nil {
443 | writeError(w, http.StatusBadRequest, errors.New("invalid Upload-Length header"))
444 | return
445 | }
446 | fm.IsDeferLength = false
447 | fm.TotalSize = totalSize
448 | }
449 |
450 | if c.maxSize > 0 && fm.TotalSize > c.maxSize {
451 | writeError(w, http.StatusRequestEntityTooLarge, errors.New("upload length exceeds the maximum size"))
452 | return
453 | }
454 |
455 | uploadMetadata := r.Header.Get(UploadMetadataHeader)
456 | log.Debug().Str("upload_metadata", uploadMetadata).Msg("Check request header")
457 |
458 | err := fm.ParseMetadata(uploadMetadata)
459 | if err != nil {
460 | writeError(w, http.StatusBadRequest, err)
461 | return
462 | }
463 |
464 | c.store.Save(fm.ID, fm)
465 |
466 | w.Header().Add("Location", fmt.Sprintf("http://127.0.0.1:8080/files/%s", fm.ID))
467 | if !fm.ExpiresAt.IsZero() {
468 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
469 | }
470 | w.WriteHeader(http.StatusCreated)
471 | }
472 | }
473 |
474 | func uploadExpiresAt(t time.Time) string {
475 | return t.Format("Mon, 02 Jan 2006 15:04:05 GMT")
476 | }
477 |
478 | type cError struct {
479 | Message string `json:"message"`
480 | }
481 |
482 | func writeError(w http.ResponseWriter, code int, err error) {
483 | b, _ := json.Marshal(cError{Message: err.Error()})
484 | // headers must be set before WriteHeader; otherwise the Content-Type is ignored
485 | w.Header().Set("Content-Type", "application/json")
486 | w.WriteHeader(code)
487 | w.Write(b)
488 | }
489 |
--------------------------------------------------------------------------------
/api/v3/api_test.go:
--------------------------------------------------------------------------------
1 | package v3_test
2 |
3 | import (
4 | "bytes"
5 | "net/http"
6 | "net/http/httptest"
7 | "testing"
8 | "time"
9 |
10 | "github.com/gorilla/mux"
11 | . "github.com/imrenagi/go-http-upload/api/v3"
12 | "github.com/stretchr/testify/assert"
13 | )
14 |
15 | func newFakeStore(m map[string]File) *fakeStore {
16 | return &fakeStore{
17 | files: m,
18 | }
19 | }
20 |
21 | type fakeStore struct {
22 | files map[string]File
23 | }
24 |
25 | func (s *fakeStore) Find(id string) (File, bool, error) {
26 | metadata, exists := s.files[id]
27 | return metadata, exists, nil
28 | }
29 |
30 | func (s *fakeStore) Save(id string, metadata File) {
31 | s.files[id] = metadata
32 | }
33 |
34 | func TestGetOffset(t *testing.T) {
35 | t.Run("The Server MUST always include the Upload-Offset header in the response for a HEAD request. The Server SHOULD acknowledge successful HEAD requests with a 200 OK or 204 No Content status.",
36 | func(t *testing.T) {
37 | m := map[string]File{
38 | "a": {
39 | ID: "a",
40 | UploadedSize: 0,
41 | },
42 | }
43 | ctrl := NewController(newFakeStore(m))
44 |
45 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
46 | w := httptest.NewRecorder()
47 |
48 | router := mux.NewRouter()
49 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
50 | router.ServeHTTP(w, req)
51 |
52 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
53 | assert.Equal(t, "0", w.Header().Get(UploadOffsetHeader), "Expected Upload-Offset header to be 0, got %v", w.Header().Get(UploadOffsetHeader))
54 |
55 | //The Server MUST prevent the client and/or proxies from caching the response by adding the Cache-Control: no-store header to the response.
56 | assert.Equal(t, "no-store", w.Header().Get("Cache-Control"), "Expected Cache-Control header to be no-store, got %v", w.Header().Get("Cache-Control"))
57 | })
58 |
59 | t.Run("If the size of the upload is known, the Server MUST include the Upload-Length header in the response.", func(t *testing.T) {
60 | m := map[string]File{
61 | "a": {
62 | ID: "a",
63 | UploadedSize: 19,
64 | TotalSize: 100,
65 | },
66 | }
67 | ctrl := NewController(newFakeStore(m))
68 |
69 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
70 | w := httptest.NewRecorder()
71 |
72 | router := mux.NewRouter()
73 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
74 | router.ServeHTTP(w, req)
75 |
76 | assert.Equal(t, "100", w.Header().Get(UploadLengthHeader))
77 | assert.Equal(t, "19", w.Header().Get(UploadOffsetHeader))
78 | })
79 |
80 | t.Run("If the resource is not found, the Server SHOULD return the 404 Not Found status without the Upload-Offset header.", func(t *testing.T) {
81 | m := map[string]File{}
82 | ctrl := NewController(newFakeStore(m))
83 |
84 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
85 | w := httptest.NewRecorder()
86 |
87 | router := mux.NewRouter()
88 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
89 | router.ServeHTTP(w, req)
90 |
91 | assert.Equal(t, http.StatusNotFound, w.Code)
92 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
93 | })
94 |
95 | }
96 |
97 | func TestTusResumableHeader(t *testing.T) {
98 | t.Run("Return 400 if The Tus-Resumable header is not included in HEAD request", func(t *testing.T) {
99 | m := map[string]File{}
100 | ctrl := NewController(newFakeStore(m))
101 |
102 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
103 | w := httptest.NewRecorder()
104 |
105 | router := mux.NewRouter()
106 | router.Use(TusResumableHeaderCheck)
107 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
108 | router.ServeHTTP(w, req)
109 |
110 | assert.Equal(t, http.StatusBadRequest, w.Code)
111 | // the Server MUST NOT process the request.
112 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
113 | assert.Empty(t, w.Header().Get(UploadLengthHeader))
114 | })
115 |
116 | t.Run("Return 412 if The Tus-Resumable header is not supported by the server. server must not process the request", func(t *testing.T) {
117 | m := map[string]File{}
118 | ctrl := NewController(newFakeStore(m))
119 |
120 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
121 | req.Header.Set(TusResumableHeader, "1.0.1")
122 | w := httptest.NewRecorder()
123 |
124 | router := mux.NewRouter()
125 | router.Use(TusResumableHeaderCheck)
126 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
127 | router.ServeHTTP(w, req)
128 |
129 | assert.Equal(t, http.StatusPreconditionFailed, w.Code)
130 | // the Server MUST NOT process the request.
131 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
132 | assert.Empty(t, w.Header().Get(UploadLengthHeader))
133 | })
134 |
135 | t.Run("Multiple values of the Tus-Resumable header can be supported by the server", func(t *testing.T) {
136 | m := map[string]File{
137 | "a": {
138 | ID: "a",
139 | UploadedSize: 19,
140 | TotalSize: 100,
141 | },
142 | }
143 | ctrl := NewController(newFakeStore(m))
144 | router := mux.NewRouter()
145 | router.Use(TusResumableHeaderCheck)
146 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
147 |
148 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
149 | req.Header.Set(TusResumableHeader, "0.2.0")
150 | w := httptest.NewRecorder()
151 | router.ServeHTTP(w, req)
152 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
153 |
154 | req = httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
155 | req.Header.Set(TusResumableHeader, "1.0.0")
156 | w = httptest.NewRecorder()
157 | router.ServeHTTP(w, req)
158 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
159 | })
160 |
161 | t.Run("The Tus-Resumable header MUST be included in every response to HEAD requests.", func(t *testing.T) {
162 | m := map[string]File{
163 | "a": {
164 | ID: "a",
165 | UploadedSize: 19,
166 | TotalSize: 100,
167 | },
168 | }
169 | ctrl := NewController(newFakeStore(m))
170 | router := mux.NewRouter()
171 | router.Use(TusResumableHeaderInjections)
172 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
173 |
174 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
175 | w := httptest.NewRecorder()
176 | router.ServeHTTP(w, req)
177 | assert.Equal(t, "1.0.0", w.Header().Get(TusResumableHeader))
178 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
179 | })
180 | }
181 |
182 | func TestGetConfig(t *testing.T) {
183 | t.Run("A successful response indicated by the 204 No Content or 200 OK status MUST contain the Tus-Version header", func(t *testing.T) {
184 | m := map[string]File{}
185 | ctrl := NewController(newFakeStore(m))
186 |
187 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
188 | w := httptest.NewRecorder()
189 |
190 | router := mux.NewRouter()
191 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
192 | router.ServeHTTP(w, req)
193 |
194 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
195 | assert.Equal(t, "0.2.0,1.0.0", w.Header().Get(TusVersionHeader))
196 | assert.Empty(t, w.Header().Get(TusResumableHeader))
197 | })
198 |
199 | t.Run("It MAY include the Tus-Extension and Tus-Max-Size headers.", func(t *testing.T) {
200 | m := map[string]File{}
201 | ctrl := NewController(newFakeStore(m),
202 | WithExtensions(Extensions{CreationExtension,
203 | ExpirationExtension,
204 | ChecksumExtension}),
205 | WithMaxSize(1073741824))
206 |
207 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
208 | w := httptest.NewRecorder()
209 |
210 | router := mux.NewRouter()
211 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
212 | router.ServeHTTP(w, req)
213 |
214 | assert.Equal(t, "creation,expiration,checksum", w.Header().Get(TusExtensionHeader))
215 | assert.Equal(t, "1073741824", w.Header().Get(TusMaxSizeHeader))
216 | assert.Equal(t, "sha1,md5", w.Header().Get(TusChecksumAlgorithmHeader))
217 | })
218 |
219 | t.Run("The extension header must be omitted if the server does not support any extensions", func(t *testing.T) {
220 | m := map[string]File{}
221 | ctrl := NewController(newFakeStore(m),
222 | WithExtensions(Extensions{}),
223 | )
224 |
225 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
226 | w := httptest.NewRecorder()
227 |
228 | router := mux.NewRouter()
229 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
230 | router.ServeHTTP(w, req)
231 |
232 | assert.Empty(t, w.Header().Get(TusExtensionHeader))
233 | assert.Empty(t, w.Header().Get(TusMaxSizeHeader))
234 | assert.Empty(t, w.Header().Get(TusChecksumAlgorithmHeader))
235 |
236 | })
237 | }
238 |
239 | func TestResumeUpload(t *testing.T) {
240 |
241 | t.Run("Upload-Offset must be included in the request", func(t *testing.T) {
242 | m := map[string]File{
243 | "a": {
244 | ID: "a",
245 | UploadedSize: 0,
246 | TotalSize: 10,
247 | },
248 | }
249 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
250 |
251 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
252 | w := httptest.NewRecorder()
253 |
254 | router := mux.NewRouter()
255 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
256 | router.ServeHTTP(w, req)
257 |
258 | assert.Equal(t, http.StatusBadRequest, w.Code)
259 | assert.Equal(t, `{"message":"invalid Upload-Offset header: not a number"}`, w.Body.String())
260 | })
261 |
262 | t.Run("Upload-Offset must be included in the request with value gte 0", func(t *testing.T) {
263 | m := map[string]File{
264 | "a": {
265 | ID: "a",
266 | UploadedSize: 0,
267 | TotalSize: 10,
268 | },
269 | }
270 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
271 |
272 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
273 | req.Header.Set("Upload-Offset", "-1")
274 | w := httptest.NewRecorder()
275 |
276 | router := mux.NewRouter()
277 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
278 | router.ServeHTTP(w, req)
279 |
280 | assert.Equal(t, http.StatusBadRequest, w.Code)
281 | assert.Equal(t, `{"message":"invalid Upload-Offset header: negative value"}`, w.Body.String())
282 | })
283 |
284 | t.Run("When a PATCH request doesn't use Content-Type: application/offset+octet-stream, the server SHOULD return a 415 Unsupported Media Type status", func(t *testing.T) {
285 | m := map[string]File{
286 | "a": {
287 | ID: "a",
288 | UploadedSize: 0,
289 | TotalSize: 10,
290 | },
291 | }
292 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
293 |
294 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
295 | req.Header.Set("Content-Type", "application/json")
296 | req.Header.Set("Upload-Offset", "0")
297 | w := httptest.NewRecorder()
298 |
299 | router := mux.NewRouter()
300 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
301 | router.ServeHTTP(w, req)
302 |
303 | assert.Equal(t, http.StatusUnsupportedMediaType, w.Code)
304 | assert.Equal(t, `{"message":"invalid Content-Type header: expected application/offset+octet-stream"}`, w.Body.String())
305 | })
306 |
307 | t.Run("If the server receives a PATCH request against a non-existent resource it SHOULD return a 404 Not Found status.", func(t *testing.T) {
308 | m := map[string]File{}
309 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
310 |
311 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
312 | req.Header.Set("Content-Type", "application/offset+octet-stream")
313 | req.Header.Set("Upload-Offset", "0")
314 | w := httptest.NewRecorder()
315 |
316 | router := mux.NewRouter()
317 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
318 | router.ServeHTTP(w, req)
319 |
320 | assert.Equal(t, http.StatusNotFound, w.Code)
321 | assert.Equal(t, `{"message":"file not found"}`, w.Body.String())
322 | })
323 |
324 | t.Run(" If the offsets do not match, the Server MUST respond with the 409 Conflict status without modifying the upload resource.", func(t *testing.T) {
325 | m := map[string]File{
326 | "a": {
327 | ID: "a",
328 | UploadedSize: 0,
329 | TotalSize: 10,
330 | },
331 | }
332 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
333 |
334 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
335 | req.Header.Set("Content-Type", "application/offset+octet-stream")
336 | req.Header.Set("Upload-Offset", "10")
337 | w := httptest.NewRecorder()
338 |
339 | router := mux.NewRouter()
340 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
341 | router.ServeHTTP(w, req)
342 |
343 | assert.Equal(t, http.StatusConflict, w.Code)
344 | assert.Equal(t, `{"message":"upload-Offset header does not match the current offset"}`, w.Body.String())
345 | })
346 |
347 | t.Run("The Server MUST acknowledge successful PATCH requests with the 204 No Content status. It MUST include the Upload-Offset header containing the new offset", func(t *testing.T) {
348 | m := map[string]File{
349 | "a": {
350 | ID: "a",
351 | UploadedSize: 0,
352 | TotalSize: 5,
353 | },
354 | }
355 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
356 |
357 | buf := bytes.NewBufferString("ccc")
358 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
359 | req.Header.Set("Content-Type", "application/offset+octet-stream")
360 | req.Header.Set("Upload-Offset", "0")
361 | w := httptest.NewRecorder()
362 |
363 | router := mux.NewRouter()
364 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
365 | router.ServeHTTP(w, req)
366 |
367 | assert.Equal(t, http.StatusNoContent, w.Code)
368 | assert.Equal(t, "3", w.Header().Get(UploadOffsetHeader))
369 | })
370 | }
371 |
372 | func TestExpiration(t *testing.T) {
373 | t.Run("The expiration header may be included in the HEAD response when the upload is going to expire.", func(t *testing.T) {
374 | m := map[string]File{
375 | "a": {
376 | ID: "a",
377 | UploadedSize: 0,
378 | TotalSize: 5,
379 | ExpiresAt: time.Now().Add(1 * time.Hour),
380 | },
381 | }
382 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
383 |
384 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
385 | w := httptest.NewRecorder()
386 |
387 | router := mux.NewRouter()
388 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
389 | router.ServeHTTP(w, req)
390 |
391 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
392 | ts := w.Header().Get(UploadExpiresHeader)
393 | tt, err := time.Parse(format, ts)
394 | assert.NoError(t, err)
395 |
396 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
397 | })
398 |
399 | t.Run("the Server SHOULD respond with 410 Gone status if the Server is keeping track of expired uploads", func(t *testing.T) {
400 | m := map[string]File{
401 | "a": {
402 | ID: "a",
403 | UploadedSize: 0,
404 | TotalSize: 5,
405 | ExpiresAt: time.Now().Add(-1 * time.Hour),
406 | },
407 | }
408 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
409 |
410 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
411 | w := httptest.NewRecorder()
412 |
413 | router := mux.NewRouter()
414 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
415 | router.ServeHTTP(w, req)
416 |
417 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
418 | ts := w.Header().Get(UploadExpiresHeader)
419 | tt, err := time.Parse(format, ts)
420 | assert.NoError(t, err)
421 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
422 | assert.Equal(t, http.StatusGone, w.Code)
423 | })
424 |
425 | t.Run("This header MUST be included in every PATCH response if the upload is going to expire.", func(t *testing.T) {
426 | m := map[string]File{
427 | "a": {
428 | ID: "a",
429 | UploadedSize: 0,
430 | TotalSize: 5,
431 | ExpiresAt: time.Now().Add(1 * time.Hour),
432 | },
433 | }
434 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
435 |
436 | buf := bytes.NewBufferString("ccc")
437 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
438 | req.Header.Set("Content-Type", "application/offset+octet-stream")
439 | req.Header.Set("Upload-Offset", "0")
440 | w := httptest.NewRecorder()
441 |
442 | router := mux.NewRouter()
443 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
444 | router.ServeHTTP(w, req)
445 |
446 | assert.Equal(t, http.StatusNoContent, w.Code)
447 | assert.Equal(t, "3", w.Header().Get(UploadOffsetHeader))
448 |
449 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
450 | ts := w.Header().Get(UploadExpiresHeader)
451 | tt, err := time.Parse(format, ts)
452 | assert.NoError(t, err)
453 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
454 | })
455 |
456 | t.Run("If a Client does attempt to resume an upload which has since been removed by the Server, the Server SHOULD respond with 410 Gone status", func(t *testing.T) {
457 | m := map[string]File{
458 | "a": {
459 | ID: "a",
460 | UploadedSize: 0,
461 | TotalSize: 5,
462 | ExpiresAt: time.Now().Add(-1 * time.Hour),
463 | },
464 | }
465 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
466 |
467 | buf := bytes.NewBufferString("ccc")
468 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
469 | req.Header.Set("Content-Type", "application/offset+octet-stream")
470 | req.Header.Set("Upload-Offset", "0")
471 | w := httptest.NewRecorder()
472 |
473 | router := mux.NewRouter()
474 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
475 | router.ServeHTTP(w, req)
476 |
477 | assert.Equal(t, http.StatusGone, w.Code)
478 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
479 | assert.Empty(t, w.Header().Get(UploadExpiresHeader))
480 | assert.Equal(t, `{"message":"file expired"}`, w.Body.String())
481 |
482 | })
483 | }
484 |
485 | func TestChecksum(t *testing.T) {
486 | t.Run("The Upload-Checksum header MUST consist of the name of the used checksum algorithm and the Base64 encoded checksum separated by a space.", func(t *testing.T) {
487 | m := map[string]File{
488 | "a": {
489 | ID: "a",
490 | UploadedSize: 0,
491 | TotalSize: 1,
492 | },
493 | }
494 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
495 |
496 | buf := bytes.NewBufferString("1")
497 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
498 | req.Header.Set("Content-Type", "application/offset+octet-stream")
499 | req.Header.Set("Upload-Offset", "0")
500 | req.Header.Set("Upload-Checksum", "md5 c4ca4238a0b923820dcc509a6f75849b")
501 | w := httptest.NewRecorder()
502 |
503 | router := mux.NewRouter()
504 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
505 | router.ServeHTTP(w, req)
506 |
507 | assert.Equal(t, http.StatusNoContent, w.Code)
508 | assert.Equal(t, "1", w.Header().Get(UploadOffsetHeader))
509 | })
510 |
511 | t.Run("The Server MUST support at least the SHA1 checksum algorithm identified by sha1", func(t *testing.T) {
512 | m := map[string]File{
513 | "a": {
514 | ID: "a",
515 | UploadedSize: 0,
516 | TotalSize: 1,
517 | },
518 | }
519 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
520 |
521 | buf := bytes.NewBufferString("1")
522 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
523 | req.Header.Set("Content-Type", "application/offset+octet-stream")
524 | req.Header.Set("Upload-Offset", "0")
525 | req.Header.Set("Upload-Checksum", "sha1 356a192b7913b04c54574d18c28d46e6395428ab")
526 | w := httptest.NewRecorder()
527 |
528 | router := mux.NewRouter()
529 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
530 | router.ServeHTTP(w, req)
531 |
532 | assert.Equal(t, http.StatusNoContent, w.Code)
533 | assert.Equal(t, "1", w.Header().Get(UploadOffsetHeader))
534 | })
535 |
536 | t.Run("PATCH must fail when the Upload-Checksum header only has 1 segment", func(t *testing.T) {
537 | m := map[string]File{
538 | "a": {
539 | ID: "a",
540 | UploadedSize: 0,
541 | TotalSize: 1,
542 | },
543 | }
544 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
545 |
546 | buf := bytes.NewBufferString("1")
547 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
548 | req.Header.Set("Content-Type", "application/offset+octet-stream")
549 | req.Header.Set("Upload-Offset", "0")
550 | req.Header.Set("Upload-Checksum", "c4ca4238a0b923820dcc509a6f75849b")
551 | w := httptest.NewRecorder()
552 |
553 | router := mux.NewRouter()
554 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
555 | router.ServeHTTP(w, req)
556 |
557 | assert.Equal(t, http.StatusBadRequest, w.Code)
558 | assert.Equal(t, `{"message":"invalid checksum format"}`, w.Body.String())
559 | })
560 |
561 | t.Run("PATCH must fail when the Upload-Checksum header uses an unsupported hash algorithm", func(t *testing.T) {
562 | m := map[string]File{
563 | "a": {
564 | ID: "a",
565 | UploadedSize: 0,
566 | TotalSize: 1,
567 | },
568 | }
569 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
570 |
571 | buf := bytes.NewBufferString("1")
572 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
573 | req.Header.Set("Content-Type", "application/offset+octet-stream")
574 | req.Header.Set("Upload-Offset", "0")
575 | req.Header.Set("Upload-Checksum", "sha256 c4ca4238a0b923820dcc509a6f75849b")
576 | w := httptest.NewRecorder()
577 |
578 | router := mux.NewRouter()
579 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
580 | router.ServeHTTP(w, req)
581 |
582 | assert.Equal(t, http.StatusBadRequest, w.Code)
583 | assert.Equal(t, `{"message":"unsupported checksum algorithm"}`, w.Body.String())
584 | })
585 |
586 | t.Run("PATCH must fail when the checksum value does not match", func(t *testing.T) {
587 | m := map[string]File{
588 | "a": {
589 | ID: "a",
590 | UploadedSize: 0,
591 | TotalSize: 1,
592 | },
593 | }
594 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
595 |
596 | buf := bytes.NewBufferString("1")
597 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
598 | req.Header.Set("Content-Type", "application/offset+octet-stream")
599 | req.Header.Set("Upload-Offset", "0")
600 | req.Header.Set("Upload-Checksum", "md5 c4ca4238a0b923820dcc509a6f758495")
601 | w := httptest.NewRecorder()
602 |
603 | router := mux.NewRouter()
604 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
605 | router.ServeHTTP(w, req)
606 |
607 | assert.Equal(t, 460, w.Code)
608 | assert.Equal(t, `{"message":"checksum mismatch"}`, w.Body.String())
609 | })
610 | }
611 |
--------------------------------------------------------------------------------
/api/v3/file.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import (
4 | "encoding/base64"
5 | "errors"
6 | "strings"
7 | "time"
8 |
9 | "github.com/google/uuid"
10 | )
11 |
12 | func NewFile() File {
13 | id := uuid.New().String()
14 | f := File{
15 | ID: id,
16 | IsDeferLength: true,
17 | Path: "/tmp/file-upload-" + id,
18 | }
19 | return f
20 | }
21 |
22 | type File struct {
23 | ID string
24 | Name string
25 | TotalSize uint64
26 | UploadedSize uint64
27 | ContentType string
28 | Checksum string
29 | ExpiresAt time.Time
30 | Path string
31 | IsDeferLength bool
32 | }
33 |
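// ParseMetadata parses an Upload-Metadata header: comma-separated "key base64value"
// pairs. The content-type, checksum and filename keys are required by this server.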
34 | func (f *File) ParseMetadata(m string) error {
35 | md := make(map[string]string)
36 | kvs := strings.Split(m, ",")
37 | for _, kv := range kvs {
38 | if kv == "" {
39 | continue
40 | }
41 | parts := strings.Fields(kv)
42 | if len(parts) != 2 {
43 | return errors.New("invalid metadata")
44 | }
45 | decoded, err := base64.StdEncoding.DecodeString(parts[1])
46 | if err != nil {
47 | return err
48 | }
49 | md[parts[0]] = string(decoded)
50 | }
51 | contentType, ok := md["content-type"]
52 | if !ok {
53 | return errors.New("missing content-type")
54 | }
55 | checksum, ok := md["checksum"]
56 | if !ok {
57 | return errors.New("missing checksum")
58 | }
59 | name, ok := md["filename"]
60 | if !ok {
61 | return errors.New("missing filename")
62 | }
63 | f.Name = name
64 | f.ContentType = contentType
65 | f.Checksum = checksum
66 | return nil
67 | }
68 |
--------------------------------------------------------------------------------
/api/v3/store.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import "sync"
4 |
5 | type Store struct {
6 | sync.RWMutex
7 | files map[string]File
8 | }
9 |
10 | func NewStore() *Store {
11 | return &Store{
12 | files: make(map[string]File),
13 | }
14 | }
15 |
16 | func (s *Store) Find(id string) (File, bool, error) {
17 | s.RLock()
18 | defer s.RUnlock()
19 | metadata, exists := s.files[id]
20 | return metadata, exists, nil
21 | }
22 |
23 | func (s *Store) Save(id string, metadata File) {
24 | s.Lock()
25 | defer s.Unlock()
26 | s.files[id] = metadata
27 | }
28 |
--------------------------------------------------------------------------------
/api/v4/api.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "errors"
7 | "fmt"
8 | "io"
9 | "net"
10 | "net/http"
11 | "strconv"
12 | "strings"
13 | "time"
14 |
15 | "cloud.google.com/go/storage"
16 | "github.com/google/uuid"
17 | "github.com/gorilla/mux"
18 | "github.com/rs/zerolog/log"
19 | )
20 |
21 | const (
22 | TusResumableHeader = "Tus-Resumable"
23 | TusExtensionHeader = "Tus-Extension"
24 | TusVersionHeader = "Tus-Version"
25 | TusMaxSizeHeader = "Tus-Max-Size"
26 | TusChecksumAlgorithmHeader = "Tus-Checksum-Algorithm"
27 |
28 | TusVersion = "1.0.0"
29 | UploadOffsetHeader = "Upload-Offset"
30 | UploadLengthHeader = "Upload-Length"
31 | UploadMetadataHeader = "Upload-Metadata"
32 | UploadDeferLengthHeader = "Upload-Defer-Length"
33 | UploadExpiresHeader = "Upload-Expires"
34 | UploadChecksumHeader = "Upload-Checksum"
35 | ContentTypeHeader = "Content-Type"
36 |
37 | UploadMaxDuration = 10 * time.Minute
38 | )
39 |
40 | type Extension string
41 |
42 | const (
43 | CreationExtension Extension = "creation"
44 | ExpirationExtension Extension = "expiration"
45 | ChecksumExtension Extension = "checksum"
46 | TerminationExtension Extension = "termination"
47 | ConcatenationExtension Extension = "concatenation"
48 | )
49 |
50 | type Extensions []Extension
51 |
52 | func (e Extensions) Enabled(ext Extension) bool {
53 | for _, v := range e {
54 | if v == ext {
55 | return true
56 | }
57 | }
58 | return false
59 | }
60 |
61 | func (e Extensions) String() string {
62 | var s []string
63 | for _, v := range e {
64 | s = append(s, string(v))
65 | }
66 | return strings.Join(s, ",")
67 | }
68 |
69 | var (
70 | defaultMaxSize = uint64(0)
71 | defaultSupportedExtensions = Extensions{
72 | CreationExtension,
73 | ExpirationExtension,
74 | ChecksumExtension,
75 | }
76 | SupportedTusVersion = []string{
77 | "0.2.0",
78 | "1.0.0",
79 | }
80 | SupportedChecksumAlgorithms = []string{
81 | "sha1",
82 | "md5",
83 | }
84 | )
85 |
86 | type Options struct {
87 | Extensions Extensions
88 | MaxSize uint64
89 | }
90 |
91 | type Option func(*Options)
92 |
93 | func WithExtensions(extensions Extensions) Option {
94 | return func(o *Options) {
95 | o.Extensions = extensions
96 | }
97 | }
98 |
99 | func WithMaxSize(size uint64) Option {
100 | return func(o *Options) {
101 | o.MaxSize = size
102 | }
103 | }
104 |
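// NewController builds a GCS-backed controller. It creates a Cloud Storage client from
// the ambient credentials and writes into the hard-coded bucket
// "go-http-upload-gcs-test"; adjust the bucket name for your own environment.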
105 | func NewController(s Storage, opts ...Option) Controller {
106 | o := Options{
107 | Extensions: defaultSupportedExtensions,
108 | MaxSize: defaultMaxSize,
109 | }
110 | for _, opt := range opts {
111 | opt(&o)
112 | }
113 |
114 | ctx := context.Background()
115 | client, err := storage.NewClient(ctx)
116 | if err != nil {
117 | log.Fatal().Err(err).Msg("error creating storage client")
118 | }
119 |
120 | bkt := client.Bucket("go-http-upload-gcs-test")
121 |
122 | return Controller{
123 | store: s,
124 | extensions: o.Extensions,
125 | maxSize: o.MaxSize,
126 | storage: client,
127 | bucket: bkt,
128 | }
129 | }
130 |
131 | type Storage interface {
132 | Find(id string) (FileMetadata, bool)
133 | Save(id string, metadata FileMetadata)
134 | }
135 |
136 | type Controller struct {
137 | store Storage
138 | extensions Extensions
139 | maxSize uint64
140 | storage *storage.Client
141 | bucket *storage.BucketHandle
142 | }
143 |
144 | func TusResumableHeaderCheck(next http.Handler) http.Handler {
145 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
146 | if r.Method == http.MethodOptions {
147 | next.ServeHTTP(w, r)
148 | return
149 | }
150 |
151 | if r.Header.Get(TusResumableHeader) == "" {
152 | w.WriteHeader(http.StatusBadRequest)
153 | w.Write([]byte("Tus-Resumable header is missing"))
154 | return
155 | }
156 |
157 | tusVersion := r.Header.Get(TusResumableHeader)
158 | supported := false
159 | for _, version := range SupportedTusVersion {
160 | if tusVersion == version {
161 | supported = true
162 | break
163 | }
164 | }
165 | if !supported {
166 | w.WriteHeader(http.StatusPreconditionFailed)
167 | w.Write([]byte("Tus version not supported"))
168 | return
169 | }
170 | next.ServeHTTP(w, r)
171 | })
172 | }
173 |
174 | func TusResumableHeaderInjections(next http.Handler) http.Handler {
175 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
176 | if r.Method != http.MethodOptions {
177 | w.Header().Set(TusResumableHeader, TusVersion)
178 | }
179 | next.ServeHTTP(w, r)
180 | })
181 | }
182 |
183 | func (c *Controller) GetConfig() http.HandlerFunc {
184 | return func(w http.ResponseWriter, r *http.Request) {
185 | w.Header().Add(TusVersionHeader, strings.Join(SupportedTusVersion, ","))
186 | if len(c.extensions) > 0 {
187 | w.Header().Add(TusExtensionHeader, c.extensions.String())
188 | }
189 | if c.maxSize != 0 {
190 | w.Header().Add(TusMaxSizeHeader, fmt.Sprint(c.maxSize))
191 | }
192 | if c.extensions.Enabled(ChecksumExtension) {
193 | w.Header().Add(TusChecksumAlgorithmHeader, strings.Join(SupportedChecksumAlgorithms, ","))
194 | }
195 | w.WriteHeader(http.StatusNoContent)
196 | }
197 | }
198 |
199 | func (c *Controller) GetOffset() http.HandlerFunc {
200 | return func(w http.ResponseWriter, r *http.Request) {
201 | vars := mux.Vars(r)
202 | fileID := vars["file_id"]
203 | log.Debug().Str("file_id", fileID).Msg("Check request path and query")
204 | fm, ok := c.store.Find(fileID)
205 | if !ok {
206 | w.WriteHeader(http.StatusNotFound)
207 | w.Write([]byte("File not found"))
208 | return
209 | }
210 |
211 | w.Header().Add(UploadOffsetHeader, fmt.Sprint(fm.UploadedSize))
212 | w.Header().Add(UploadLengthHeader, fmt.Sprint(fm.TotalSize))
213 | w.Header().Add("Cache-Control", "no-store")
214 | if fm.Metadata != "" {
215 | w.Header().Add(UploadMetadataHeader, fm.Metadata)
216 | }
217 | if !fm.ExpiresAt.IsZero() {
218 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
219 | }
220 |
221 | if !fm.ExpiresAt.IsZero() && fm.ExpiresAt.Before(time.Now()) {
222 | log.Debug().Str("file_id", fileID).Msg("file expired")
223 | writeError(w, http.StatusGone, errors.New("file expired"))
224 | return
225 | }
226 |
227 | w.WriteHeader(http.StatusNoContent)
228 | }
229 | }
230 |
231 | func newChecksum(value string) (checksum, error) {
232 | if value == "" {
233 | return checksum{}, nil
234 | }
235 | d := strings.Split(value, " ")
236 | if len(d) != 2 {
237 | return checksum{}, fmt.Errorf("invalid checksum format")
238 | }
239 | if d[0] != "md5" && d[0] != "sha1" {
240 | return checksum{}, fmt.Errorf("unsupported checksum algorithm")
241 | }
242 | return checksum{
243 | Algorithm: d[0],
244 | Value: d[1],
245 | }, nil
246 | }
247 |
248 | type checksum struct {
249 | Algorithm string
250 | Value string
251 | }
252 |
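// ResumeUpload handles PATCH requests against the GCS backend. Each request body is
// written to a separate object named "<file_id>-<offset>"; stitching the chunks back
// into a single object is left to a later step (likely cmd/composer, not shown here).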
253 | func (c *Controller) ResumeUpload() http.HandlerFunc {
254 | return func(w http.ResponseWriter, r *http.Request) {
255 | r.Body = http.MaxBytesReader(w, r.Body, 64<<20) //64MB
256 | doneCh := make(chan struct{})
257 | defer close(doneCh)
258 |
259 | go func() {
260 | select {
261 | case <-doneCh:
262 | log.Info().Msg("Upload completed")
263 | return
264 | case <-r.Context().Done():
265 | log.Warn().Err(r.Context().Err()).Msg("Upload canceled")
266 | return
267 | }
268 | }()
269 |
270 | // r.Body = http.MaxBytesReader(w, r.Body, 10<<20) //10MB
271 | vars := mux.Vars(r)
272 | fileID := vars["file_id"]
273 |
274 | uploadOffset := r.Header.Get(UploadOffsetHeader)
275 | offset, err := strconv.ParseInt(uploadOffset, 10, 64)
276 | if err != nil {
277 | log.Debug().Err(err).
278 | Str("upload_offset", uploadOffset).
279 | Msg("Invalid Upload-Offset header: not a number")
280 | writeError(w, http.StatusBadRequest, errors.New("invalid Upload-Offset header: not a number"))
281 | return
282 | }
283 | if offset < 0 {
284 | log.Debug().Str("upload_offset", uploadOffset).Msg("Invalid Upload-Offset header: negative value")
285 | writeError(w, http.StatusBadRequest, errors.New("invalid Upload-Offset header: negative value"))
286 | return
287 | }
288 |
289 | contentType := r.Header.Get(ContentTypeHeader)
290 | if contentType != "application/offset+octet-stream" {
291 | log.Debug().Str("content_type", contentType).Msg("Invalid Content-Type")
292 | writeError(w, http.StatusUnsupportedMediaType, errors.New("invalid Content-Type header: expected application/offset+octet-stream"))
293 | return
294 | }
295 |
296 | fm, ok := c.store.Find(fileID)
297 | if !ok {
298 | log.Debug().Str("file_id", fileID).Msg("file not found")
299 | writeError(w, http.StatusNotFound, errors.New("file not found"))
300 | return
301 | }
302 |
303 | if c.extensions.Enabled(ExpirationExtension) && !fm.ExpiresAt.IsZero() && fm.ExpiresAt.Before(time.Now()) {
304 | log.Debug().Str("file_id", fileID).Msg("file expired")
305 | writeError(w, http.StatusGone, errors.New("file expired"))
306 | return
307 | }
308 |
309 | log.Debug().Int64("offset_request", offset).
310 | Int64("uploaded_size", fm.UploadedSize).
311 | Msg("Check size")
312 |
313 | if offset != fm.UploadedSize {
314 | log.Warn().Msg("upload-Offset header does not match the current offset")
315 | writeError(w, http.StatusConflict, errors.New("upload-Offset header does not match the current offset"))
316 | return
317 | }
318 |
319 | objName := fmt.Sprintf("%s-%d", fileID, offset)
320 | obj := c.bucket.Object(objName)
321 | objW := obj.NewWriter(r.Context())
322 |
323 | // objW.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
324 | // objW.SendCRC32C = true
325 | defer objW.Close() // NOTE: the object is only committed to GCS when Close returns; its error is not checked here
326 |
327 | n, err := io.Copy(objW, r.Body)
328 | if err != nil {
329 |
330 | fm.UploadedSize += n
331 | c.store.Save(fm.ID, fm)
332 |
333 | log.Info().
334 | Int64("written_size", n).
335 | Msg("partial message is written")
336 |
337 | var netErr net.Error
338 | if errors.As(err, &netErr) && netErr.Timeout() {
339 | log.Warn().Err(err).Msg("network timeout while writing file")
340 | writeError(w, http.StatusRequestTimeout, fmt.Errorf("network timeout: %w", err))
341 | return
342 | }
343 |
344 | log.Error().Err(err).Msg("error writing the file")
345 | writeError(w, http.StatusInternalServerError, fmt.Errorf("error writing the file: %w", err))
346 | return
347 | }
348 |
349 | fm.UploadedSize += n
350 | c.store.Save(fm.ID, fm)
351 |
352 | objPath := fmt.Sprintf("gs://%s/%s", c.bucket.BucketName(), objName)
353 |
354 | log.Debug().
355 | Int64("written_size", n).
356 | Str("stored_file", objPath).
357 | Msg("File Uploaded")
358 |
359 | log.Debug().Msg("prepare the response header")
360 | w.Header().Add(UploadOffsetHeader, fmt.Sprint(fm.UploadedSize))
361 | if !fm.ExpiresAt.IsZero() {
362 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
363 | }
364 | w.WriteHeader(http.StatusNoContent)
365 | }
366 | }
367 |
368 | func (c *Controller) CreateUpload() http.HandlerFunc {
369 | return func(w http.ResponseWriter, r *http.Request) {
370 | uploadDeferLength := r.Header.Get(UploadDeferLengthHeader)
371 | if uploadDeferLength != "" && uploadDeferLength != "1" {
372 | w.WriteHeader(http.StatusBadRequest)
373 | w.Write([]byte("Invalid Upload-Defer-Length header"))
374 | return
375 | }
376 |
377 | isDeferLength := uploadDeferLength == "1"
378 | if isDeferLength {
379 | w.WriteHeader(http.StatusNotImplemented)
380 | w.Write([]byte("Upload-Defer-Length is not implemented"))
381 | return
382 | }
383 |
384 | // TODO: shouldn't Upload-Length be optional here?
385 | totalLength := r.Header.Get(UploadLengthHeader)
386 | totalSize, err := strconv.ParseUint(totalLength, 10, 64)
387 | if err != nil {
388 | w.WriteHeader(http.StatusBadRequest)
389 | w.Write([]byte("Invalid Upload-Length header"))
390 | return
391 | }
392 |
393 | if c.maxSize > 0 && totalSize > c.maxSize {
394 | w.WriteHeader(http.StatusRequestEntityTooLarge)
395 | w.Write([]byte("Upload-Length exceeds the maximum size"))
396 | return
397 | }
398 | uploadMetadata := r.Header.Get(UploadMetadataHeader)
399 | log.Debug().Str("upload_metadata", uploadMetadata).Msg("Check request header")
400 |
401 | fm := FileMetadata{
402 | ID: uuid.New().String(),
403 | TotalSize: totalSize,
404 | Metadata: uploadMetadata,
405 | ExpiresAt: time.Now().Add(UploadMaxDuration),
406 | }
407 | c.store.Save(fm.ID, fm)
408 |
409 | w.Header().Add("Location", fmt.Sprintf("/files/%s", fm.ID))
410 | if !fm.ExpiresAt.IsZero() {
411 | w.Header().Add(UploadExpiresHeader, uploadExpiresAt(fm.ExpiresAt))
412 | }
413 | w.WriteHeader(http.StatusCreated)
414 | w.Write([]byte("CreateUpload"))
415 | }
416 | }
417 |
418 | func uploadExpiresAt(t time.Time) string {
419 | return t.Format("Mon, 02 Jan 2006 15:04:05 GMT")
420 | }
421 |
422 | type cError struct {
423 | Message string `json:"message"`
424 | }
425 |
426 | func writeError(w http.ResponseWriter, code int, err error) {
427 | 	w.Header().Set("Content-Type", "application/json")
428 | 	w.WriteHeader(code)
429 |
430 | 	b, _ := json.Marshal(cError{Message: err.Error()})
431 | w.Write(b)
432 | }
433 |
--------------------------------------------------------------------------------
/api/v4/api_test.go:
--------------------------------------------------------------------------------
1 | package v3_test
2 |
3 | import (
4 | "bytes"
5 | "net/http"
6 | "net/http/httptest"
7 | "testing"
8 | "time"
9 |
10 | "github.com/gorilla/mux"
11 | . "github.com/imrenagi/go-http-upload/api/v3"
12 | "github.com/stretchr/testify/assert"
13 | )
14 |
15 | func newFakeStore(m map[string]FileMetadata) *fakeStore {
16 | return &fakeStore{
17 | files: m,
18 | }
19 | }
20 |
21 | type fakeStore struct {
22 | files map[string]FileMetadata
23 | }
24 |
25 | func (s *fakeStore) Find(id string) (FileMetadata, bool) {
26 | metadata, exists := s.files[id]
27 | return metadata, exists
28 | }
29 |
30 | func (s *fakeStore) Save(id string, metadata FileMetadata) {
31 | s.files[id] = metadata
32 | }
33 |
34 | func TestGetOffset(t *testing.T) {
35 | t.Run("The Server MUST always include the Upload-Offset header in the response for a HEAD request. The Server SHOULD acknowledge successful HEAD requests with a 200 OK or 204 No Content status.",
36 | func(t *testing.T) {
37 | m := map[string]FileMetadata{
38 | "a": {
39 | ID: "a",
40 | UploadedSize: 0,
41 | },
42 | }
43 | ctrl := NewController(newFakeStore(m))
44 |
45 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
46 | w := httptest.NewRecorder()
47 |
48 | router := mux.NewRouter()
49 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
50 | router.ServeHTTP(w, req)
51 |
52 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
53 | assert.Equal(t, "0", w.Header().Get(UploadOffsetHeader), "Expected Upload-Offset header to be 0, got %v", w.Header().Get(UploadOffsetHeader))
54 |
55 | //The Server MUST prevent the client and/or proxies from caching the response by adding the Cache-Control: no-store header to the response.
56 | assert.Equal(t, "no-store", w.Header().Get("Cache-Control"), "Expected Cache-Control header to be no-store, got %v", w.Header().Get("Cache-Control"))
57 | })
58 |
59 | t.Run("If the size of the upload is known, the Server MUST include the Upload-Length header in the response.", func(t *testing.T) {
60 | m := map[string]FileMetadata{
61 | "a": {
62 | ID: "a",
63 | UploadedSize: 19,
64 | TotalSize: 100,
65 | },
66 | }
67 | ctrl := NewController(newFakeStore(m))
68 |
69 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
70 | w := httptest.NewRecorder()
71 |
72 | router := mux.NewRouter()
73 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
74 | router.ServeHTTP(w, req)
75 |
76 | assert.Equal(t, "100", w.Header().Get(UploadLengthHeader))
77 | assert.Equal(t, "19", w.Header().Get(UploadOffsetHeader))
78 | })
79 |
80 | 	t.Run("If the resource is not found, the Server SHOULD return the 404 Not Found status without the Upload-Offset header.", func(t *testing.T) {
81 | m := map[string]FileMetadata{}
82 | ctrl := NewController(newFakeStore(m))
83 |
84 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
85 | w := httptest.NewRecorder()
86 |
87 | router := mux.NewRouter()
88 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
89 | router.ServeHTTP(w, req)
90 |
91 | assert.Equal(t, http.StatusNotFound, w.Code)
92 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
93 | })
94 |
95 | }
96 |
97 | func TestTusResumableHeader(t *testing.T) {
98 | 	t.Run("Return 400 if the Tus-Resumable header is not included in the HEAD request", func(t *testing.T) {
99 | m := map[string]FileMetadata{}
100 | ctrl := NewController(newFakeStore(m))
101 |
102 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
103 | w := httptest.NewRecorder()
104 |
105 | router := mux.NewRouter()
106 | router.Use(TusResumableHeaderCheck)
107 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
108 | router.ServeHTTP(w, req)
109 |
110 | assert.Equal(t, http.StatusBadRequest, w.Code)
111 | // the Server MUST NOT process the request.
112 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
113 | assert.Empty(t, w.Header().Get(UploadLengthHeader))
114 | })
115 |
116 | 	t.Run("Return 412 if the Tus-Resumable version is not supported by the server. The server must not process the request", func(t *testing.T) {
117 | m := map[string]FileMetadata{}
118 | ctrl := NewController(newFakeStore(m))
119 |
120 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
121 | req.Header.Set(TusResumableHeader, "1.0.1")
122 | w := httptest.NewRecorder()
123 |
124 | router := mux.NewRouter()
125 | router.Use(TusResumableHeaderCheck)
126 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
127 | router.ServeHTTP(w, req)
128 |
129 | assert.Equal(t, http.StatusPreconditionFailed, w.Code)
130 | // the Server MUST NOT process the request.
131 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
132 | assert.Empty(t, w.Header().Get(UploadLengthHeader))
133 | })
134 |
135 | 	t.Run("Multiple versions of the Tus-Resumable header can be supported by the server", func(t *testing.T) {
136 | m := map[string]FileMetadata{
137 | "a": {
138 | ID: "a",
139 | UploadedSize: 19,
140 | TotalSize: 100,
141 | },
142 | }
143 | ctrl := NewController(newFakeStore(m))
144 | router := mux.NewRouter()
145 | router.Use(TusResumableHeaderCheck)
146 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
147 |
148 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
149 | req.Header.Set(TusResumableHeader, "0.2.0")
150 | w := httptest.NewRecorder()
151 | router.ServeHTTP(w, req)
152 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
153 |
154 | req = httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
155 | req.Header.Set(TusResumableHeader, "1.0.0")
156 | w = httptest.NewRecorder()
157 | router.ServeHTTP(w, req)
158 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
159 | })
160 |
161 | 	t.Run("The Tus-Resumable header MUST be included in every response to a HEAD request.", func(t *testing.T) {
162 | m := map[string]FileMetadata{
163 | "a": {
164 | ID: "a",
165 | UploadedSize: 19,
166 | TotalSize: 100,
167 | },
168 | }
169 | ctrl := NewController(newFakeStore(m))
170 | router := mux.NewRouter()
171 | router.Use(TusResumableHeaderInjections)
172 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
173 |
174 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
175 | w := httptest.NewRecorder()
176 | router.ServeHTTP(w, req)
177 | assert.Equal(t, "1.0.0", w.Header().Get(TusResumableHeader))
178 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
179 | })
180 | }
181 |
182 | func TestGetConfig(t *testing.T) {
183 | t.Run("A successful response indicated by the 204 No Content or 200 OK status MUST contain the Tus-Version header", func(t *testing.T) {
184 | m := map[string]FileMetadata{}
185 | ctrl := NewController(newFakeStore(m))
186 |
187 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
188 | w := httptest.NewRecorder()
189 |
190 | router := mux.NewRouter()
191 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
192 | router.ServeHTTP(w, req)
193 |
194 | assert.Contains(t, []int{http.StatusOK, http.StatusNoContent}, w.Code, "Expected status code %v, got %v", http.StatusOK, w.Code)
195 | assert.Equal(t, "0.2.0,1.0.0", w.Header().Get(TusVersionHeader))
196 | assert.Empty(t, w.Header().Get(TusResumableHeader))
197 | })
198 |
199 | t.Run("It MAY include the Tus-Extension and Tus-Max-Size headers.", func(t *testing.T) {
200 | m := map[string]FileMetadata{}
201 | ctrl := NewController(newFakeStore(m),
202 | WithExtensions(Extensions{CreationExtension,
203 | ExpirationExtension,
204 | ChecksumExtension}),
205 | WithMaxSize(1073741824))
206 |
207 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
208 | w := httptest.NewRecorder()
209 |
210 | router := mux.NewRouter()
211 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
212 | router.ServeHTTP(w, req)
213 |
214 | assert.Equal(t, "creation,expiration,checksum", w.Header().Get(TusExtensionHeader))
215 | assert.Equal(t, "1073741824", w.Header().Get(TusMaxSizeHeader))
216 | assert.Equal(t, "sha1,md5", w.Header().Get(TusChecksumAlgorithmHeader))
217 | })
218 |
219 | t.Run("The extension header must be omitted if the server does not support any extensions", func(t *testing.T) {
220 | m := map[string]FileMetadata{}
221 | ctrl := NewController(newFakeStore(m),
222 | WithExtensions(Extensions{}),
223 | )
224 |
225 | req := httptest.NewRequest(http.MethodOptions, "/api/v1/files", nil)
226 | w := httptest.NewRecorder()
227 |
228 | router := mux.NewRouter()
229 | router.HandleFunc("/api/v1/files", ctrl.GetConfig())
230 | router.ServeHTTP(w, req)
231 |
232 | assert.Empty(t, w.Header().Get(TusExtensionHeader))
233 | assert.Empty(t, w.Header().Get(TusMaxSizeHeader))
234 | assert.Empty(t, w.Header().Get(TusChecksumAlgorithmHeader))
235 |
236 | })
237 | }
238 |
239 | func TestResumeUpload(t *testing.T) {
240 |
241 | t.Run("Upload-Offset must be included in the request", func(t *testing.T) {
242 | m := map[string]FileMetadata{
243 | "a": {
244 | ID: "a",
245 | UploadedSize: 0,
246 | TotalSize: 10,
247 | },
248 | }
249 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
250 |
251 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
252 | w := httptest.NewRecorder()
253 |
254 | router := mux.NewRouter()
255 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
256 | router.ServeHTTP(w, req)
257 |
258 | assert.Equal(t, http.StatusBadRequest, w.Code)
259 | assert.Equal(t, `{"message":"invalid Upload-Offset header: not a number"}`, w.Body.String())
260 | })
261 |
262 | 	t.Run("Upload-Offset must be included in the request with a value greater than or equal to 0", func(t *testing.T) {
263 | m := map[string]FileMetadata{
264 | "a": {
265 | ID: "a",
266 | UploadedSize: 0,
267 | TotalSize: 10,
268 | },
269 | }
270 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
271 |
272 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
273 | req.Header.Set("Upload-Offset", "-1")
274 | w := httptest.NewRecorder()
275 |
276 | router := mux.NewRouter()
277 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
278 | router.ServeHTTP(w, req)
279 |
280 | assert.Equal(t, http.StatusBadRequest, w.Code)
281 | assert.Equal(t, `{"message":"invalid Upload-Offset header: negative value"}`, w.Body.String())
282 | })
283 |
284 | 	t.Run("When a PATCH request doesn't use Content-Type: application/offset+octet-stream, the server SHOULD return a 415 Unsupported Media Type status", func(t *testing.T) {
285 | m := map[string]FileMetadata{
286 | "a": {
287 | ID: "a",
288 | UploadedSize: 0,
289 | TotalSize: 10,
290 | },
291 | }
292 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
293 |
294 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
295 | req.Header.Set("Content-Type", "application/json")
296 | req.Header.Set("Upload-Offset", "0")
297 | w := httptest.NewRecorder()
298 |
299 | router := mux.NewRouter()
300 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
301 | router.ServeHTTP(w, req)
302 |
303 | assert.Equal(t, http.StatusUnsupportedMediaType, w.Code)
304 | assert.Equal(t, `{"message":"invalid Content-Type header: expected application/offset+octet-stream"}`, w.Body.String())
305 | })
306 |
307 | t.Run("If the server receives a PATCH request against a non-existent resource it SHOULD return a 404 Not Found status.", func(t *testing.T) {
308 | m := map[string]FileMetadata{}
309 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
310 |
311 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
312 | req.Header.Set("Content-Type", "application/offset+octet-stream")
313 | req.Header.Set("Upload-Offset", "0")
314 | w := httptest.NewRecorder()
315 |
316 | router := mux.NewRouter()
317 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
318 | router.ServeHTTP(w, req)
319 |
320 | assert.Equal(t, http.StatusNotFound, w.Code)
321 | assert.Equal(t, `{"message":"file not found"}`, w.Body.String())
322 | })
323 |
324 | t.Run(" If the offsets do not match, the Server MUST respond with the 409 Conflict status without modifying the upload resource.", func(t *testing.T) {
325 | m := map[string]FileMetadata{
326 | "a": {
327 | ID: "a",
328 | UploadedSize: 0,
329 | TotalSize: 10,
330 | },
331 | }
332 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
333 |
334 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", nil)
335 | req.Header.Set("Content-Type", "application/offset+octet-stream")
336 | req.Header.Set("Upload-Offset", "10")
337 | w := httptest.NewRecorder()
338 |
339 | router := mux.NewRouter()
340 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
341 | router.ServeHTTP(w, req)
342 |
343 | assert.Equal(t, http.StatusConflict, w.Code)
344 | assert.Equal(t, `{"message":"upload-Offset header does not match the current offset"}`, w.Body.String())
345 | })
346 |
347 | t.Run("The Server MUST acknowledge successful PATCH requests with the 204 No Content status. It MUST include the Upload-Offset header containing the new offset", func(t *testing.T) {
348 | m := map[string]FileMetadata{
349 | "a": {
350 | ID: "a",
351 | UploadedSize: 0,
352 | TotalSize: 5,
353 | },
354 | }
355 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{}))
356 |
357 | buf := bytes.NewBufferString("ccc")
358 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
359 | req.Header.Set("Content-Type", "application/offset+octet-stream")
360 | req.Header.Set("Upload-Offset", "0")
361 | w := httptest.NewRecorder()
362 |
363 | router := mux.NewRouter()
364 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
365 | router.ServeHTTP(w, req)
366 |
367 | assert.Equal(t, http.StatusNoContent, w.Code)
368 | assert.Equal(t, "3", w.Header().Get(UploadOffsetHeader))
369 | })
370 | }
371 |
372 | func TestExpiration(t *testing.T) {
373 | t.Run("The expiration header may be included in the HEAD response when the upload is going to expire.", func(t *testing.T) {
374 | m := map[string]FileMetadata{
375 | "a": {
376 | ID: "a",
377 | UploadedSize: 0,
378 | TotalSize: 5,
379 | ExpiresAt: time.Now().Add(1 * time.Hour),
380 | },
381 | }
382 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
383 |
384 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
385 | w := httptest.NewRecorder()
386 |
387 | router := mux.NewRouter()
388 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
389 | router.ServeHTTP(w, req)
390 |
391 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
392 | ts := w.Header().Get(UploadExpiresHeader)
393 | tt, err := time.Parse(format, ts)
394 | assert.NoError(t, err)
395 |
396 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
397 | })
398 |
399 | t.Run("the Server SHOULD respond with 410 Gone status if the Server is keeping track of expired uploads", func(t *testing.T) {
400 | m := map[string]FileMetadata{
401 | "a": {
402 | ID: "a",
403 | UploadedSize: 0,
404 | TotalSize: 5,
405 | ExpiresAt: time.Now().Add(-1 * time.Hour),
406 | },
407 | }
408 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
409 |
410 | req := httptest.NewRequest(http.MethodHead, "/api/v1/files/a", nil)
411 | w := httptest.NewRecorder()
412 |
413 | router := mux.NewRouter()
414 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.GetOffset())
415 | router.ServeHTTP(w, req)
416 |
417 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
418 | ts := w.Header().Get(UploadExpiresHeader)
419 | tt, err := time.Parse(format, ts)
420 | assert.NoError(t, err)
421 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
422 | assert.Equal(t, http.StatusGone, w.Code)
423 | })
424 |
425 | t.Run("This header MUST be included in every PATCH response if the upload is going to expire.", func(t *testing.T) {
426 | m := map[string]FileMetadata{
427 | "a": {
428 | ID: "a",
429 | UploadedSize: 0,
430 | TotalSize: 5,
431 | ExpiresAt: time.Now().Add(1 * time.Hour),
432 | },
433 | }
434 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
435 |
436 | buf := bytes.NewBufferString("ccc")
437 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
438 | req.Header.Set("Content-Type", "application/offset+octet-stream")
439 | req.Header.Set("Upload-Offset", "0")
440 | w := httptest.NewRecorder()
441 |
442 | router := mux.NewRouter()
443 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
444 | router.ServeHTTP(w, req)
445 |
446 | assert.Equal(t, http.StatusNoContent, w.Code)
447 | assert.Equal(t, "3", w.Header().Get(UploadOffsetHeader))
448 |
449 | format := "Mon, 02 Jan 2006 15:04:05 GMT"
450 | ts := w.Header().Get(UploadExpiresHeader)
451 | tt, err := time.Parse(format, ts)
452 | assert.NoError(t, err)
453 | assert.Equal(t, m["a"].ExpiresAt.Format(format), tt.Format(format))
454 | })
455 |
456 | t.Run("If a Client does attempt to resume an upload which has since been removed by the Server, the Server SHOULD respond with 410 Gone status", func(t *testing.T) {
457 | m := map[string]FileMetadata{
458 | "a": {
459 | ID: "a",
460 | UploadedSize: 0,
461 | TotalSize: 5,
462 | ExpiresAt: time.Now().Add(-1 * time.Hour),
463 | },
464 | }
465 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ExpirationExtension}))
466 |
467 | buf := bytes.NewBufferString("ccc")
468 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
469 | req.Header.Set("Content-Type", "application/offset+octet-stream")
470 | req.Header.Set("Upload-Offset", "0")
471 | w := httptest.NewRecorder()
472 |
473 | router := mux.NewRouter()
474 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
475 | router.ServeHTTP(w, req)
476 |
477 | assert.Equal(t, http.StatusGone, w.Code)
478 | assert.Empty(t, w.Header().Get(UploadOffsetHeader))
479 | assert.Empty(t, w.Header().Get(UploadExpiresHeader))
480 | assert.Equal(t, `{"message":"file expired"}`, w.Body.String())
481 |
482 | })
483 | }
484 |
485 | func TestChecksum(t *testing.T) {
486 | t.Run("The Upload-Checksum header MUST consist of the name of the used checksum algorithm and the Base64 encoded checksum separated by a space.", func(t *testing.T) {
487 | m := map[string]FileMetadata{
488 | "a": {
489 | ID: "a",
490 | UploadedSize: 0,
491 | TotalSize: 1,
492 | },
493 | }
494 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
495 |
496 | buf := bytes.NewBufferString("1")
497 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
498 | req.Header.Set("Content-Type", "application/offset+octet-stream")
499 | req.Header.Set("Upload-Offset", "0")
500 | req.Header.Set("Upload-Checksum", "md5 c4ca4238a0b923820dcc509a6f75849b")
501 | w := httptest.NewRecorder()
502 |
503 | router := mux.NewRouter()
504 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
505 | router.ServeHTTP(w, req)
506 |
507 | assert.Equal(t, http.StatusNoContent, w.Code)
508 | assert.Equal(t, "1", w.Header().Get(UploadOffsetHeader))
509 | })
510 |
511 | t.Run("The Server MUST support at least the SHA1 checksum algorithm identified by sha1", func(t *testing.T) {
512 | m := map[string]FileMetadata{
513 | "a": {
514 | ID: "a",
515 | UploadedSize: 0,
516 | TotalSize: 1,
517 | },
518 | }
519 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
520 |
521 | buf := bytes.NewBufferString("1")
522 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
523 | req.Header.Set("Content-Type", "application/offset+octet-stream")
524 | req.Header.Set("Upload-Offset", "0")
525 | req.Header.Set("Upload-Checksum", "sha1 356a192b7913b04c54574d18c28d46e6395428ab")
526 | w := httptest.NewRecorder()
527 |
528 | router := mux.NewRouter()
529 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
530 | router.ServeHTTP(w, req)
531 |
532 | assert.Equal(t, http.StatusNoContent, w.Code)
533 | assert.Equal(t, "1", w.Header().Get(UploadOffsetHeader))
534 | })
535 |
536 | 	t.Run("PATCH must fail when the Upload-Checksum header only has 1 segment", func(t *testing.T) {
537 | m := map[string]FileMetadata{
538 | "a": {
539 | ID: "a",
540 | UploadedSize: 0,
541 | TotalSize: 1,
542 | },
543 | }
544 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
545 |
546 | buf := bytes.NewBufferString("1")
547 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
548 | req.Header.Set("Content-Type", "application/offset+octet-stream")
549 | req.Header.Set("Upload-Offset", "0")
550 | req.Header.Set("Upload-Checksum", "c4ca4238a0b923820dcc509a6f75849b")
551 | w := httptest.NewRecorder()
552 |
553 | router := mux.NewRouter()
554 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
555 | router.ServeHTTP(w, req)
556 |
557 | assert.Equal(t, http.StatusBadRequest, w.Code)
558 | assert.Equal(t, `{"message":"invalid checksum format"}`, w.Body.String())
559 | })
560 |
561 | 	t.Run("PATCH must fail when the Upload-Checksum header uses an unsupported hash algorithm", func(t *testing.T) {
562 | m := map[string]FileMetadata{
563 | "a": {
564 | ID: "a",
565 | UploadedSize: 0,
566 | TotalSize: 1,
567 | },
568 | }
569 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
570 |
571 | buf := bytes.NewBufferString("1")
572 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
573 | req.Header.Set("Content-Type", "application/offset+octet-stream")
574 | req.Header.Set("Upload-Offset", "0")
575 | req.Header.Set("Upload-Checksum", "sha256 c4ca4238a0b923820dcc509a6f75849b")
576 | w := httptest.NewRecorder()
577 |
578 | router := mux.NewRouter()
579 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
580 | router.ServeHTTP(w, req)
581 |
582 | assert.Equal(t, http.StatusBadRequest, w.Code)
583 | assert.Equal(t, `{"message":"unsupported checksum algorithm"}`, w.Body.String())
584 | })
585 |
586 | 	t.Run("PATCH must fail when the checksum value does not match", func(t *testing.T) {
587 | m := map[string]FileMetadata{
588 | "a": {
589 | ID: "a",
590 | UploadedSize: 0,
591 | TotalSize: 1,
592 | },
593 | }
594 | ctrl := NewController(newFakeStore(m), WithExtensions(Extensions{ChecksumExtension}))
595 |
596 | buf := bytes.NewBufferString("1")
597 | req := httptest.NewRequest(http.MethodPatch, "/api/v1/files/a", buf)
598 | req.Header.Set("Content-Type", "application/offset+octet-stream")
599 | req.Header.Set("Upload-Offset", "0")
600 | req.Header.Set("Upload-Checksum", "md5 c4ca4238a0b923820dcc509a6f758495")
601 | w := httptest.NewRecorder()
602 |
603 | router := mux.NewRouter()
604 | router.HandleFunc("/api/v1/files/{file_id}", ctrl.ResumeUpload()).Methods(http.MethodPatch)
605 | router.ServeHTTP(w, req)
606 |
607 | assert.Equal(t, 460, w.Code)
608 | assert.Equal(t, `{"message":"checksum mismatch"}`, w.Body.String())
609 | })
610 | }
611 |
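612 |
613 | // For reference: the Upload-Checksum values exercised above can be produced with
614 | // the standard library (crypto/sha1, crypto/md5, encoding/hex). Note that these
615 | // tests send hex-encoded digests, while the tus checksum extension describes a
616 | // Base64-encoded value; this is only an illustrative sketch and is not used by
617 | // the tests:
618 | //
619 | //	sum := sha1.Sum([]byte("1"))
620 | //	req.Header.Set("Upload-Checksum", "sha1 "+hex.EncodeToString(sum[:]))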
--------------------------------------------------------------------------------
/api/v4/file.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import (
4 | "time"
5 | )
6 |
7 | type FileMetadata struct {
8 | ID string
9 | TotalSize uint64
10 | UploadedSize int64
11 | Metadata string
12 | ExpiresAt time.Time
13 | Path string
14 | }
15 |
--------------------------------------------------------------------------------
/api/v4/notes.md:
--------------------------------------------------------------------------------
1 | Do the resumable upload, but write the chunks to GCS.
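2 |
3 | Rough plan (working notes):
4 |
5 | - CreateUpload only records the FileMetadata; nothing is written to the bucket yet.
6 | - Each PATCH chunk is written to its own object named `<file_id>-<offset>` (see ResumeUpload in api.go).
7 | - Once all chunks are in, compose them into a single `<file_id>` object and delete the parts;
8 |   cmd/composer/main.go currently does this step by hand.
9 |
10 | A minimal sketch of the compose step, assuming a `*storage.BucketHandle` and the chunk offsets kept
11 | in the store (`composeChunks` and its arguments are placeholders, not code that exists yet):
12 |
13 | ```go
14 | // Needs: context, fmt, cloud.google.com/go/storage.
15 | func composeChunks(ctx context.Context, bkt *storage.BucketHandle, id string, offsets []int64) error {
16 | 	// One source object per uploaded chunk, named exactly as ResumeUpload writes them.
17 | 	var parts []*storage.ObjectHandle
18 | 	for _, off := range offsets {
19 | 		parts = append(parts, bkt.Object(fmt.Sprintf("%s-%d", id, off)))
20 | 	}
21 | 	// GCS accepts at most 32 source objects per compose request.
22 | 	_, err := bkt.Object(id).ComposerFrom(parts...).Run(ctx)
23 | 	return err
24 | }
25 | ```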
--------------------------------------------------------------------------------
/api/v4/store.go:
--------------------------------------------------------------------------------
1 | package v3
2 |
3 | import "sync"
4 |
5 |
6 |
7 | type Store struct {
8 | sync.RWMutex
9 | files map[string]FileMetadata
10 | }
11 |
12 | func NewStore() *Store {
13 | return &Store{
14 | files: make(map[string]FileMetadata),
15 | }
16 | }
17 |
18 | func (s *Store) Find(id string) (FileMetadata, bool) {
19 | s.RLock()
20 | defer s.RUnlock()
21 | metadata, exists := s.files[id]
22 | return metadata, exists
23 | }
24 |
25 | func (s *Store) Save(id string, metadata FileMetadata) {
26 | s.Lock()
27 | defer s.Unlock()
28 | s.files[id] = metadata
29 | }
30 |
31 |
--------------------------------------------------------------------------------
/cmd/composer/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 |
6 | "cloud.google.com/go/storage"
7 | "github.com/rs/zerolog/log"
8 | )
9 |
10 | func main() {
11 | ctx := context.Background()
12 | client, err := storage.NewClient(ctx)
13 | if err != nil {
14 | log.Fatal().Err(err).Msg("failed to create storage client")
15 | }
16 | defer client.Close()
17 |
18 | bucket := "imrenagi-upload-test"
19 |
20 | src1 := client.Bucket(bucket).Object("abdba280-4dc3-40df-a9dc-2dbc0fb47f75-0")
21 | src2 := client.Bucket(bucket).Object("abdba280-4dc3-40df-a9dc-2dbc0fb47f75-1")
22 | src3 := client.Bucket(bucket).Object("abdba280-4dc3-40df-a9dc-2dbc0fb47f75-2")
23 | src4 := client.Bucket(bucket).Object("abdba280-4dc3-40df-a9dc-2dbc0fb47f75-3")
24 | dst := client.Bucket(bucket).Object("abdba280-4dc3-40df-a9dc-2dbc0fb47f75")
25 |
26 | 	// ComposerFrom takes varargs, so you can list several source objects here.
27 | 	// Note that GCS accepts at most 32 source objects per compose request.
28 | _, err = dst.ComposerFrom(src1, src2, src3, src4).Run(ctx)
29 | if err != nil {
30 | log.Fatal().Err(err).Msg("failed to compose objects")
31 | }
32 |
33 | src1.Delete(ctx)
34 | src2.Delete(ctx)
35 | src3.Delete(ctx)
36 | src4.Delete(ctx)
37 | }
38 |
--------------------------------------------------------------------------------
/cmd/resumable-client-chucked/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "os"
9 | "strconv"
10 | "strings"
11 | "time"
12 |
13 | "github.com/rs/zerolog"
14 | "github.com/rs/zerolog/log"
15 | )
16 |
17 | func main() {
18 | 	const chunkSize int64 = 32 * 1024 * 1024 // 32MB chunks
19 |
20 | stdOut := zerolog.ConsoleWriter{Out: os.Stdout}
21 | writers := []io.Writer{stdOut}
22 | zerolog.TimeFieldFormat = time.RFC3339Nano
23 | multi := zerolog.MultiLevelWriter(writers...)
24 | log.Logger = zerolog.New(multi).With().Timestamp().Logger()
25 |
26 | f, err := os.Open("testfile")
27 | if err != nil {
28 | log.Fatal().Err(err).Msg("Error opening file")
29 | }
30 |
31 | fi, err := f.Stat()
32 | if err != nil {
33 | f.Close()
34 | log.Fatal().Err(err).Msg("Error getting file info")
35 | }
36 | fileSize := fi.Size()
37 | log.Debug().Int64("size", fileSize).Msg("File size in bytes")
38 |
39 | req, err := http.NewRequest("POST", "http://localhost:8080/api/v3/files", nil)
40 | if err != nil {
41 | f.Close()
42 | log.Fatal().Err(err).Msg("Error creating request")
43 | }
44 | req.Header.Set("Content-Type", "application/octet-stream")
45 | req.Header.Set("Upload-Length", fmt.Sprint(fileSize))
46 | req.Header.Set("Tus-Resumable", "1.0.0")
47 |
48 | f.Close()
49 |
50 | httpClient := &http.Client{
51 | Transport: &http.Transport{
52 | DisableKeepAlives: true,
53 | },
54 | }
55 | resp, err := httpClient.Do(req)
56 | if err != nil {
57 |
58 | log.Fatal().Err(err).Msg("Error sending request")
59 | }
60 | defer resp.Body.Close()
61 | d, err := io.ReadAll(resp.Body)
62 | if err != nil {
63 | log.Fatal().Err(err).Msg("Error reading response")
64 | }
65 | log.Debug().Msg(string(d))
66 | log.Debug().Str("location", resp.Header.Get("Location")).
67 | Int("status", resp.StatusCode).
68 | Msg("Check file creation response")
69 |
70 | location := resp.Header.Get("Location")
71 | id := location[strings.LastIndex(location, "/")+1:]
72 | log.Debug().Str("id", id).Msg("Extracted file ID")
73 |
74 | for {
75 | req, err := http.NewRequest("HEAD", "http://localhost:8080/api/v3/files/"+id, nil)
76 | if err != nil {
77 | log.Fatal().Err(err).Msg("Error creating request")
78 | }
79 | req.Header.Set("Tus-Resumable", "1.0.0")
80 |
81 | resp, err := httpClient.Do(req)
82 | if err != nil {
83 | log.Fatal().Err(err).Msg("Error sending request")
84 | }
85 |
86 | d, err := io.ReadAll(resp.Body)
87 | if err != nil {
88 | log.Fatal().Err(err).Msg("Error reading response")
89 | }
90 | resp.Body.Close()
91 |
92 | log.Debug().Msg(string(d))
93 | uploadOffset := resp.Header.Get("Upload-Offset")
94 | offset, err := strconv.ParseInt(uploadOffset, 10, 64)
95 | if err != nil {
96 | log.Fatal().Err(err).Msg("Error parsing upload offset")
97 | }
98 | log.Debug().Str("Upload-Offset", uploadOffset).Msg("Check file upload offset ---")
99 |
100 | if offset >= fileSize {
101 | log.Debug().
102 | Str("Upload-Offset", uploadOffset).
103 | Str("fileSize", fmt.Sprint(fileSize)).
104 | Msg("File upload complete")
105 | break
106 | }
107 |
108 | f, err := os.Open("testfile")
109 | if err != nil {
110 | log.Fatal().Err(err).Msg("Error opening file")
111 | }
112 |
113 | start, err := f.Seek(offset, io.SeekStart)
114 | if err != nil {
115 | log.Fatal().Err(err).Msg("Error seeking to offset")
116 | }
117 | log.Debug().Int64("start", start).Msg("Seek to offset")
118 |
119 | // Create a limited reader for the chunk
120 | remainingBytes := fileSize - offset
121 | currentChunkSize := chunkSize
122 | if remainingBytes < chunkSize {
123 | currentChunkSize = remainingBytes
124 | }
125 | chunkReader := io.LimitReader(f, currentChunkSize)
126 |
127 | ctx := context.Background()
128 | req, err = http.NewRequestWithContext(ctx, "PATCH", "http://localhost:8080/api/v3/files/"+id, chunkReader)
129 | if err != nil {
130 | log.Fatal().Err(err).Msg("Error creating request")
131 | }
132 | req.Header.Set("Content-Type", "application/offset+octet-stream")
133 | req.Header.Set("Tus-Resumable", "1.0.0")
134 | req.Header.Set("Upload-Offset", fmt.Sprint(offset))
135 |
136 | log.Debug().
137 | Int64("chunk_size", currentChunkSize).
138 | Int64("offset", offset).
139 | Msg("Sending file chunk")
140 |
141 | resp, err = httpClient.Do(req)
142 | if err != nil {
143 | log.Warn().Err(err).Msg("Error sending request")
144 | f.Close()
145 | continue
146 | }
147 | if resp == nil {
148 | log.Debug().Msg("patch response is nil")
149 | f.Close()
150 | continue
151 | }
152 |
153 | d, err = io.ReadAll(resp.Body)
154 | if err != nil {
155 | log.Warn().Err(err).Msg("Error reading response")
156 | }
157 | resp.Body.Close()
158 | f.Close()
159 |
160 | log.Debug().Msg(string(d))
161 | log.Debug().Int("status", resp.StatusCode).
162 | Str("Upload-Offset", resp.Header.Get("Upload-Offset")).
163 | Msg("Check file upload response")
164 | }
165 | }
166 |
--------------------------------------------------------------------------------
/cmd/resumable-client-chucked/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import requests
3 | import logging
4 | from urllib.parse import urlparse
5 | import time
6 |
7 | # Configure logging
8 | logging.basicConfig(
9 | level=logging.DEBUG,
10 | format='%(asctime)s - %(levelname)s - %(message)s'
11 | )
12 | logger = logging.getLogger(__name__)
13 |
14 | CHUNK_SIZE = 32 * 1024 * 1024 # 32MB chunks
15 |
16 | def main():
17 | # Open and get file info
18 | try:
19 | file_path = "testfile"
20 | file_size = os.path.getsize(file_path)
21 | logger.debug(f"File size in bytes: {file_size}")
22 | except Exception as e:
23 | logger.fatal(f"Error accessing file: {e}")
24 | return
25 |
26 | # Create upload
27 | headers = {
28 | "Content-Type": "application/octet-stream",
29 | "Upload-Length": str(file_size),
30 | "Tus-Resumable": "1.0.0"
31 | }
32 |
33 | try:
34 | response = requests.post(
35 | "http://localhost:8080/api/v3/files",
36 | headers=headers
37 | )
38 | response.raise_for_status()
39 |
40 | location = response.headers.get("Location")
41 | file_id = location.split("/")[-1]
42 | logger.debug(f"Extracted file ID: {file_id}")
43 | except Exception as e:
44 | logger.fatal(f"Error creating upload: {e}")
45 | return
46 |
47 | while True:
48 | try:
49 | # Get current offset
50 | head_response = requests.head(
51 | f"http://localhost:8080/api/v3/files/{file_id}",
52 | headers={"Tus-Resumable": "1.0.0"}
53 | )
54 | head_response.raise_for_status()
55 |
56 | offset = int(head_response.headers.get("Upload-Offset", "0"))
57 | logger.debug(f"Current upload offset: {offset}")
58 |
59 | if offset >= file_size:
60 | logger.debug("File upload complete")
61 | break
62 |
63 | # Calculate chunk size for this iteration
64 | remaining_bytes = file_size - offset
65 | current_chunk_size = min(CHUNK_SIZE, remaining_bytes)
66 |
67 | # Open file and seek to offset
68 | with open(file_path, "rb") as f:
69 | f.seek(offset)
70 | chunk = f.read(current_chunk_size)
71 |
72 | logger.debug(f"Sending chunk: size={len(chunk)}, offset={offset}")
73 |
74 | # Upload chunk
75 | headers = {
76 | "Content-Type": "application/offset+octet-stream",
77 | "Tus-Resumable": "1.0.0",
78 | "Upload-Offset": str(offset)
79 | }
80 |
81 | patch_response = requests.patch(
82 | f"http://localhost:8080/api/v3/files/{file_id}",
83 | headers=headers,
84 | data=chunk
85 | )
86 | patch_response.raise_for_status()
87 |
88 | logger.debug(
89 | f"Upload response: status={patch_response.status_code}, "
90 | f"new_offset={patch_response.headers.get('Upload-Offset')}"
91 | )
92 |
93 | except Exception as e:
94 | logger.warning(f"Error during upload: {e}")
95 | time.sleep(1) # Wait before retry
96 | continue
97 |
98 | if __name__ == "__main__":
99 | main()
100 |
--------------------------------------------------------------------------------
/cmd/resumable-client/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "io"
7 | "net/http"
8 | "os"
9 | "strconv"
10 | "strings"
11 | "time"
12 |
13 | "github.com/rs/zerolog"
14 | "github.com/rs/zerolog/log"
15 | )
16 |
17 | func main() {
18 |
19 | stdOut := zerolog.ConsoleWriter{Out: os.Stdout}
20 | writers := []io.Writer{stdOut}
21 | zerolog.TimeFieldFormat = time.RFC3339Nano
22 | multi := zerolog.MultiLevelWriter(writers...)
23 | log.Logger = zerolog.New(multi).With().Timestamp().Logger()
24 |
25 | f, err := os.Open("testfile")
26 | if err != nil {
27 | log.Fatal().Err(err).Msg("Error opening file")
28 | }
29 | defer f.Close()
30 |
31 | fi, err := f.Stat()
32 | if err != nil {
33 | log.Fatal().Err(err).Msg("Error getting file info")
34 | }
35 | fileSize := fi.Size()
36 | log.Debug().Int64("size", fileSize).Msg("File size in bytes")
37 |
38 | req, err := http.NewRequest("POST", "http://localhost:8080/api/v3/files", nil)
39 | if err != nil {
40 | log.Fatal().Err(err).Msg("Error creating request")
41 | }
42 | req.Header.Set("Content-Type", "application/octet-stream")
43 | req.Header.Set("Upload-Length", fmt.Sprint(fileSize))
44 | req.Header.Set("Tus-Resumable", "1.0.0")
45 |
46 | httpClient := &http.Client{
47 | Transport: &http.Transport{
48 | DisableKeepAlives: true,
49 | },
50 | }
51 | resp, err := httpClient.Do(req)
52 | if err != nil {
53 | log.Fatal().Err(err).Msg("Error sending request")
54 | }
55 | defer resp.Body.Close()
56 | d, err := io.ReadAll(resp.Body)
57 | if err != nil {
58 | log.Fatal().Err(err).Msg("Error reading response")
59 | }
60 | log.Debug().Msg(string(d))
61 | log.Debug().Str("location", resp.Header.Get("Location")).
62 | Int("status", resp.StatusCode).
63 | Msg("Check file creation response")
64 |
65 | location := resp.Header.Get("Location")
66 | id := location[strings.LastIndex(location, "/")+1:]
67 | log.Debug().Str("id", id).Msg("Extracted file ID")
68 |
69 | for {
70 |
71 | req, err := http.NewRequest("HEAD", "http://localhost:8080/api/v3/files/"+id, nil)
72 | if err != nil {
73 | log.Fatal().Err(err).Msg("Error creating request")
74 | }
75 | req.Header.Set("Tus-Resumable", "1.0.0")
76 |
77 | resp, err := httpClient.Do(req)
78 | if err != nil {
79 | log.Fatal().Err(err).Msg("Error sending request")
80 | }
81 |
82 | d, err := io.ReadAll(resp.Body)
83 | if err != nil {
84 | log.Fatal().Err(err).Msg("Error reading response")
85 | }
86 | resp.Body.Close()
87 |
88 | log.Debug().Msg(string(d))
89 |
90 | uploadOffset := resp.Header.Get("Upload-Offset")
91 | offset, err := strconv.ParseInt(uploadOffset, 10, 64)
92 | if err != nil {
93 | log.Fatal().Err(err).Msg("Error parsing upload offset")
94 | }
95 | log.Debug().Str("Upload-Offset", uploadOffset).Msg("Check file upload offset ---")
96 |
97 | if offset >= fileSize {
98 | log.Debug().
99 | Str("Upload-Offset", uploadOffset).
100 | Str("fileSize", fmt.Sprint(fileSize)).
101 | Msg("File upload complete")
102 | break
103 | }
104 |
105 | f, err := os.Open("testfile")
106 | if err != nil {
107 | log.Fatal().Err(err).Msg("Error opening file")
108 | }
109 | defer f.Close()
110 |
111 | start, err := f.Seek(offset, io.SeekStart)
112 | if err != nil {
113 | log.Fatal().Err(err).Msg("Error seeking to offset")
114 | }
115 | log.Debug().Int64("start", start).Msg("Seek to offset")
116 |
117 | ctx := context.Background()
118 | // ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
119 | // defer cancel()
120 | req, err = http.NewRequestWithContext(ctx, "PATCH", "http://localhost:8080/api/v3/files/"+id, f)
121 | if err != nil {
122 | log.Fatal().Err(err).Msg("Error creating request")
123 | }
124 | req.Header.Set("Content-Type", "application/offset+octet-stream")
125 | req.Header.Set("Tus-Resumable", "1.0.0")
126 | req.Header.Set("Upload-Offset", fmt.Sprint(offset))
127 |
128 | log.Debug().Msg("Sending file data")
129 |
130 | resp, err = httpClient.Do(req)
131 | if err != nil {
132 | log.Warn().Err(err).Msg("Error sending request")
133 | }
134 | if resp == nil {
135 | log.Debug().Msg("patch response is nil")
136 | continue
137 | }
138 |
139 | d, err = io.ReadAll(resp.Body)
140 | if err != nil {
141 | log.Warn().Err(err).Msg("Error reading response")
142 | }
143 | resp.Body.Close()
144 |
145 | log.Debug().Msg(string(d))
146 | log.Debug().Int("status", resp.StatusCode).
147 | Str("Upload-Offset", resp.Header.Get("Upload-Offset")).
148 | Msg("Check file upload response")
149 |
150 | }
151 |
152 | }
153 |
--------------------------------------------------------------------------------
/cmd/server/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/imrenagi/go-http-upload/server"
7 | "github.com/rs/zerolog/log"
8 | )
9 |
10 | func main() {
11 | ctx := context.Background()
12 | // Initialize the logger
13 | _ = server.InitializeLogger("debug")
14 |
15 | server := server.New(server.Opts{})
16 | if err := server.Run(ctx); err != nil {
17 | log.Fatal().Err(err).Msg("failed to run the server")
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 | services:
3 | grafana:
4 | image: grafana/grafana:latest
5 | ports:
6 | - 3000:3000
7 | volumes:
8 | - ./scripts/grafana/provisioning:/etc/grafana/provisioning
9 | - ./scripts/grafana/dashboards:/etc/grafana/demo-dashboards
10 | - grafana_data:/var/lib/grafana
11 |
12 | prometheus:
13 | image: prom/prometheus:latest
14 | volumes:
15 | - ./scripts/prometheus:/etc/prometheus
16 | - prometheus_data:/prometheus
17 | ports:
18 | - "9090:9090"
19 | command:
20 | - '--config.file=/etc/prometheus/prometheus.yml'
21 | - '--storage.tsdb.retention.time=5m'
22 | - '--storage.tsdb.retention.size=10GB'
23 | extra_hosts:
24 | - "host.docker.internal:host-gateway"
25 |
26 | node_exporter:
27 | image: quay.io/prometheus/node-exporter:latest
28 | command:
29 | - '--path.rootfs=/host'
30 | restart: unless-stopped
31 | volumes:
32 | - '/:/host:ro,rslave'
33 | ports:
34 | - 9100:9100
35 |
36 | volumes:
37 | grafana_data:
38 | postgres:
39 | redis_data:
40 | prometheus_data:
41 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/imrenagi/go-http-upload
2 |
3 | go 1.23.0
4 |
5 | require (
6 | cel.dev/expr v0.16.2 // indirect
7 | cloud.google.com/go v0.116.0 // indirect
8 | cloud.google.com/go/auth v0.13.0 // indirect
9 | cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
10 | cloud.google.com/go/compute/metadata v0.6.0 // indirect
11 | cloud.google.com/go/iam v1.2.2 // indirect
12 | cloud.google.com/go/monitoring v1.21.2 // indirect
13 | cloud.google.com/go/storage v1.49.0 // indirect
14 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
15 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
16 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
17 | github.com/beorn7/perks v1.0.1 // indirect
18 | github.com/cenkalti/backoff/v4 v4.3.0 // indirect
19 | github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
20 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
21 | github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
22 | github.com/davecgh/go-spew v1.1.1 // indirect
23 | github.com/envoyproxy/go-control-plane v0.13.1 // indirect
24 | github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
25 | github.com/felixge/httpsnoop v1.0.4 // indirect
26 | github.com/go-logr/logr v1.4.2 // indirect
27 | github.com/go-logr/stdr v1.2.2 // indirect
28 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
29 | github.com/google/s2a-go v0.1.8 // indirect
30 | github.com/google/uuid v1.6.0 // indirect
31 | github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
32 | github.com/googleapis/gax-go/v2 v2.14.0 // indirect
33 | github.com/gorilla/mux v1.8.1 // indirect
34 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
35 | github.com/klauspost/compress v1.17.9 // indirect
36 | github.com/mattn/go-colorable v0.1.13 // indirect
37 | github.com/mattn/go-isatty v0.0.19 // indirect
38 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
39 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
40 | github.com/pmezard/go-difflib v1.0.0 // indirect
41 | github.com/prometheus/client_golang v1.20.5 // indirect
42 | github.com/prometheus/client_model v0.6.1 // indirect
43 | github.com/prometheus/common v0.61.0 // indirect
44 | github.com/prometheus/procfs v0.15.1 // indirect
45 | github.com/rs/zerolog v1.33.0 // indirect
46 | github.com/stretchr/testify v1.10.0 // indirect
47 | go.opencensus.io v0.24.0 // indirect
48 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect
49 | go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect
50 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
51 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
52 | go.opentelemetry.io/otel v1.33.0 // indirect
53 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
54 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
55 | go.opentelemetry.io/otel/exporters/prometheus v0.55.0 // indirect
56 | go.opentelemetry.io/otel/metric v1.33.0 // indirect
57 | go.opentelemetry.io/otel/sdk v1.33.0 // indirect
58 | go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
59 | go.opentelemetry.io/otel/trace v1.33.0 // indirect
60 | go.opentelemetry.io/proto/otlp v1.4.0 // indirect
61 | golang.org/x/crypto v0.31.0 // indirect
62 | golang.org/x/net v0.33.0 // indirect
63 | golang.org/x/oauth2 v0.24.0 // indirect
64 | golang.org/x/sync v0.10.0 // indirect
65 | golang.org/x/sys v0.28.0 // indirect
66 | golang.org/x/text v0.21.0 // indirect
67 | golang.org/x/time v0.8.0 // indirect
68 | google.golang.org/api v0.214.0 // indirect
69 | google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect
70 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
71 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
72 | google.golang.org/grpc v1.69.2 // indirect
73 | google.golang.org/protobuf v1.35.2 // indirect
74 | gopkg.in/yaml.v3 v3.0.1 // indirect
75 | )
76 |
--------------------------------------------------------------------------------
/go.sum:
--------------------------------------------------------------------------------
1 | cel.dev/expr v0.16.2 h1:RwRhoH17VhAu9U5CMvMhH1PDVgf0tuz9FT+24AfMLfU=
2 | cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8=
3 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
4 | cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
5 | cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
6 | cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs=
7 | cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q=
8 | cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU=
9 | cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
10 | cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
11 | cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
12 | cloud.google.com/go/iam v1.2.2 h1:ozUSofHUGf/F4tCNy/mu9tHLTaxZFLOUiKzjcgWHGIA=
13 | cloud.google.com/go/iam v1.2.2/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY=
14 | cloud.google.com/go/monitoring v1.21.2 h1:FChwVtClH19E7pJ+e0xUhJPGksctZNVOk2UhMmblmdU=
15 | cloud.google.com/go/monitoring v1.21.2/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU=
16 | cloud.google.com/go/storage v1.49.0 h1:zenOPBOWHCnojRd9aJZAyQXBYqkJkdQS42dxL55CIMw=
17 | cloud.google.com/go/storage v1.49.0/go.mod h1:k1eHhhpLvrPjVGfo0mOUPEJ4Y2+a/Hv5PiwehZI9qGU=
18 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
19 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
20 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
21 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s=
22 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk=
23 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU=
24 | github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE=
25 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
26 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
27 | github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
28 | github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
29 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
30 | github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
31 | github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
32 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
33 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
34 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
35 | github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
36 | github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
37 | github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
38 | github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
39 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
40 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
41 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
42 | github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
43 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
44 | github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
45 | github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
46 | github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
47 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
48 | github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
49 | github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
50 | github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
51 | github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
52 | github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
53 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
54 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
55 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
56 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
57 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
58 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
59 | github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
60 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
61 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
62 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
63 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
64 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
65 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
66 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
67 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
68 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
69 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
70 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
71 | github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
72 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
73 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
74 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
75 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
76 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
77 | github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
78 | github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
79 | github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
80 | github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
81 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
82 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
83 | github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
84 | github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
85 | github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o=
86 | github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk=
87 | github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
88 | github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
89 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
90 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
91 | github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
92 | github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
93 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
94 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
95 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
96 | github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
97 | github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
98 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
99 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
100 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
101 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
102 | github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
103 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
104 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
105 | github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
106 | github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
107 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
108 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
109 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
110 | github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
111 | github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
112 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
113 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
114 | github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
115 | github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
116 | github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
117 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
118 | github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
119 | github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
120 | github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
121 | github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
122 | github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
123 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
124 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
125 | go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
126 | go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
127 | go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
128 | go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
129 | go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA=
130 | go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00=
131 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
132 | go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
133 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
134 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
135 | go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
136 | go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
137 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
138 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
139 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
140 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
141 | go.opentelemetry.io/otel/exporters/prometheus v0.55.0 h1:sSPw658Lk2NWAv74lkD3B/RSDb+xRFx46GjkrL3VUZo=
142 | go.opentelemetry.io/otel/exporters/prometheus v0.55.0/go.mod h1:nC00vyCmQixoeaxF6KNyP42II/RHa9UdruK02qBmHvI=
143 | go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
144 | go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
145 | go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
146 | go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
147 | go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU=
148 | go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q=
149 | go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
150 | go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
151 | go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
152 | go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
153 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
154 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
155 | golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
156 | golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
157 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
158 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
159 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
160 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
161 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
162 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
163 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
164 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
165 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
166 | golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
167 | golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
168 | golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
169 | golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
170 | golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
171 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
172 | golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
173 | golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
174 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
175 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
176 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
177 | golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
178 | golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
179 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
180 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
181 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
182 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
183 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
184 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
185 | golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
186 | golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
187 | golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
188 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
189 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
190 | golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
191 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
192 | golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
193 | golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
194 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
195 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
196 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
197 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
198 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
199 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
200 | google.golang.org/api v0.214.0 h1:h2Gkq07OYi6kusGOaT/9rnNljuXmqPnaig7WGPmKbwA=
201 | google.golang.org/api v0.214.0/go.mod h1:bYPpLG8AyeMWwDU6NXoB00xC0DFkikVvd5MfwoxjLqE=
202 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
203 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
204 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
205 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
206 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
207 | google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
208 | google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
209 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
210 | google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
211 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY=
212 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
213 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
214 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
215 | google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
216 | google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
217 | google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
218 | google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
219 | google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
220 | google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
221 | google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
222 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
223 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
224 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
225 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
226 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
227 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
228 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
229 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
230 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
231 | google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
232 | google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
233 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
234 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
235 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
236 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
237 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
238 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
239 |
--------------------------------------------------------------------------------
/next.md:
--------------------------------------------------------------------------------
1 | * change writer to gcs (see the sketch below)
2 | * concatenation protocol
3 | * combining files
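
A minimal sketch of what "change writer to gcs" could look like, assuming the `cloud.google.com/go/storage` client. `writeToGCS`, the bucket, and the object name are placeholders for illustration, not existing code in this repo:

```go
package main

import (
	"context"
	"io"

	"cloud.google.com/go/storage"
)

// writeToGCS is a hypothetical helper that streams src into a GCS object
// instead of a local file. Bucket and object names are placeholders.
func writeToGCS(ctx context.Context, src io.Reader, bucket, object string) (int64, error) {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return 0, err
	}
	defer client.Close()

	w := client.Bucket(bucket).Object(object).NewWriter(ctx)
	n, err := io.Copy(w, src)
	if err != nil {
		w.Close()
		return n, err
	}
	// Close flushes the remaining buffered data and finalizes the object.
	return n, w.Close()
}
```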
--------------------------------------------------------------------------------
/notes.md:
--------------------------------------------------------------------------------
1 | # Notes
2 |
3 |
4 | 1. This code block reads the request body in 32KB chunks and writes each chunk to a file, looping until the end of the body is reached. It keeps a running total of the bytes written and logs the read size, the written size, and any error that occurs while reading or writing.
5 | ```go
6 | var n int64
7 | buf := make([]byte, 32*1024) // 32KB buffer
8 | for {
9 | log.Debug().Msg("read the request body")
10 | nr, er := r.Body.Read(buf)
11 | log.Debug().Int("read_size", nr).Msg("read the request body")
12 | if nr > 0 {
13 | nw, ew := f.Write(buf[0:nr])
14 | if nw < 0 || nr < nw {
15 | nw = 0
16 | if ew == nil {
17 | ew = errors.New("invalid write result")
18 | }
19 | }
20 | n += int64(nw)
21 | if ew != nil {
22 | log.Error().Err(ew).Msg("error writing the file")
23 | err = ew
24 | break
25 | }
26 | if nr != nw {
27 | log.Error().Err(io.ErrShortWrite).Msg("error writing the file")
28 | err = io.ErrShortWrite
29 | break
30 | }
31 | }
32 |
33 | if er != nil {
34 | log.Error().Err(er).Msg("error reading the request body")
35 | if er != io.EOF {
36 | err = er
37 | }
38 | break
39 | }
40 | log.Debug().Int64("written_size", n).Msg("wrote the data to the file")
41 | }
42 | ```
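
For reference, the standard library can drive the same loop: `io.CopyBuffer` performs the chunked read/write with a caller-supplied buffer. A minimal equivalent, assuming the same `r.Body`, `f`, and zerolog logger as above:

```go
// Roughly equivalent to the hand-rolled loop above: io.CopyBuffer reads from
// r.Body into the 32KB buffer and writes each chunk to f, returning the total
// number of bytes written and the first error encountered (io.EOF excluded).
buf := make([]byte, 32*1024)
n, err := io.CopyBuffer(f, r.Body, buf)
if err != nil {
	log.Error().Err(err).Msg("error copying the request body")
}
log.Debug().Int64("written_size", n).Msg("wrote the data to the file")
```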
43 |
44 |
45 | ```go
46 | func (c *Controller) ResumeUpload() http.HandlerFunc {
47 | return func(w http.ResponseWriter, r *http.Request) {
48 | // ... existing code ...
49 |
50 | f, err := os.OpenFile(fm.FilePath(), os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
51 | if err != nil {
52 | log.Error().Err(err).Msg("error opening the file")
53 | writeError(w, http.StatusBadRequest, errors.New("error opening the file"))
54 | return
55 | }
56 | defer f.Close()
57 |
58 | // Store the current position before writing
59 | originalPos, err := f.Seek(0, io.SeekCurrent)
60 | if err != nil {
61 | log.Error().Err(err).Msg("error getting file position")
62 | writeError(w, http.StatusInternalServerError, errors.New("error preparing file"))
63 | return
64 | }
65 |
66 | var n int64
67 | if c.extensions.Enabled(ChecksumExtension) && checksum.Algorithm != "" {
68 | var hash hash.Hash
69 | switch checksum.Algorithm {
70 | case "md5":
71 | hash = md5.New()
72 | case "sha1":
73 | hash = sha1.New()
74 | default:
75 | writeError(w, http.StatusBadRequest, errors.New("unsupported checksum algorithm"))
76 | return
77 | }
78 |
79 | reader := io.TeeReader(r.Body, hash)
80 | n, err = io.Copy(f, reader)
81 | if err != nil {
82 | // Revert to original position on error
83 | f.Seek(originalPos, io.SeekStart)
84 | log.Error().Err(err).Msg("error writing file")
85 | writeError(w, http.StatusInternalServerError, errors.New("error writing file"))
86 | return
87 | }
88 |
89 | calculatedHash := hex.EncodeToString(hash.Sum(nil))
90 | if calculatedHash != checksum.Value {
91 | // Revert to original position if checksum fails
92 | f.Seek(originalPos, io.SeekStart)
93 | f.Truncate(originalPos) // Ensure file is truncated to original size
94 | log.Debug().Msg("Checksum mismatch")
95 | writeError(w, 460, errors.New("checksum mismatch"))
96 | return
97 | }
98 | } else {
99 | n, err = io.Copy(f, r.Body)
100 | if err != nil {
101 | // Revert to original position on error
102 | f.Seek(originalPos, io.SeekStart)
103 | f.Truncate(originalPos)
104 | log.Error().Err(err).Msg("error writing file")
105 | writeError(w, http.StatusInternalServerError, errors.New("error writing file"))
106 | return
107 | }
108 | }
109 |
110 | // If we get here, everything succeeded
111 | fm.UploadedSize += n
112 | c.store.Save(fm.ID, fm)
113 |
114 | // ... rest of response handling ...
115 | }
116 | }
117 | ```
118 |
119 | ```go
120 |
121 | func main() {
122 | f, err := os.OpenFile("test.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
123 | if err != nil {
124 | log.Fatal().Err(err).Msg("failed to open file")
125 | }
126 | defer f.Close()
127 |
128 | originalPos, err := f.Seek(0, io.SeekEnd)
129 | log.Info().Int64("originalPos", originalPos).Msg("getting the original position")
130 | if err != nil {
131 | log.Fatal().Err(err).Msg("failed to seek")
132 | }
133 | n, err := f.WriteString("test")
134 | if err != nil {
135 | log.Fatal().Err(err).Msg("failed to write")
136 | }
137 |
138 | cur, _ := f.Seek(0, io.SeekCurrent)
139 | log.Info().Int64("cur", cur).Msg("getting the position after write")
140 |
141 | x := int64(-1 * n)
142 | num, err := f.Seek(x, io.SeekEnd)
143 | log.Info().Int64("num", num).Msg("getting the start position for truncate")
144 | if err != nil {
145 | log.Fatal().Err(err).Msg("failed to seek")
146 | }
147 | err = f.Truncate(num)
148 | if err != nil {
149 | log.Fatal().Err(err).Msg("failed to truncate")
150 | }
151 | }
152 | ```
153 |
154 | # Testing
155 |
156 | 1. Upload a 100MB file with no timeout on the server and client side.
157 |
158 | The server handles the upload properly.
159 |
160 | 1. Upload a 100MB file with a timeout on the server side, i.e. just a big upload to the server.
161 |
162 | * Adding a 3s idle timeout to the server -> the upload succeeds.
163 | * Adding a 3s or 5s read timeout to the server -> the server times out, but some data is already written on the server side and the client successfully retries the upload.
164 |
165 | * Adding a 3s write timeout to the server -> the client gets EOF, but the server still receives all the data.
166 |
167 | ```bash
168 | 1:26AM WRN Error sending request error="Patch \"http://localhost:8080/api/v3/files/6470f427-0b21-40c7-8ac5-10f186e0b2b3\": EOF"
169 |
170 |
171 | ```
172 |
173 | Changing the write timeout to 10s causes no issue on the client. The write timeout applies to writing the response back to the client; the timer starts the first time the server tries to write to the client.
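
The retry mentioned in the read-timeout test above maps onto the resumable flow exposed by the v3 API (HEAD to get the current offset, PATCH to resume). Below is a rough sketch of how a client could recover from that EOF, following the tus 1.0.0 header conventions; `resumeUpload`, the upload URL, the `1mb_file` name, and the expected 204 response are assumptions for illustration, not the actual cmd/resumable-client code:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
	"strconv"

	"github.com/rs/zerolog/log"
)

// resumeUpload asks the server for the current Upload-Offset via HEAD, seeks
// the local file to that offset, and re-sends the remaining bytes with PATCH.
func resumeUpload(uploadURL string, f *os.File) error {
	head, err := http.NewRequest(http.MethodHead, uploadURL, nil)
	if err != nil {
		return err
	}
	head.Header.Set("Tus-Resumable", "1.0.0")
	resp, err := http.DefaultClient.Do(head)
	if err != nil {
		return err
	}
	resp.Body.Close()

	offset, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64)
	if err != nil {
		return fmt.Errorf("invalid Upload-Offset header: %w", err)
	}
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		return err
	}

	patch, err := http.NewRequest(http.MethodPatch, uploadURL, f)
	if err != nil {
		return err
	}
	patch.Header.Set("Tus-Resumable", "1.0.0")
	patch.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
	patch.Header.Set("Content-Type", "application/offset+octet-stream")

	res, err := http.DefaultClient.Do(patch)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status resuming upload: %s", res.Status)
	}
	return nil
}

func main() {
	f, err := os.Open("1mb_file")
	if err != nil {
		log.Fatal().Err(err).Msg("failed to open file")
	}
	defer f.Close()
	// The upload URL would normally come from the Location header returned by
	// the POST /api/v3/files request; this one is a placeholder.
	if err := resumeUpload("http://localhost:8080/api/v3/files/<file_id>", f); err != nil {
		log.Fatal().Err(err).Msg("failed to resume upload")
	}
}
```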
174 |
175 | http://monitoring.imrenagi.com:3080/share/halLhkMdG2uKLr6ExorTz
176 |
177 |
178 |
--------------------------------------------------------------------------------
/scripts/grafana/provisioning/dashboards/demo.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | providers:
3 | - allowUiUpdates: true
4 | disableDeletion: false
5 | name: demo
6 | options:
7 | foldersFromFilesStructure: true
8 | path: /etc/grafana/demo-dashboards
9 | updateIntervalSeconds: 10
10 |
--------------------------------------------------------------------------------
/scripts/grafana/provisioning/datasources/demo.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 | datasources:
3 | - access: proxy
4 | editable: false
5 | isDefault: true
6 | name: Prometheus
7 | type: prometheus
8 | url: http://prometheus:9090
9 | version: 1
10 |
--------------------------------------------------------------------------------
/scripts/locust/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 |
163 | *mb_file
164 |
--------------------------------------------------------------------------------
/scripts/locust/locustfiles/locustfile.py:
--------------------------------------------------------------------------------
1 | from locust import FastHttpUser, task, between
2 | import uuid
3 |
4 | class NaiveUploader(FastHttpUser):
5 | # wait_time = between(0,1)
6 | @task(1)
7 | def upload_files(self):
8 | with open("1mb_file", "rb") as f:
9 | file_id = "file-upload-" + str(uuid.uuid4())
10 | self.client.post("/api/v1/binary", data=f.read(), headers={"X-Api-File-Name": file_id}, name="/api/v1/binary")
11 |
--------------------------------------------------------------------------------
/scripts/locust/requirements.txt:
--------------------------------------------------------------------------------
1 | locust==2.19.0
2 | pandas
3 | matplotlib
4 | numpy
5 | seaborn
6 |
--------------------------------------------------------------------------------
/scripts/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | scrape_configs:
2 | - job_name: grafana
3 | scrape_interval: 3s
4 | static_configs:
5 | - targets: ['grafana:3000']
6 | - job_name: application
7 | scrape_interval: 3s
8 | static_configs:
9 | - targets: ['host.docker.internal:8080']
10 | - job_name: node_exporter
11 | scrape_interval: 3s
12 | static_configs:
13 | - targets: ['node_exporter:9100']
14 |
15 |
--------------------------------------------------------------------------------
/server/log.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "io"
5 | "net/http"
6 | "os"
7 | "time"
8 |
9 | "github.com/google/uuid"
10 | "github.com/rs/zerolog"
11 | "github.com/rs/zerolog/log"
12 | )
13 |
14 | func InitializeLogger(lvl string) func() {
15 | level, err := zerolog.ParseLevel(lvl)
16 | if err != nil {
17 | log.Fatal().Err(err).Msg("unable to parse log level")
18 | }
19 | zerolog.SetGlobalLevel(level)
20 |
21 | // var stdOut io.Writer = os.Stdout
22 | stdOut := zerolog.ConsoleWriter{Out: os.Stdout}
23 |
24 | writers := []io.Writer{stdOut}
25 | zerolog.TimeFieldFormat = time.RFC3339Nano
26 |
27 | multi := zerolog.MultiLevelWriter(writers...)
28 | log.Logger = zerolog.New(multi).With().Timestamp().Logger()
29 |
30 | return func() {}
31 | }
32 |
33 | func LogInterceptor(next http.Handler) http.Handler {
34 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
35 |
36 | log := log.With().Str("request_id", uuid.New().String()).Logger()
37 |
38 | log.Debug().
39 | Str("method", r.Method).
40 | Str("path", r.URL.Path).
41 | Str("remote", r.RemoteAddr).
42 | Msg("request started")
43 |
44 | next.ServeHTTP(w, r.WithContext(log.WithContext(r.Context())))
45 | })
46 | }
47 |
--------------------------------------------------------------------------------
/server/opentelemetry.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/rs/zerolog/log"
7 | "go.opentelemetry.io/otel"
8 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
9 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
10 | "go.opentelemetry.io/otel/exporters/prometheus"
11 | "go.opentelemetry.io/otel/sdk/metric"
12 | "go.opentelemetry.io/otel/sdk/resource"
13 | "go.opentelemetry.io/otel/sdk/trace"
14 | semconv "go.opentelemetry.io/otel/semconv/v1.10.0"
15 | "google.golang.org/grpc"
16 | )
17 |
18 | type ShutdownFn func(context.Context) error
19 |
20 | func InitMeterProvider(ctx context.Context, name string, reader metric.Reader) ShutdownFn {
21 | res := telemetryResource(ctx, name)
22 | meterProvider := metric.NewMeterProvider(
23 | metric.WithResource(res),
24 | metric.WithReader(reader))
25 | otel.SetMeterProvider(meterProvider)
26 | return meterProvider.Shutdown
27 | }
28 |
29 | func InitTraceProvider(ctx context.Context, name string, spanExporter trace.SpanExporter) ShutdownFn {
30 | res := telemetryResource(ctx, name)
31 | bsp := trace.NewBatchSpanProcessor(spanExporter)
32 | tracerProvider := trace.NewTracerProvider(
33 | trace.WithSampler(trace.TraceIDRatioBased(1)),
34 | trace.WithResource(res),
35 | trace.WithSpanProcessor(bsp),
36 | )
37 | otel.SetTracerProvider(tracerProvider)
38 | return tracerProvider.Shutdown
39 | }
40 |
41 | func telemetryResource(ctx context.Context, serviceName string) *resource.Resource {
42 | res, err := resource.New(ctx,
43 | resource.WithFromEnv(),
44 | resource.WithProcess(),
45 | resource.WithTelemetrySDK(),
46 | resource.WithHost(),
47 | resource.WithAttributes(
48 | // the service name used to display traces in backend
49 | semconv.ServiceNameKey.String(serviceName),
50 | ),
51 | )
52 | if err != nil {
53 | log.Fatal().Err(err).Msg("unable to initialize telemetry resource")
54 | }
55 | return res
56 | }
57 |
58 | func NewPrometheusExporter(ctx context.Context) *prometheus.Exporter {
59 | exporter, err := prometheus.New()
60 | if err != nil {
61 | log.Fatal().Err(err).Msg("failed to initialize prometheus exporter")
62 | }
63 | return exporter
64 | }
65 |
66 | func NewOTLPTraceExporter(ctx context.Context, otlpEndpoint string) *otlptrace.Exporter {
67 | traceClient := otlptracegrpc.NewClient(
68 | otlptracegrpc.WithInsecure(),
69 | otlptracegrpc.WithEndpoint(otlpEndpoint),
70 | otlptracegrpc.WithDialOption(grpc.WithBlock()))
71 | traceExp, err := otlptrace.New(ctx, traceClient)
72 | if err != nil {
73 | log.Fatal().Err(err).Msgf("Failed to create the collector trace exporter")
74 | }
75 | return traceExp
76 | }
77 |
--------------------------------------------------------------------------------
/server/server.go:
--------------------------------------------------------------------------------
1 | package server
2 |
3 | import (
4 | "context"
5 | "net/http"
6 | "time"
7 |
8 | "github.com/gorilla/mux"
9 | v1 "github.com/imrenagi/go-http-upload/api/v1"
10 | v3 "github.com/imrenagi/go-http-upload/api/v3"
11 | v4 "github.com/imrenagi/go-http-upload/api/v4"
12 | "github.com/prometheus/client_golang/prometheus/promhttp"
13 | "github.com/rs/zerolog/log"
14 | "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
15 | "go.opentelemetry.io/otel"
16 | )
17 |
18 | var meter = otel.Meter("github.com/imrenagi/go-http-upload/server")
19 |
20 | type Opts struct {
21 | }
22 |
23 | func New(opts Opts) Server {
24 | s := Server{
25 | opts: opts,
26 | }
27 | return s
28 | }
29 |
30 | type Server struct {
31 | opts Opts
32 | }
33 |
34 | // Run runs the gRPC-Gateway, dialing the provided address.
35 | func (s *Server) Run(ctx context.Context) error {
36 | log.Info().Msg("starting server")
37 |
38 | serviceName := "go-http-uploader"
39 |
40 | prometheusExporter := NewPrometheusExporter(ctx)
41 | meterShutdownFn := InitMeterProvider(ctx, serviceName, prometheusExporter)
42 |
43 | httpServer := &http.Server{
44 | Addr: ":8080",
45 | Handler: s.newHTTPHandler(),
46 | // ReadTimeout is the maximum duration for reading the entire request, including the body.
47 | // This prevents slowloris attacks.
48 | // This is useful for handling requests from slow clients so that they don't hold the connection for too long.
49 | ReadTimeout: 30 * time.Second,
50 | // WriteTimeout is the maximum duration before timing out writes of the response.
51 | // This is useful for handling slow clients that read the response slowly.
52 | WriteTimeout: 10 * time.Second,
53 | // ReadHeaderTimeout is necessary here to prevent slowloris attacks.
54 | // https://www.cloudflare.com/learning/ddos/ddos-attack-tools/slowloris/
55 | ReadHeaderTimeout: 5 * time.Second,
56 | // IdleTimeout is the maximum amount of time to wait for the next request when keep-alives are enabled.
57 | IdleTimeout: 5 * time.Second,
58 | }
59 |
60 | go func() {
61 | log.Info().Msgf("Starting http server on :8080")
62 | if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
63 | log.Fatal().Err(err).Msg("http server failed to serve")
64 | }
65 | }()
66 |
67 | <-ctx.Done()
68 |
69 | gracefulShutdownPeriod := 30 * time.Second
70 | log.Warn().Msg("shutting down http server")
71 | shutdownCtx, cancel := context.WithTimeout(context.Background(), gracefulShutdownPeriod)
72 | defer cancel()
73 | if err := httpServer.Shutdown(shutdownCtx); err != nil {
74 | log.Error().Err(err).Msg("failed to shutdown http server gracefully")
75 | }
76 | log.Warn().Msg("http server gracefully stopped")
77 |
78 | if err := meterShutdownFn(ctx); err != nil {
79 | log.Error().Err(err).Msg("failed to shutdown meter provider")
80 | }
81 | return nil
82 | }
83 |
84 | func (s *Server) newHTTPHandler() http.Handler {
85 | mux := mux.NewRouter()
86 | mux.Use(
87 | otelhttp.NewMiddleware("uploader"),
88 | LogInterceptor)
89 | mux.Handle("/metrics", promhttp.Handler())
90 | apiRouter := mux.PathPrefix("/api").Subrouter()
91 |
92 | apiV1Router := apiRouter.PathPrefix("/v1").Subrouter()
93 | apiV1Router.Handle("/form", otelhttp.WithRouteTag("/api/v1/form", http.HandlerFunc(v1.FormUpload())))
94 | apiV1Router.Handle("/binary", otelhttp.WithRouteTag("/api/v1/binary", http.HandlerFunc(v1.BinaryUpload())))
95 | mux.Handle("/v1", otelhttp.WithRouteTag("/v1", http.HandlerFunc(v1.Web()))).Methods(http.MethodGet)
96 |
97 | v3Controller := v3.NewController(v3.NewStore())
98 | apiV3Router := apiRouter.PathPrefix("/v3").Subrouter()
99 | apiV3Router.Use(v3.TusResumableHeaderCheck, v3.TusResumableHeaderInjections)
100 | apiV3Router.Handle("/files", otelhttp.WithRouteTag("/api/v3/files", http.HandlerFunc(v3Controller.GetConfig()))).Methods(http.MethodOptions)
101 | apiV3Router.Handle("/files", otelhttp.WithRouteTag("/api/v3/files", http.HandlerFunc(v3Controller.CreateUpload()))).Methods(http.MethodPost)
102 | apiV3Router.Handle("/files/{file_id}", otelhttp.WithRouteTag("/api/v3/files/{file_id}", http.HandlerFunc(v3Controller.GetOffset()))).Methods(http.MethodHead)
103 | apiV3Router.Handle("/files/{file_id}", otelhttp.WithRouteTag("/api/v3/files/{file_id}", http.HandlerFunc(v3Controller.ResumeUpload()))).Methods(http.MethodPatch)
104 |
105 | apiV3Router.HandleFunc("/files/{file_id}/upload", v3Controller.CreateUpload()).Methods(http.MethodPost)
106 |
107 | v4Controller := v4.NewController(v4.NewStore())
108 | apiV4Router := apiRouter.PathPrefix("/v4").Subrouter()
109 | apiV4Router.Use(v4.TusResumableHeaderCheck, v4.TusResumableHeaderInjections)
110 | apiV4Router.Handle("/files", otelhttp.WithRouteTag("/api/v4/files", http.HandlerFunc(v4Controller.GetConfig()))).Methods(http.MethodOptions)
111 | apiV4Router.Handle("/files", otelhttp.WithRouteTag("/api/v4/files", http.HandlerFunc(v4Controller.CreateUpload()))).Methods(http.MethodPost)
112 | apiV4Router.Handle("/files/{file_id}", otelhttp.WithRouteTag("/api/v4/files/{file_id}", http.HandlerFunc(v4Controller.GetOffset()))).Methods(http.MethodHead)
113 | apiV4Router.Handle("/files/{file_id}", otelhttp.WithRouteTag("/api/v4/files/{file_id}", http.HandlerFunc(v4Controller.ResumeUpload()))).Methods(http.MethodPatch)
114 |
115 | return otelhttp.NewHandler(mux, "/")
116 | }
117 |
--------------------------------------------------------------------------------