├── Jenkinsfile ├── test.env ├── main.go ├── pipeline.yaml ├── .dockerignore ├── test.Dockerfile ├── .gitignore ├── gofmt.sh ├── docker-compose.yml ├── Dockerfile ├── install_protoc.sh ├── README.md ├── go.mod ├── Makefile ├── CHANGELOG.md ├── docker-compose.debug.yml ├── .golangci.yml ├── .vscode └── launch.json ├── bucket ├── bucket.go └── bucket_test.go ├── internal └── test │ └── helper.go ├── proto └── upload_service.proto ├── server └── server.go ├── object ├── handler.go ├── service.go └── object_test.go ├── LICENSE └── go.sum /Jenkinsfile: -------------------------------------------------------------------------------- 1 | Pipeline() 2 | -------------------------------------------------------------------------------- /test.env: -------------------------------------------------------------------------------- 1 | S3_ACCESS_KEY=F6WUUG27HBUFSIXVZL59 2 | S3_SECRET_KEY=BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR 3 | S3_ENDPOINT=http://127.0.0.1:9000 -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "github.com/meateam/upload-service/server" 4 | 5 | func main() { 6 | server.NewServer(nil).Serve(nil) 7 | } 8 | -------------------------------------------------------------------------------- /pipeline.yaml: -------------------------------------------------------------------------------- 1 | repoName: "meateam/upload-service" 2 | runTests: true 3 | testCommand: "" 4 | deployUponTestSuccess: true 5 | deployCommand: 6 | deployEnvironment: -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | Dockerfile* 4 | docker-compose* 5 | .dockerignore 6 | .git 7 | .gitignore 8 | .env 9 | */bin 10 | */obj 11 | README.md 12 | LICENSE 13 | .vscode -------------------------------------------------------------------------------- /test.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:alpine 2 | LABEL description="test" 3 | RUN apk add --no-cache git 4 | ENV GO111MODULE=on 5 | ENV CGO_ENABLED=0 6 | WORKDIR /go/src/app 7 | COPY go.mod go.sum ./ 8 | RUN go mod download 9 | COPY . . 10 | ENTRYPOINT ["go", "test", "-v", "./..."] -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | upload-service 14 | data 15 | debug 16 | -------------------------------------------------------------------------------- /gofmt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | gofiles=$(find . -name "*.go") 3 | [ -z "$gofiles" ] && exit 0 4 | 5 | unformatted=$(gofmt -l $gofiles) 6 | [ -z "$unformatted" ] && exit 0 7 | 8 | echo >&2 "Formatting go files..." 
for fn in $unformatted; do
    echo " gofmt -w $PWD/$fn"
    gofmt -w "$PWD/$fn"
done

exit 1
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:
  upload-service:
    image: upload-service
    build: .
    env_file:
      - ../kdrive.env
    ports:
      - 8080:8080
    depends_on:
      - minio

  minio:
    image: minio/minio
    volumes:
      - './data:/data'
    ports:
      - '9000:9000'
    environment:
      MINIO_ACCESS_KEY: F6WUUG27HBUFSIXVZL59
      MINIO_SECRET_KEY: BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR
    command: server /data
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
#build stage
FROM golang:alpine AS builder
ENV GO111MODULE=on
RUN apk add --no-cache git make
RUN GRPC_HEALTH_PROBE_VERSION=v0.3.0 && \
    wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
    chmod +x /bin/grpc_health_probe
WORKDIR /go/src/app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN make build-app

#final stage
FROM scratch
COPY --from=builder /go/src/app/upload-service /upload-service
COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe
LABEL Name=upload-service Version=0.0.1
EXPOSE 8080
ENTRYPOINT ["/upload-service"]
--------------------------------------------------------------------------------
/install_protoc.sh:
--------------------------------------------------------------------------------
#!/bin/bash

TEMP_DIR=/tmp

VERSION=`curl --silent "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K.*?(?=")'`

[[ -z "$VERSION" ]] && echo "Could not get version from github" && exit 1

mkdir $TEMP_DIR/protoc_inst

cd $TEMP_DIR/protoc_inst

# Make sure you grab the latest version
curl -OL "https://github.com/google/protobuf/releases/download/v$VERSION/protoc-$VERSION-linux-x86_64.zip"

# Unzip
unzip "protoc-$VERSION-linux-x86_64.zip" -d protoc3

# Move protoc to /usr/local/bin/
mv protoc3/bin/* /usr/local/bin/

# Move protoc3/include to /usr/local/include/
mv protoc3/include/* /usr/local/include/

rm -rf $TEMP_DIR/protoc_inst
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Upload-Service

[![Go Report Card](https://goreportcard.com/badge/github.com/meateam/upload-service)](https://goreportcard.com/report/github.com/meateam/upload-service)
[![GoDoc](https://godoc.org/github.com/meateam/upload-service?status.svg)](https://godoc.org/github.com/meateam/upload-service)

S3 file Upload Service

## Compile proto

To compile the proto file, make sure you have `protobuf` (`protoc`) and `protoc-gen-go` installed.

### Installing protobuf on Linux

`./install_protoc.sh`

### Installing protoc-gen-go

`go get -u github.com/golang/protobuf/protoc-gen-go`

[example guide to gRPC and protobuf](https://grpc.io/docs/quickstart/go.html)

**Compiling
Protobuf To Golang:** 23 | `protoc -I proto/ proto/upload_service.proto --go_out=plugins=grpc:./proto` 24 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/meateam/upload-service 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.23.21 7 | github.com/golang/protobuf v1.4.2 8 | github.com/google/go-cmp v0.5.0 9 | github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 10 | github.com/meateam/elasticsearch-logger v1.1.3-0.20190901111807-4e8b84fb9fda 11 | github.com/sirupsen/logrus v1.4.2 12 | github.com/spf13/viper v1.4.0 13 | go.elastic.co/apm/module/apmhttp v1.5.0 14 | golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 // indirect 15 | google.golang.org/grpc v1.27.0 16 | google.golang.org/protobuf v1.25.0 17 | ) 18 | 19 | replace github.com/meateam/upload-service/bucket => ./bucket 20 | 21 | replace github.com/meateam/upload-service/object => ./object 22 | 23 | replace github.com/meateam/upload-service/internal/test => ./internal/test 24 | 25 | replace github.com/meateam/upload-service/server => ./server 26 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Basic go commands 2 | PROTOC=protoc 3 | 4 | # Binary names 5 | BINARY_NAME=upload-service 6 | 7 | all: clean deps fmt test build 8 | build: build-proto build-app 9 | test: 10 | docker-compose -f "docker-compose.yml" up -d minio && \ 11 | S3_ACCESS_KEY=F6WUUG27HBUFSIXVZL59 S3_SECRET_KEY=BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR S3_ENDPOINT=http://127.0.0.1:9000 go test -v ./... && \ 12 | docker-compose down && sudo rm -rf data 13 | clean: 14 | go clean 15 | sudo rm -rf $(BINARY_NAME) 16 | run: build 17 | ./$(BINARY_NAME) 18 | S3_ACCESS_KEY=F6WUUG27HBUFSIXVZL59 S3_SECRET_KEY=BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR S3_ENDPOINT=http://127.0.0.1:9000 ./$(BINARY_NAME) 19 | deps: 20 | go get -u github.com/golang/protobuf/protoc-gen-go 21 | build-app: 22 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(BINARY_NAME) -v 23 | build-proto: 24 | rm -f proto/*.pb.go 25 | protoc -I proto/ proto/*.proto --go_out=plugins=grpc:./proto 26 | fmt: 27 | ./gofmt.sh -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [v2.0.1] - 2021-02-13 11 | 12 | ### Changed 13 | - ISSUE([99](https://github.com/meateam/drive-project/issues/99)): update pkg, delete unused pkg and update deps 14 | 15 | 16 | ## [v2.0.0] - 2020-10-28 17 | 18 | ### Added 19 | 20 | - FEAT([58](https://github.com/meateam/upload-service/pull/58)): RPC method CopyObject. 21 | 22 | - FEAT([58](https://github.com/meateam/upload-service/pull/58)): RPC method MoveObject. 
23 | 24 | 25 | [unreleased]: https://github.com/meateam/upload-service/compare/master...develop 26 | [v2.0.0]: https://github.com/meateam/upload-service/compare/v1.3...v2.0.0 27 | [v2.0.1]: https://github.com/meateam/upload-service/compare/v2.0.0...v2.0.1 28 | 29 | -------------------------------------------------------------------------------- /docker-compose.debug.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | minio: 4 | image: minio/minio 5 | volumes: 6 | - './data:/data' 7 | ports: 8 | - '9000:9000' 9 | environment: 10 | MINIO_ACCESS_KEY: F6WUUG27HBUFSIXVZL59 11 | MINIO_SECRET_KEY: BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR 12 | command: server /data 13 | upload-service: 14 | image: upload-service 15 | build: . 16 | environment: 17 | S3_ACCESS_KEY: F6WUUG27HBUFSIXVZL59 18 | S3_SECRET_KEY: BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR 19 | S3_ENDPOINT: http://minio:9000 20 | TCP_PORT: 8080 21 | HEALTH_CHECK_INTERVAL: 5 22 | ELASTIC_APM_SERVER_URL: 'http://localhost:8200' 23 | ELASTIC_APM_SERVICE_NAME: 'upload-service' 24 | ELASTIC_APM_SERVICE_VERSION: '0.1' 25 | ELASTIC_APM_ENVIRONMENT: 'development' 26 | ELASTIC_APM_ACTIVE: 'true' 27 | ELASTIC_APM_CAPTURE_BODY: 'all' 28 | ELASTIC_APM_METRICS_INTERVAL: '10s' 29 | ELASTIC_APM_IGNORE_URLS: '/grpc.health.v1.Health/Check' 30 | ports: 31 | - 8080:8080 32 | depends_on: 33 | - minio 34 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | # This file contains all available configuration options 2 | 3 | # all available settings of specific linters 4 | linters-settings: 5 | errcheck: 6 | # report about not checking of errors in type assetions: `a := b.(MyStruct)`; 7 | # default is false: such cases aren't reported by default. 8 | check-type-assertions: true 9 | gocyclo: 10 | # minimal code complexity to report, 30 by default (but we recommend 10-20) 11 | min-complexity: 15 12 | dupl: 13 | # tokens count to trigger issue, 150 by default 14 | threshold: 150 15 | maligned: 16 | # print struct with more effective memory layout or not, false by default 17 | suggest-new: true 18 | misspell: 19 | # Correct spellings using locale preferences for US or UK. 20 | # Default is to use a neutral variety of English. 21 | # Setting locale to US will correct the British spelling of 'colour' to 'color'. 22 | locale: US 23 | lll: 24 | # max line length, lines longer will be reported. Default is 120. 25 | # '\t' is counted as 1 character by default, and can be changed with the tab-width option 26 | line-length: 115 27 | # tab width in spaces. Default to 1. 28 | tab-width: 2 29 | prealloc: 30 | # XXX: we don't recommend using this linter before doing performance profiling. 31 | # For most programs usage of prealloc will be a premature optimization. 32 | 33 | # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. 34 | # True by default. 
35 | simple: true 36 | range-loops: false # Report preallocation suggestions on range loops, true by default 37 | for-loops: false # Report preallocation suggestions on for loops, false by default 38 | linters: 39 | enable: 40 | - prealloc 41 | - errcheck 42 | - gocyclo 43 | - dupl 44 | - maligned 45 | - misspell 46 | - lll 47 | - prealloc 48 | enable-all: false 49 | disable-all: false 50 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Launch Package", 9 | "type": "go", 10 | "request": "launch", 11 | "mode": "debug", 12 | "program": "${workspaceFolder}", 13 | "env": { 14 | "S3_ACCESS_KEY": "F6WUUG27HBUFSIXVZL59", 15 | "S3_SECRET_KEY": "BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR", 16 | "S3_ENDPOINT": "http://127.0.0.1:9000", 17 | "TCP_PORT": "8081", 18 | "HOST_NAME": "upload-service", 19 | "ELASTICSEARCH_URL": "http://localhost:9200", 20 | "LOG_INDEX": "kdrive", 21 | "LOG_LEVEL": "debug", 22 | "ELASTIC_APM_SERVER_URL": "http://localhost:8200", 23 | "ELASTIC_APM_SERVICE_NAME": "upload-service", 24 | "ELASTIC_APM_SERVICE_VERSION": "0.1", 25 | "ELASTIC_APM_ENVIRONMENT": "development", 26 | "ELASTIC_APM_ACTIVE": "true", 27 | "US_ELASTIC_APM_IGNORE_URLS": "/grpc.health.v1.Health/Check", 28 | "ELASTIC_APM_CAPTURE_BODY": "all", 29 | "ELASTIC_APM_METRICS_INTERVAL": "10s", 30 | "HEALTH_CHECK_INTERVAL": "3", 31 | } 32 | }, 33 | { 34 | "name": "Launch test package", 35 | "type": "go", 36 | "request": "launch", 37 | "mode": "test", 38 | "program": "${workspaceFolder}", 39 | "env": { 40 | "S3_ACCESS_KEY": "F6WUUG27HBUFSIXVZL59", 41 | "S3_SECRET_KEY": "BPlIUU6SX0ZxiCMo3tIpCMAUdnmkN9Eo9K42NsRR", 42 | "S3_ENDPOINT": "http://127.0.0.1:9000", 43 | "HEALTH_CHECK_INTERVAL": "3", 44 | 45 | } 46 | } 47 | ] 48 | } -------------------------------------------------------------------------------- /bucket/bucket.go: -------------------------------------------------------------------------------- 1 | package bucket 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strings" 7 | 8 | "github.com/aws/aws-sdk-go/aws" 9 | "github.com/aws/aws-sdk-go/service/s3" 10 | ) 11 | 12 | // Service is a structure used for bucket operations on S3 13 | type Service struct { 14 | s3Client *s3.S3 15 | } 16 | 17 | // NewService creates a Service and returns it. 18 | func NewService(s3Client *s3.S3) *Service { 19 | return &Service{s3Client: s3Client} 20 | } 21 | 22 | // BucketExists returns true if a bucket exists and the S3Client has permission 23 | // to access it, false otherwise. 24 | func (s Service) BucketExists(ctx aws.Context, bucket *string) bool { 25 | if bucket == nil { 26 | return false 27 | } 28 | 29 | normalizedBucketName := s.NormalizeCephBucketName(*bucket) 30 | input := &s3.HeadBucketInput{ 31 | Bucket: aws.String(normalizedBucketName), 32 | } 33 | 34 | _, err := s.s3Client.HeadBucketWithContext(ctx, input) 35 | 36 | return (err == nil) 37 | } 38 | 39 | // CreateBucket creates a bucket with the given bucket name and returns true or false 40 | // if it's created or not, returns an error if it didn't. 
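// The given name is first normalized with NormalizeCephBucketName, so for example
// a bucket requested as "T874777@omer" is created as "t874777-omer".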
41 | func (s Service) CreateBucket(ctx aws.Context, bucket *string) (bool, error) { 42 | if bucket == nil { 43 | return false, fmt.Errorf("bucket is nil") 44 | } 45 | 46 | normalizedBucketName := s.NormalizeCephBucketName(*bucket) 47 | cparams := &s3.CreateBucketInput{ 48 | Bucket: aws.String(normalizedBucketName), // Required 49 | } 50 | 51 | // Create a new bucket using the CreateBucket call. 52 | _, err := s.s3Client.CreateBucketWithContext(ctx, cparams) 53 | if err != nil { 54 | return false, fmt.Errorf("failed to create bucket: %v", err) 55 | } 56 | 57 | return true, nil 58 | } 59 | 60 | // NormalizeCephBucketName gets a bucket name and normalizes it 61 | // according to ceph s3's constraints. 62 | func (s Service) NormalizeCephBucketName(bucketName string) string { 63 | lowerCaseBucketName := strings.ToLower(bucketName) 64 | 65 | // Make a Regex for catching only letters and numbers. 66 | reg := regexp.MustCompile("[^a-zA-Z0-9]+") 67 | return reg.ReplaceAllString(lowerCaseBucketName, "-") 68 | } 69 | -------------------------------------------------------------------------------- /internal/test/helper.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "log" 5 | 6 | "github.com/aws/aws-sdk-go/aws" 7 | "github.com/aws/aws-sdk-go/service/s3" 8 | ) 9 | 10 | // EmptyAndDeleteBucket empties the Amazon S3 bucket and deletes it. 11 | func EmptyAndDeleteBucket(s3Client *s3.S3, bucket string) error { 12 | log.Print("removing objects from S3 bucket : ", bucket) 13 | 14 | params := &s3.ListObjectsInput{ 15 | Bucket: aws.String(bucket), 16 | MaxKeys: aws.Int64(10000), 17 | } 18 | 19 | for { 20 | // Requesting for batch of objects from s3 bucket 21 | objects, err := s3Client.ListObjects(params) 22 | if err != nil { 23 | break 24 | } 25 | 26 | // Checks if the bucket is already empty 27 | if len((*objects).Contents) == 0 { 28 | log.Print("bucket is already empty") 29 | break 30 | } 31 | log.Print("first object in batch | ", *(objects.Contents[0].Key)) 32 | 33 | // Creating an array of pointers of ObjectIdentifier 34 | objectsToDelete := make([]*s3.ObjectIdentifier, 0, 1000) 35 | for _, object := range (*objects).Contents { 36 | obj := s3.ObjectIdentifier{ 37 | Key: object.Key, 38 | } 39 | objectsToDelete = append(objectsToDelete, &obj) 40 | } 41 | 42 | // Creating JSON payload for bulk delete 43 | deleteArray := s3.Delete{Objects: objectsToDelete} 44 | deleteParams := &s3.DeleteObjectsInput{ 45 | Bucket: aws.String(bucket), 46 | Delete: &deleteArray, 47 | } 48 | 49 | // Running the Bulk delete job (limit 1000) 50 | _, err = s3Client.DeleteObjects(deleteParams) 51 | if err != nil { 52 | return err 53 | } 54 | if *(*objects).IsTruncated { //if there are more objects in the bucket, IsTruncated = true 55 | params.Marker = (*deleteParams).Delete.Objects[len((*deleteParams).Delete.Objects)-1].Key 56 | log.Print("requesting next batch | ", *(params.Marker)) 57 | } else { // If all objects in the bucket have been cleaned up. 
58 | break 59 | } 60 | } 61 | 62 | log.Print("Emptied S3 bucket : ", bucket) 63 | if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucket)}); err != nil { 64 | log.Printf("failed to DeleteBucket, %v", err) 65 | return err 66 | } 67 | 68 | return nil 69 | } 70 | -------------------------------------------------------------------------------- /bucket/bucket_test.go: -------------------------------------------------------------------------------- 1 | package bucket_test 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "sync" 8 | "testing" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/aws/credentials" 12 | "github.com/aws/aws-sdk-go/aws/session" 13 | "github.com/aws/aws-sdk-go/service/s3" 14 | "github.com/meateam/upload-service/bucket" 15 | "github.com/meateam/upload-service/internal/test" 16 | ) 17 | 18 | // Declaring global variables. 19 | var s3Endpoint string 20 | var s3Client *s3.S3 21 | var mu sync.Mutex 22 | 23 | func init() { 24 | // Fetch env vars 25 | s3AccessKey := os.Getenv("S3_ACCESS_KEY") 26 | s3SecretKey := os.Getenv("S3_SECRET_KEY") 27 | s3Endpoint = os.Getenv("S3_ENDPOINT") 28 | s3Token := "" 29 | 30 | // Configure to use S3 Server 31 | s3Config := &aws.Config{ 32 | Credentials: credentials.NewStaticCredentials(s3AccessKey, s3SecretKey, s3Token), 33 | Endpoint: aws.String(s3Endpoint), 34 | Region: aws.String("eu-east-1"), 35 | DisableSSL: aws.Bool(true), 36 | S3ForcePathStyle: aws.Bool(true), 37 | } 38 | 39 | // Init real client. 40 | newSession, err := session.NewSession(s3Config) 41 | if err != nil { 42 | log.Fatalf(err.Error()) 43 | } 44 | s3Client = s3.New(newSession) 45 | 46 | mu.Lock() 47 | if err := test.EmptyAndDeleteBucket(s3Client, "testbucket"); err != nil { 48 | log.Printf("test.EmptyAndDeleteBucket failed with error: %v", err) 49 | } 50 | if err := test.EmptyAndDeleteBucket(s3Client, "testbucket1"); err != nil { 51 | log.Printf("test.EmptyAndDeleteBucket failed with error: %v", err) 52 | } 53 | if err := test.EmptyAndDeleteBucket(s3Client, "t874777-omer"); err != nil { 54 | log.Printf("test.EmptyAndDeleteBucket failed with error: %v", err) 55 | } 56 | mu.Unlock() 57 | } 58 | 59 | func TestBucketService_CreateBucket(t *testing.T) { 60 | type fields struct { 61 | s3Client *s3.S3 62 | } 63 | type args struct { 64 | ctx aws.Context 65 | bucket *string 66 | } 67 | tests := []struct { 68 | name string 69 | fields fields 70 | args args 71 | want bool 72 | wantErr bool 73 | }{ 74 | { 75 | name: "create bucket", 76 | fields: fields{s3Client: s3Client}, 77 | args: args{ 78 | ctx: context.Background(), 79 | bucket: aws.String("testbucket"), 80 | }, 81 | wantErr: false, 82 | want: true, 83 | }, 84 | { 85 | name: "create bucket - already exists", 86 | fields: fields{s3Client: s3Client}, 87 | args: args{ 88 | ctx: context.Background(), 89 | bucket: aws.String("testbucket"), 90 | }, 91 | wantErr: true, 92 | }, 93 | { 94 | name: "create bucket - nil bucket", 95 | fields: fields{s3Client: s3Client}, 96 | args: args{ 97 | ctx: context.Background(), 98 | bucket: nil, 99 | }, 100 | wantErr: true, 101 | }, 102 | { 103 | name: "create bucket - empty bucket", 104 | fields: fields{s3Client: s3Client}, 105 | args: args{ 106 | ctx: context.Background(), 107 | bucket: aws.String(""), 108 | }, 109 | wantErr: true, 110 | }, 111 | { 112 | name: "create bucket - invalid bucket", 113 | fields: fields{s3Client: s3Client}, 114 | args: args{ 115 | ctx: context.Background(), 116 | bucket: aws.String("T874777@omer"), 117 | }, 118 | wantErr: 
false, 119 | want: true, 120 | }, 121 | } 122 | for _, tt := range tests { 123 | t.Run(tt.name, func(t *testing.T) { 124 | s := bucket.NewService(tt.fields.s3Client) 125 | mu.Lock() 126 | got, err := s.CreateBucket(tt.args.ctx, tt.args.bucket) 127 | mu.Unlock() 128 | if (err != nil) != tt.wantErr { 129 | t.Errorf("BucketService.CreateBucket() error = %v, wantErr %v", err, tt.wantErr) 130 | return 131 | } 132 | if got != tt.want { 133 | t.Errorf("BucketService.CreateBucket() = %v, want %v", got, tt.want) 134 | } 135 | }) 136 | 137 | } 138 | } 139 | func TestBucketService_BucketExists(t *testing.T) { 140 | s := bucket.NewService(s3Client) 141 | 142 | mu.Lock() 143 | if _, err := s.CreateBucket(context.Background(), aws.String("testbucket")); err != nil { 144 | log.Printf("CreateBucket failed with error: %v", err) 145 | } 146 | mu.Unlock() 147 | type fields struct { 148 | s3Client *s3.S3 149 | } 150 | type args struct { 151 | ctx aws.Context 152 | bucket *string 153 | } 154 | tests := []struct { 155 | name string 156 | fields fields 157 | args args 158 | want bool 159 | }{ 160 | { 161 | name: "Bucket Exists", 162 | fields: fields{s3Client: s3Client}, 163 | args: args{ 164 | ctx: context.Background(), 165 | bucket: aws.String("testbucket"), 166 | }, 167 | want: true, 168 | }, 169 | { 170 | name: "Bucket doesn't Exists", 171 | fields: fields{s3Client: s3Client}, 172 | args: args{ 173 | ctx: context.Background(), 174 | bucket: aws.String("testbucket2"), 175 | }, 176 | want: false, 177 | }, 178 | { 179 | name: "Bucket nil", 180 | fields: fields{s3Client: s3Client}, 181 | args: args{ 182 | ctx: context.Background(), 183 | bucket: nil, 184 | }, 185 | want: false, 186 | }, 187 | } 188 | for _, tt := range tests { 189 | t.Run(tt.name, func(t *testing.T) { 190 | s := bucket.NewService(tt.fields.s3Client) 191 | 192 | if got := s.BucketExists(tt.args.ctx, tt.args.bucket); got != tt.want { 193 | t.Errorf("BucketService.BucketExists() = %v, want %v", got, tt.want) 194 | } 195 | }) 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /proto/upload_service.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package upload; 4 | 5 | // Interface exported by the server 6 | service Upload { 7 | // The function Uploads the given file 8 | // 9 | // Returns the Location of the file as output 10 | // 11 | // In case of an error the error is returned 12 | rpc UploadMedia(UploadMediaRequest) returns (UploadMediaResponse) {} 13 | rpc UploadMultipart(UploadMultipartRequest) returns (UploadMultipartResponse) {} 14 | rpc UploadInit(UploadInitRequest) returns (UploadInitResponse) {} 15 | rpc UploadPart(stream UploadPartRequest) returns (stream UploadPartResponse) {} 16 | rpc UploadComplete(UploadCompleteRequest) returns (UploadCompleteResponse) {} 17 | rpc UploadAbort(UploadAbortRequest) returns (UploadAbortResponse) {} 18 | rpc DeleteObjects(DeleteObjectsRequest) returns (DeleteObjectsResponse) {} 19 | rpc CopyObject(CopyObjectRequest) returns (CopyObjectResponse) {} 20 | rpc MoveObject(MoveObjectRequest) returns (MoveObjectResponse) {} 21 | 22 | } 23 | 24 | // UploadMediaRequest is the request for media upload 25 | message UploadMediaRequest { 26 | // File is the file to upload 27 | bytes file = 1; 28 | 29 | // File key to store in S3 30 | string key = 2; 31 | 32 | // The bucket to upload the file to 33 | string bucket = 3; 34 | 35 | // The mime-type of the file. 
    string contentType = 4;
}

// UploadMediaResponse is the response for media upload
message UploadMediaResponse {
    // The location that the file was uploaded to
    string location = 1;
}

// UploadMultipartRequest is the request for multipart upload
message UploadMultipartRequest {
    // File to upload
    bytes file = 1;

    // File metadata
    map<string, string> metadata = 2;

    // File key to store in S3
    string key = 3;

    // The bucket to upload the file to
    string bucket = 4;

    // The mime-type of the file.
    string contentType = 5;
}

// UploadMultipartResponse is the response for multipart upload
message UploadMultipartResponse {
    // The location that the file was uploaded to
    string location = 1;
}

// UploadInitRequest is the data for initiating resumable upload
message UploadInitRequest {
    // File key to store in S3
    string key = 1;

    // The bucket to upload the file to
    string bucket = 2;

    // File metadata
    map<string, string> metadata = 3;

    // The mime-type of the file.
    string contentType = 4;
}

// UploadInitResponse is the response for initiating resumable upload
message UploadInitResponse {
    // Upload ID generated for resumable upload of a file
    string uploadId = 1;

    // File key to store in S3
    string key = 2;

    // The bucket to upload the file to
    string bucket = 3;
}

// UploadPartRequest is the request for resumable part upload
message UploadPartRequest {
    // File part chunk
    bytes part = 1;

    // Part number
    int64 partNumber = 2;

    // Upload ID generated for resumable upload of a file
    string uploadId = 3;

    // File key to store in S3
    string key = 4;

    // The bucket to upload the file to
    string bucket = 5;
}

// UploadPartResponse is the response for resumable part upload
message UploadPartResponse {
    // Upload status code
    int32 code = 1;

    // Upload status message
    string message = 2;
}

// UploadCompleteRequest is the request for completing resumable upload
message UploadCompleteRequest {
    // Upload ID generated for resumable upload of a file
    string uploadId = 1;

    // File key to store in S3
    string key = 2;

    // The bucket to upload the file to
    string bucket = 3;
}

// UploadCompleteResponse is the response for completing resumable upload
message UploadCompleteResponse {
    // The size of the uploaded file
    int64 ContentLength = 1;

    // The type of the uploaded file
    string ContentType = 2;
}

// UploadAbortRequest is the request for aborting resumable upload
message UploadAbortRequest {
    // Upload ID generated for resumable upload of a file
    string uploadId = 1;

    // File key to store in S3
    string key = 2;

    // The bucket to upload the file to
    string bucket = 3;
}

// UploadAbortResponse is the response for aborting resumable upload
message UploadAbortResponse {
    bool status = 1;
}

// DeleteObjectsRequest is the request for deleting objects.
message DeleteObjectsRequest {
    // The bucket to delete the objects from.
    string bucket = 1;

    // The object keys to be deleted from s3.
    repeated string keys = 2;
}

// DeleteObjectsResponse is the response for deleting objects.
message DeleteObjectsResponse {
    // The object keys that were deleted successfully.
    repeated string deleted = 1;

    // The object keys that failed to delete.
    repeated string failed = 2;
}

// CopyObjectRequest is the request for copying an object between buckets.
message CopyObjectRequest {
    // The source bucket of the object.
    string bucketSrc = 1;

    // The destination bucket of the object.
    string bucketDest = 2;

    // The object key to be copied.
    string keySrc = 3;

    // New object key.
    string keyDest = 4;
}

// CopyObjectResponse is the response for copying an object.
message CopyObjectResponse {
    // The object key that was copied successfully.
    string copied = 1;
}

// MoveObjectRequest is the request for moving an object between buckets.
message MoveObjectRequest {
    // The source bucket of the object.
    string bucketSrc = 1;

    // The destination bucket of the object.
    string bucketDest = 2;

    // The object key to be moved.
    string keySrc = 3;

    // New object key.
    string keyDest = 4;
}

// MoveObjectResponse is the response for moving an object.
message MoveObjectResponse {
    // The object key that was moved successfully.
    string moved = 1;
}
--------------------------------------------------------------------------------
/server/server.go:
--------------------------------------------------------------------------------
package server

import (
    "net"
    "net/http"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
    grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
    ilogger "github.com/meateam/elasticsearch-logger"
    "github.com/meateam/upload-service/object"
    pb "github.com/meateam/upload-service/proto"
    "github.com/sirupsen/logrus"
    "github.com/spf13/viper"
    "go.elastic.co/apm/module/apmhttp"
    "google.golang.org/grpc"
    "google.golang.org/grpc/health"
    "google.golang.org/grpc/health/grpc_health_v1"
)

const (
    configPort                 = "tcp_port"
    configHealthCheckInterval  = "health_check_interval"
    configElasticAPMIgnoreURLS = "us_elastic_apm_ignore_urls"
    configS3Endpoint           = "s3_endpoint"
    configS3Token              = "s3_token"
    configS3AccessKey          = "s3_access_key"
    configS3SecretKey          = "s3_secret_key"
    configS3Region             = "s3_region"
    configS3SSL                = "s3_ssl"
)

func init() {
    viper.SetDefault(configPort, "8080")
    viper.SetDefault(configHealthCheckInterval, 3)
    viper.SetDefault(configElasticAPMIgnoreURLS, "/grpc.health.v1.Health/Check")
    viper.SetDefault(configS3Endpoint, "http://localhost:9000")
    viper.SetDefault(configS3Token, "")
    viper.SetDefault(configS3AccessKey, "")
    viper.SetDefault(configS3SecretKey, "")
    viper.SetDefault(configS3Region, "us-east-1")
    viper.SetDefault(configS3SSL, false)
    viper.AutomaticEnv()
}

// UploadServer is a structure that holds the upload server.
type UploadServer struct {
    *grpc.Server
    logger              *logrus.Logger
    tcpPort             string
    healthCheckInterval int
    objectHandler       *object.Handler
}

// GetHandler returns the underlying upload handler.
func (s *UploadServer) GetHandler() *object.Handler {
    return s.objectHandler
}

// Serve accepts incoming connections on the listener `lis`, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
// Serve returns when `lis.Accept` fails with fatal errors. `lis` will be closed when
// this method returns.
// If `lis` is nil then Serve creates a `net.Listener` with "tcp" network listening
// on the configured `TCP_PORT`, which defaults to "8080".
// Serve will return a non-nil error unless Stop or GracefulStop is called.
func (s UploadServer) Serve(lis net.Listener) {
    listener := lis
    if lis == nil {
        l, err := net.Listen("tcp", ":"+s.tcpPort)
        if err != nil {
            s.logger.Fatalf("failed to listen: %v", err)
        }

        listener = l
    }

    s.logger.Infof("listening and serving grpc server on port %s", s.tcpPort)
    if err := s.Server.Serve(listener); err != nil {
        s.logger.Fatalf("%v", err)
    }
}

// NewServer configures and creates a grpc.Server instance with the upload service
// and the health check service.
// Configured using the following environment variables:
// `HEALTH_CHECK_INTERVAL`: Interval to update serving state of the health check server.
// `S3_ACCESS_KEY`: S3 access key to connect with s3 backend.
// `S3_SECRET_KEY`: S3 secret key to connect with s3 backend.
// `S3_ENDPOINT`: S3 endpoint of s3 backend to connect to.
// `S3_TOKEN`: S3 token of s3 backend to connect to.
// `S3_REGION`: S3 region of s3 backend to connect to.
// `S3_SSL`: Enable or Disable SSL on S3 connection.
// `TCP_PORT`: TCP port on which the grpc server serves.
func NewServer(logger *logrus.Logger) *UploadServer {
    // Configuration variables
    s3AccessKey := viper.GetString(configS3AccessKey)
    s3SecretKey := viper.GetString(configS3SecretKey)
    s3Endpoint := viper.GetString(configS3Endpoint)
    s3Token := viper.GetString(configS3Token)
    s3Region := viper.GetString(configS3Region)
    s3SSL := viper.GetBool(configS3SSL)

    // Configure to use S3 Server
    s3Config := &aws.Config{
        Credentials:      credentials.NewStaticCredentials(s3AccessKey, s3SecretKey, s3Token),
        Endpoint:         aws.String(s3Endpoint),
        Region:           aws.String(s3Region),
        DisableSSL:       aws.Bool(!s3SSL),
        S3ForcePathStyle: aws.Bool(true),
        HTTPClient:       apmhttp.WrapClient(http.DefaultClient),
    }

    // If no logger is given, create a new default logger for the server.
    if logger == nil {
        logger = ilogger.NewLogger()
    }

    // Open a session to s3.
    newSession, err := session.NewSession(s3Config)
    if err != nil {
        logger.Fatalf(err.Error())
    }
    logger.Infof("connected to S3 - %s", s3Endpoint)

    // Create a client from the s3 session.
    s3Client := s3.New(newSession)

    // Set up grpc server opts with logger interceptor.
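    // Note: grpc.MaxRecvMsgSize below raises the maximum receive message size to
    // 5120<<20 bytes (5 GiB), presumably so that large UploadMedia/UploadMultipart
    // payloads fit in a single gRPC message.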
135 | serverOpts := append( 136 | serverLoggerInterceptor(logger), 137 | grpc.MaxRecvMsgSize(5120<<20), 138 | ) 139 | 140 | grpcServer := grpc.NewServer( 141 | serverOpts..., 142 | ) 143 | 144 | // Create a upload handler and register it on the grpc server. 145 | objectHandler := object.NewHandler( 146 | object.NewService(s3Client), 147 | logger, 148 | ) 149 | pb.RegisterUploadServer(grpcServer, objectHandler) 150 | 151 | // Create a health server and register it on the grpc server. 152 | healthServer := health.NewServer() 153 | grpc_health_v1.RegisterHealthServer(grpcServer, healthServer) 154 | 155 | uploadServer := &UploadServer{ 156 | Server: grpcServer, 157 | logger: logger, 158 | tcpPort: viper.GetString(configPort), 159 | healthCheckInterval: viper.GetInt(configHealthCheckInterval), 160 | objectHandler: objectHandler, 161 | } 162 | 163 | // Health check validation goroutine worker. 164 | go uploadServer.healthCheckWorker(healthServer) 165 | 166 | return uploadServer 167 | } 168 | 169 | // serverLoggerInterceptor configures the logger interceptor for the upload server. 170 | func serverLoggerInterceptor(logger *logrus.Logger) []grpc.ServerOption { 171 | // Create new logrus entry for logger interceptor. 172 | logrusEntry := logrus.NewEntry(logger) 173 | 174 | ignorePayload := ilogger.IgnoreServerMethodsDecider( 175 | append( 176 | strings.Split(viper.GetString(configElasticAPMIgnoreURLS), ","), 177 | "/upload.Upload/UploadMedia", 178 | "/upload.Upload/UploadMultipart", 179 | "/upload.Upload/UploadPart", 180 | )..., 181 | ) 182 | 183 | ignoreInitialRequest := ilogger.IgnoreServerMethodsDecider( 184 | append( 185 | strings.Split(viper.GetString(configElasticAPMIgnoreURLS), ","), 186 | "/upload.Upload/UploadMedia", 187 | "/upload.Upload/UploadMultipart", 188 | "/upload.Upload/UploadPart", 189 | )..., 190 | ) 191 | 192 | // Shared options for the logger, with a custom gRPC code to log level function. 193 | loggerOpts := []grpc_logrus.Option{ 194 | grpc_logrus.WithDecider(func(fullMethodName string, err error) bool { 195 | return ignorePayload(fullMethodName) 196 | }), 197 | grpc_logrus.WithLevels(grpc_logrus.DefaultCodeToLevel), 198 | } 199 | 200 | return ilogger.ElasticsearchLoggerServerInterceptor( 201 | logrusEntry, 202 | ignorePayload, 203 | ignoreInitialRequest, 204 | loggerOpts..., 205 | ) 206 | } 207 | 208 | // healthCheckWorker is running an infinite loop that sets the serving status once 209 | // in s.healthCheckInterval seconds. 210 | func (s UploadServer) healthCheckWorker(healthServer *health.Server) { 211 | s3Client := s.objectHandler.GetService().GetS3Client() 212 | 213 | for { 214 | _, err := s3Client.ListBuckets(&s3.ListBucketsInput{}) 215 | if err != nil { 216 | healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_NOT_SERVING) 217 | } else { 218 | healthServer.SetServingStatus("", grpc_health_v1.HealthCheckResponse_SERVING) 219 | } 220 | 221 | time.Sleep(time.Second * time.Duration(s.healthCheckInterval)) 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /object/handler.go: -------------------------------------------------------------------------------- 1 | package object 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "io" 8 | "sync" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/sirupsen/logrus" 12 | 13 | pb "github.com/meateam/upload-service/proto" 14 | ) 15 | 16 | // Handler handles object operation requests by uploading the file's data to aws-s3 Object Storage. 
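//
// A minimal client-side sketch (assuming the generated client in the proto
// package and an insecure local connection; names outside this repository are
// illustrative only):
//
//	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
//	if err != nil {
//		// handle dial error
//	}
//	defer conn.Close()
//
//	client := pb.NewUploadClient(conn)
//	res, err := client.UploadMedia(context.Background(), &pb.UploadMediaRequest{
//		File:        []byte("hello"),
//		Key:         "my-key",
//		Bucket:      "my-bucket",
//		ContentType: "text/plain",
//	})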
17 | type Handler struct { 18 | service *Service 19 | logger *logrus.Logger 20 | } 21 | 22 | // NewHandler creates a Handler and returns it. 23 | func NewHandler(service *Service, logger *logrus.Logger) *Handler { 24 | return &Handler{service: service, logger: logger} 25 | } 26 | 27 | // GetService returns the internal upload service. 28 | func (h Handler) GetService() *Service { 29 | return h.service 30 | } 31 | 32 | // UploadMedia is the request handler for file upload, it is responsible for getting the file 33 | // from the request's body and uploading it to the bucket of the user who uploaded it 34 | func (h Handler) UploadMedia( 35 | ctx context.Context, 36 | request *pb.UploadMediaRequest, 37 | ) (*pb.UploadMediaResponse, error) { 38 | location, err := h.service.UploadFile(ctx, 39 | bytes.NewReader(request.GetFile()), 40 | aws.String(request.GetKey()), 41 | aws.String(request.GetBucket()), 42 | aws.String(request.GetContentType()), 43 | nil) 44 | 45 | if err != nil { 46 | return nil, err 47 | } 48 | 49 | return &pb.UploadMediaResponse{Location: *location}, nil 50 | } 51 | 52 | // UploadMultipart is the request handler for file upload, it is responsible for getting the file 53 | // from the request's body and uploading it to the bucket of the user who uploaded it 54 | func (h Handler) UploadMultipart( 55 | ctx context.Context, 56 | request *pb.UploadMultipartRequest, 57 | ) (*pb.UploadMultipartResponse, error) { 58 | metadata := request.GetMetadata() 59 | if len(metadata) == 0 { 60 | return nil, fmt.Errorf("metadata is required") 61 | } 62 | 63 | location, err := h.service.UploadFile(ctx, 64 | bytes.NewReader(request.GetFile()), 65 | aws.String(request.GetKey()), 66 | aws.String(request.GetBucket()), 67 | aws.String(request.GetContentType()), 68 | aws.StringMap(request.GetMetadata())) 69 | 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | return &pb.UploadMultipartResponse{Location: *location}, nil 75 | } 76 | 77 | // UploadInit is the request handler for initiating resumable upload. 78 | func (h Handler) UploadInit( 79 | ctx context.Context, 80 | request *pb.UploadInitRequest, 81 | ) (*pb.UploadInitResponse, error) { 82 | result, err := h.service.UploadInit( 83 | ctx, 84 | aws.String(request.GetKey()), 85 | aws.String(request.GetBucket()), 86 | aws.String(request.GetContentType()), 87 | aws.StringMap(request.GetMetadata())) 88 | 89 | if err != nil { 90 | return nil, err 91 | } 92 | 93 | response := &pb.UploadInitResponse{ 94 | UploadId: *result.UploadId, 95 | Key: *result.Key, 96 | Bucket: *result.Bucket, 97 | } 98 | 99 | return response, nil 100 | } 101 | 102 | // UploadPart is the request handler for multipart file upload. 103 | // It is fetching file parts from a RPC stream and uploads them concurrently. 104 | // Responds with a stream of upload status for each part streamed. 
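// Each received part is uploaded in its own goroutine and tracked with a
// sync.WaitGroup, and the handler waits for all in-flight uploads before
// returning. Note (reviewer assumption): gRPC-go does not allow SendMsg to be
// called on the same stream from multiple goroutines at once, so under heavy
// concurrency the per-goroutine stream.Send calls here may need to be
// serialized (for example with a mutex).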
func (h Handler) UploadPart(stream pb.Upload_UploadPartServer) error {
    wg := sync.WaitGroup{}
    for {
        part, err := stream.Recv()

        if err == io.EOF {
            wg.Wait()
            return nil
        }

        if err != nil {
            errResponse := &pb.UploadPartResponse{Code: 500, Message: fmt.Sprintf("failed fetching part: %v", err)}
            if err := stream.Send(errResponse); err != nil {
                return err
            }

            // A non-EOF receive error means the stream is broken: wait for
            // in-flight uploads and stop instead of processing a nil part.
            wg.Wait()
            return err
        }

        wg.Add(1)
        go func() {
            defer wg.Done()

            result, err := h.service.UploadPart(
                stream.Context(),
                aws.String(part.GetUploadId()),
                aws.String(part.GetKey()),
                aws.String(part.GetBucket()),
                aws.Int64(part.GetPartNumber()),
                bytes.NewReader(part.GetPart()))

            var resp *pb.UploadPartResponse

            if err != nil {
                resp = &pb.UploadPartResponse{
                    Code:    500,
                    Message: fmt.Sprintf("failed uploading part: %v", err),
                }
            } else {
                resp = &pb.UploadPartResponse{
                    Code:    200,
                    Message: fmt.Sprintf("successfully uploaded part %s", *result.ETag),
                }
            }

            if err := stream.Send(resp); err != nil {
                h.logger.Errorf("failed to send response in stream: %v", err)
            }
        }()
    }
}

// UploadComplete is the request handler for completing and assembling previously uploaded file parts.
// Responds with the content length and content type of the assembled file.
func (h Handler) UploadComplete(
    ctx context.Context,
    request *pb.UploadCompleteRequest,
) (*pb.UploadCompleteResponse, error) {
    _, err := h.service.UploadComplete(ctx,
        aws.String(request.GetUploadId()),
        aws.String(request.GetKey()),
        aws.String(request.GetBucket()))
    if err != nil {
        return nil, err
    }

    obj, err := h.service.HeadObject(ctx, aws.String(request.GetKey()), aws.String(request.GetBucket()))
    if err != nil {
        return nil, err
    }

    return &pb.UploadCompleteResponse{ContentLength: *obj.ContentLength, ContentType: *obj.ContentType}, nil
}

// UploadAbort is the request handler for aborting and freeing previously uploaded parts.
func (h Handler) UploadAbort(
    ctx context.Context,
    request *pb.UploadAbortRequest,
) (*pb.UploadAbortResponse, error) {
    abortStatus, err := h.service.UploadAbort(
        ctx,
        aws.String(request.GetUploadId()),
        aws.String(request.GetKey()),
        aws.String(request.GetBucket()))

    if err != nil {
        return nil, err
    }

    return &pb.UploadAbortResponse{Status: abortStatus}, nil
}

// DeleteObjects is the request handler for deleting objects.
// It responds with a slice of deleted object keys and a slice of failed object keys.
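// Note: the underlying S3 DeleteObjects API deletes at most 1000 keys per call
// (an AWS S3 limit, not enforced here), so callers with larger key sets may need
// to split them into batches.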
func (h Handler) DeleteObjects(
    ctx context.Context,
    request *pb.DeleteObjectsRequest,
) (*pb.DeleteObjectsResponse, error) {
    deleteResponse, err := h.service.DeleteObjects(
        ctx,
        aws.String(request.GetBucket()),
        aws.StringSlice(request.GetKeys()),
    )
    if err != nil {
        return nil, err
    }

    deletedKeys := make([]string, 0, len(deleteResponse.Deleted))
    for _, deletedObject := range deleteResponse.Deleted {
        deletedKeys = append(deletedKeys, *(deletedObject.Key))
    }

    failedKeys := make([]string, 0, len(deleteResponse.Errors))
    for _, erroredObject := range deleteResponse.Errors {
        failedKeys = append(failedKeys, *(erroredObject.Key))
    }

    return &pb.DeleteObjectsResponse{Deleted: deletedKeys, Failed: failedKeys}, nil
}

// CopyObject copies an object from the source bucket to the destination bucket.
// The underlying S3 CopyObject call returns the new object's ETag (which reflects
// only the object's contents, not its metadata); this handler does not expose it
// and instead responds with the source key of the successfully copied object.
func (h Handler) CopyObject(
    ctx context.Context,
    request *pb.CopyObjectRequest,
) (*pb.CopyObjectResponse, error) {
    _, err := h.service.CopyObject(
        ctx,
        aws.String(request.GetBucketSrc()),
        aws.String(request.GetBucketDest()),
        aws.String(request.GetKeySrc()),
        aws.String(request.GetKeyDest()),
    )
    if err != nil {
        return nil, err
    }

    return &pb.CopyObjectResponse{Copied: request.GetKeySrc()}, nil
}

// MoveObject copies an object from the source bucket to the destination bucket
// and then deletes the source object.
func (h Handler) MoveObject(
    ctx context.Context,
    request *pb.MoveObjectRequest,
) (*pb.MoveObjectResponse, error) {
    _, err := h.service.CopyObject(
        ctx,
        aws.String(request.GetBucketSrc()),
        aws.String(request.GetBucketDest()),
        aws.String(request.GetKeySrc()),
        aws.String(request.GetKeyDest()),
    )
    if err != nil {
        return nil, err
    }

    // Delete the object from the source bucket
    deleteResponse, err := h.service.DeleteObjects(
        ctx,
        aws.String(request.GetBucketSrc()),
        aws.StringSlice([]string{request.GetKeySrc()}),
    )
    if err != nil {
        return nil, err
    }

    // Return an explicit error if the source object could not be deleted,
    // instead of returning a nil response with a nil error.
    if len(deleteResponse.Errors) > 0 {
        return nil, fmt.Errorf(
            "failed to delete source object %s/%s after copy",
            request.GetBucketSrc(),
            request.GetKeySrc(),
        )
    }

    return &pb.MoveObjectResponse{Moved: request.GetKeySrc()}, nil
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /object/service.go: -------------------------------------------------------------------------------- 1 | package object 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "sync" 7 | "net/url" 8 | 9 | "github.com/aws/aws-sdk-go/aws" 10 | "github.com/aws/aws-sdk-go/service/s3" 11 | "github.com/aws/aws-sdk-go/service/s3/s3manager" 12 | "github.com/meateam/upload-service/bucket" 13 | ) 14 | 15 | // Service is a structure used for operations on S3 objects. 16 | type Service struct { 17 | s3Client *s3.S3 18 | mu sync.Mutex 19 | } 20 | 21 | // NewService creates a Service and returns it. 22 | func NewService(s3Client *s3.S3) *Service { 23 | return &Service{s3Client: s3Client} 24 | } 25 | 26 | // GetS3Client returns the internal s3 client. 27 | func (s *Service) GetS3Client() *s3.S3 { 28 | return s.s3Client 29 | } 30 | 31 | // ensureBucketExists Creates a bucket if it doesn't exist. 32 | func (s *Service) ensureBucketExists(ctx aws.Context, bucketName *string) error { 33 | if ctx == nil { 34 | return fmt.Errorf("context is required") 35 | } 36 | if bucketName == nil { 37 | return fmt.Errorf("bucketName is required") 38 | } 39 | 40 | bucketService := bucket.NewService(s.GetS3Client()) 41 | s.mu.Lock() 42 | defer s.mu.Unlock() 43 | bucketExists := bucketService.BucketExists(ctx, bucketName) 44 | 45 | if !bucketExists { 46 | bucketExists, err := bucketService.CreateBucket(ctx, bucketName) 47 | if err != nil { 48 | return fmt.Errorf("failed to create bucket %s: %v", *bucketName, err) 49 | } 50 | 51 | if !bucketExists { 52 | return fmt.Errorf("failed to create bucket %s: bucket does not exist", *bucketName) 53 | } 54 | } 55 | *bucketName = bucketService.NormalizeCephBucketName(*bucketName) 56 | 57 | return nil 58 | } 59 | 60 | // UploadFile uploads a file to the given bucket and key in S3. 61 | // If metadata is a non-nil map then it will be uploaded with the file. 62 | // Returns the file's location and an error if any occurred. 
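// A minimal caller-side sketch (illustrative only; the bucket name, key, and
// surrounding variables are placeholders, not part of this service):
//
//	svc := object.NewService(s3Client)
//	location, err := svc.UploadFile(
//		context.Background(),
//		strings.NewReader("Hello, World!"), // any io.Reader
//		aws.String("docs/hello.txt"),       // key
//		aws.String("my-bucket"),            // bucket
//		aws.String("text/plain"),           // content type
//		nil,                                // optional metadata
//	)
//	if err != nil {
//		log.Fatalf("upload failed: %v", err)
//	}
//	log.Printf("uploaded to %s", *location)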
63 | func (s *Service) UploadFile( 64 | ctx aws.Context, 65 | file io.Reader, 66 | key *string, 67 | bucket *string, 68 | contentType *string, 69 | metadata map[string]*string, 70 | ) (*string, error) { 71 | if file == nil { 72 | return nil, fmt.Errorf("file is required") 73 | } 74 | 75 | if key == nil || *key == "" { 76 | return nil, fmt.Errorf("key is required") 77 | } 78 | 79 | if bucket == nil || *bucket == "" { 80 | return nil, fmt.Errorf("bucket name is required") 81 | } 82 | 83 | if ctx == nil { 84 | return nil, fmt.Errorf("context is required") 85 | } 86 | 87 | err := s.ensureBucketExists(ctx, bucket) 88 | if err != nil { 89 | return nil, fmt.Errorf("failed to upload file to %s/%s: %v", *bucket, *key, err) 90 | } 91 | 92 | // Create an uploader with S3 client and custom options 93 | uploader := s3manager.NewUploaderWithClient(s.s3Client, func(u *s3manager.Uploader) { 94 | u.PartSize = 32 * 1024 * 1024 // 32MB per part 95 | }) 96 | 97 | input := &s3manager.UploadInput{ 98 | Bucket: bucket, 99 | Key: key, 100 | Body: file, 101 | ContentType: contentType, 102 | } 103 | 104 | if metadata != nil { 105 | input.Metadata = metadata 106 | } 107 | 108 | // Upload a new object with the file's data to the user's bucket 109 | output, err := uploader.UploadWithContext(ctx, input) 110 | 111 | if err != nil { 112 | return nil, fmt.Errorf("failed to upload data to %s/%s: %v", *bucket, *key, err) 113 | } 114 | 115 | return &output.Location, nil 116 | } 117 | 118 | // UploadInit initiates a multipart upload to the given bucket and key in S3 with metadata. 119 | // File metadata is required for multipart upload. 120 | func (s *Service) UploadInit( 121 | ctx aws.Context, 122 | key *string, 123 | bucket *string, 124 | contentType *string, 125 | metadata map[string]*string, 126 | ) (*s3.CreateMultipartUploadOutput, error) { 127 | if key == nil || *key == "" { 128 | return nil, fmt.Errorf("key is required") 129 | } 130 | 131 | if bucket == nil || *bucket == "" { 132 | return nil, fmt.Errorf("bucket name is required") 133 | } 134 | 135 | if ctx == nil { 136 | return nil, fmt.Errorf("context is required") 137 | } 138 | 139 | err := s.ensureBucketExists(ctx, bucket) 140 | if err != nil { 141 | return nil, fmt.Errorf("failed to init upload to %s/%s: %v", *bucket, *key, err) 142 | } 143 | 144 | input := &s3.CreateMultipartUploadInput{ 145 | Bucket: bucket, 146 | Key: key, 147 | Metadata: metadata, 148 | ContentType: contentType, 149 | } 150 | 151 | result, err := s.s3Client.CreateMultipartUploadWithContext(ctx, input) 152 | if err != nil { 153 | return nil, err 154 | } 155 | 156 | return result, err 157 | } 158 | 159 | // UploadPart uploads a part in a multipart upload of a file. 
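// Illustrative sketch of initiating a multipart upload and sending its first part;
// the names (svc, firstChunk, "big.bin", "my-bucket") are hypothetical and assume
// svc := object.NewService(s3Client) as above:
//
//	initOut, err := svc.UploadInit(ctx, aws.String("big.bin"), aws.String("my-bucket"),
//		aws.String("application/octet-stream"), map[string]*string{"owner": aws.String("someone")})
//	if err != nil {
//		// handle error
//	}
//	partOut, err := svc.UploadPart(ctx, initOut.UploadId, aws.String("big.bin"),
//		aws.String("my-bucket"), aws.Int64(1), bytes.NewReader(firstChunk))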
160 | func (s *Service) UploadPart( 161 | ctx aws.Context, 162 | uploadID *string, 163 | key *string, 164 | bucket *string, 165 | partNumber *int64, 166 | body io.ReadSeeker, 167 | ) (*s3.UploadPartOutput, error) { 168 | if body == nil { 169 | return nil, fmt.Errorf("part body is required") 170 | } 171 | 172 | if key == nil || *key == "" { 173 | return nil, fmt.Errorf("key is required") 174 | } 175 | 176 | if bucket == nil || *bucket == "" { 177 | return nil, fmt.Errorf("bucket name is required") 178 | } 179 | 180 | if uploadID == nil || *uploadID == "" { 181 | return nil, fmt.Errorf("upload id is required") 182 | } 183 | 184 | if partNumber == nil { 185 | return nil, fmt.Errorf("part number is required") 186 | } 187 | 188 | if *partNumber < 1 || *partNumber > 10000 { 189 | return nil, fmt.Errorf("part number must be between 1 and 10,000") 190 | } 191 | 192 | if ctx == nil { 193 | return nil, fmt.Errorf("context is required") 194 | } 195 | 196 | err := s.ensureBucketExists(ctx, bucket) 197 | if err != nil { 198 | return nil, fmt.Errorf("failed to upload part to %s/%s: %v", *bucket, *key, err) 199 | } 200 | 201 | input := &s3.UploadPartInput{ 202 | Body: body, 203 | Bucket: bucket, 204 | Key: key, 205 | PartNumber: partNumber, 206 | UploadId: uploadID, 207 | } 208 | 209 | result, err := s.s3Client.UploadPartWithContext(ctx, input) 210 | if err != nil { 211 | return nil, err 212 | } 213 | 214 | return result, nil 215 | } 216 | 217 | // ListUploadParts lists the uploaded file parts of a multipart upload of a file. 218 | func (s *Service) ListUploadParts( 219 | ctx aws.Context, 220 | uploadID *string, 221 | key *string, 222 | bucket *string, 223 | ) (*s3.ListPartsOutput, error) { 224 | if key == nil || *key == "" { 225 | return nil, fmt.Errorf("key is required") 226 | } 227 | 228 | if bucket == nil || *bucket == "" { 229 | return nil, fmt.Errorf("bucket name is required") 230 | } 231 | 232 | if uploadID == nil || *uploadID == "" { 233 | return nil, fmt.Errorf("upload id is required") 234 | } 235 | 236 | if ctx == nil { 237 | return nil, fmt.Errorf("context is required") 238 | } 239 | 240 | err := s.ensureBucketExists(ctx, bucket) 241 | if err != nil { 242 | return nil, fmt.Errorf("failed to list upload %s parts at %s/%s: %v", *uploadID, *bucket, *key, err) 243 | } 244 | 245 | listPartsInput := &s3.ListPartsInput{ 246 | UploadId: uploadID, 247 | Key: key, 248 | Bucket: bucket, 249 | MaxParts: aws.Int64(10000), 250 | } 251 | 252 | parts, err := s.s3Client.ListPartsWithContext(ctx, listPartsInput) 253 | if err != nil { 254 | return nil, err 255 | } 256 | 257 | return parts, nil 258 | } 259 | 260 | // UploadComplete completes a multipart upload by assembling previously uploaded parts 261 | // associated with uploadID. 
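// Caller-side sketch (placeholder names): once every part has been uploaded, the
// upload is finalized with the same uploadID, key, and bucket. The method gathers
// the part ETags itself via ListUploadParts, so the caller does not need to track them:
//
//	completeOut, err := svc.UploadComplete(ctx, initOut.UploadId,
//		aws.String("big.bin"), aws.String("my-bucket"))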
262 | func (s *Service) UploadComplete( 263 | ctx aws.Context, 264 | uploadID *string, 265 | key *string, 266 | bucket *string, 267 | ) (*s3.CompleteMultipartUploadOutput, error) { 268 | if key == nil || *key == "" { 269 | return nil, fmt.Errorf("key is required") 270 | } 271 | 272 | if bucket == nil || *bucket == "" { 273 | return nil, fmt.Errorf("bucket name is required") 274 | } 275 | 276 | if uploadID == nil || *uploadID == "" { 277 | return nil, fmt.Errorf("upload id is required") 278 | } 279 | 280 | if ctx == nil { 281 | return nil, fmt.Errorf("context is required") 282 | } 283 | 284 | err := s.ensureBucketExists(ctx, bucket) 285 | if err != nil { 286 | return nil, fmt.Errorf("failed to upload complete %s parts at %s/%s: %v", *uploadID, *bucket, *key, err) 287 | } 288 | 289 | parts, err := s.ListUploadParts(ctx, uploadID, key, bucket) 290 | if err != nil { 291 | return nil, fmt.Errorf("failed listing upload parts") 292 | } 293 | 294 | completedMultipartParts := make([]*s3.CompletedPart, 0, len(parts.Parts)) 295 | 296 | for _, v := range parts.Parts { 297 | completedPart := &s3.CompletedPart{ 298 | ETag: v.ETag, 299 | PartNumber: v.PartNumber, 300 | } 301 | 302 | completedMultipartParts = append(completedMultipartParts, completedPart) 303 | } 304 | 305 | completedMultipartUpload := &s3.CompletedMultipartUpload{ 306 | Parts: completedMultipartParts, 307 | } 308 | 309 | input := &s3.CompleteMultipartUploadInput{ 310 | Bucket: bucket, 311 | Key: key, 312 | MultipartUpload: completedMultipartUpload, 313 | UploadId: uploadID, 314 | } 315 | 316 | result, err := s.s3Client.CompleteMultipartUploadWithContext(ctx, input) 317 | if err != nil { 318 | return nil, err 319 | } 320 | 321 | return result, nil 322 | } 323 | 324 | // HeadObject returns object's details. 325 | func (s *Service) HeadObject(ctx aws.Context, key *string, bucket *string) (*s3.HeadObjectOutput, error) { 326 | if key == nil || *key == "" { 327 | return nil, fmt.Errorf("key is required") 328 | } 329 | 330 | if bucket == nil || *bucket == "" { 331 | return nil, fmt.Errorf("bucket name is required") 332 | } 333 | if ctx == nil { 334 | return nil, fmt.Errorf("context is required") 335 | } 336 | 337 | err := s.ensureBucketExists(ctx, bucket) 338 | if err != nil { 339 | return nil, fmt.Errorf("failed to HeadObject %s/%s: %v", *bucket, *key, err) 340 | } 341 | 342 | obj, err := s.s3Client.HeadObjectWithContext(ctx, &s3.HeadObjectInput{Bucket: bucket, Key: key}) 343 | if err != nil { 344 | return nil, fmt.Errorf("failed to head object") 345 | } 346 | return obj, nil 347 | } 348 | 349 | // UploadAbort aborts a multipart upload. After a multipart upload is aborted, no additional parts 350 | // can be uploaded using that upload ID. The storage consumed by any previously uploaded parts will be freed. 351 | // However, if any part uploads are currently in progress, those part uploads might or might not succeed. 352 | // As a result, it might be necessary to abort a given multipart upload multiple times in order to 353 | // completely free all storage consumed by all parts. To verify that all parts have been removed, 354 | // so you don't get charged for the part storage, you should call 355 | // the List Parts operation and ensure the parts list is empty. 
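// A possible caller-side pattern (placeholder names), aborting and then checking
// that no parts are left behind, as suggested above:
//
//	if ok, err := svc.UploadAbort(ctx, initOut.UploadId, aws.String("big.bin"), aws.String("my-bucket")); err != nil || !ok {
//		// handle abort failure
//	}
//	parts, err := svc.ListUploadParts(ctx, initOut.UploadId, aws.String("big.bin"), aws.String("my-bucket"))
//	// expect an empty parts list (or a "no such upload" error) once the abort has fully taken effect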
356 | func (s *Service) UploadAbort(ctx aws.Context, uploadID *string, key *string, bucket *string) (bool, error) { 357 | if key == nil || *key == "" { 358 | return false, fmt.Errorf("key is required") 359 | } 360 | 361 | if bucket == nil || *bucket == "" { 362 | return false, fmt.Errorf("bucket name is required") 363 | } 364 | 365 | if uploadID == nil || *uploadID == "" { 366 | return false, fmt.Errorf("upload id is required") 367 | } 368 | 369 | if ctx == nil { 370 | return false, fmt.Errorf("context is required") 371 | } 372 | 373 | err := s.ensureBucketExists(ctx, bucket) 374 | if err != nil { 375 | return false, fmt.Errorf("failed to list upload %s parts at %s/%s: %v", *uploadID, *bucket, *key, err) 376 | } 377 | 378 | abortInput := &s3.AbortMultipartUploadInput{ 379 | Bucket: bucket, 380 | Key: key, 381 | UploadId: uploadID, 382 | } 383 | 384 | _, err = s.s3Client.AbortMultipartUploadWithContext(ctx, abortInput) 385 | if err != nil { 386 | return false, fmt.Errorf("failed aborting multipart upload: %v", err) 387 | } 388 | 389 | return true, nil 390 | } 391 | 392 | // DeleteObjects repeated string deletes an object from s3, 393 | // It receives a bucket and a slice of *strings to be deleted 394 | // and returns the deleted and errored objects or an error if exists. 395 | func (s *Service) DeleteObjects(ctx aws.Context, bucket *string, keys []*string) (*s3.DeleteObjectsOutput, error) { 396 | if ctx == nil { 397 | return nil, fmt.Errorf("context is required") 398 | } 399 | 400 | if bucket == nil || *bucket == "" { 401 | return nil, fmt.Errorf("bucket name is required") 402 | } 403 | 404 | if keys == nil || len(keys) <= 0 { 405 | return nil, fmt.Errorf("keys are required") 406 | } 407 | 408 | err := s.ensureBucketExists(ctx, bucket) 409 | if err != nil { 410 | return nil, fmt.Errorf("failed to DeleteObjects bucket, %s, does not exist: %v", *bucket, err) 411 | } 412 | 413 | objects := make([]*s3.ObjectIdentifier, 0, len(keys)) 414 | 415 | for _, key := range keys { 416 | objects = append(objects, &s3.ObjectIdentifier{ 417 | Key: key, 418 | }) 419 | } 420 | 421 | deleteObjectsInput := &s3.DeleteObjectsInput{ 422 | Bucket: bucket, 423 | Delete: &s3.Delete{ 424 | Objects: objects, 425 | Quiet: aws.Bool(false), 426 | }, 427 | } 428 | 429 | deleteResponse, err := s.s3Client.DeleteObjectsWithContext(ctx, deleteObjectsInput) 430 | if err != nil { 431 | return nil, fmt.Errorf("failed to delete objects: %v", err) 432 | } 433 | return deleteResponse, nil 434 | } 435 | 436 | 437 | // CopyObject - copy an object between source and destination buckets 438 | // It receives a source bucket, object key and a destination bucket 439 | func (s *Service) CopyObject( 440 | ctx aws.Context, 441 | bucketSrc *string, 442 | bucketDest *string, 443 | keySrc *string, 444 | keyDest *string, 445 | ) (*string, error) { 446 | if ctx == nil { 447 | return nil, fmt.Errorf("context is required") 448 | } 449 | 450 | if bucketSrc == nil || *bucketSrc == "" { 451 | return nil, fmt.Errorf("source bucket name is required") 452 | } 453 | 454 | if bucketDest == nil || *bucketDest == "" { 455 | return nil, fmt.Errorf("destination bucket name is required") 456 | } 457 | 458 | if keySrc == nil || *keySrc == "" { 459 | return nil, fmt.Errorf("object's src key is required") 460 | } 461 | 462 | if keyDest == nil || *keyDest == "" { 463 | return nil, fmt.Errorf("object's dest key is required") 464 | } 465 | 466 | 467 | // Check if the source bucket exists 468 | // if it doesn't exist, we don't need to create a new bucket, 469 | // 
because it would be empty with no object to copy 470 | headBucketinput := &s3.HeadBucketInput{Bucket: bucketSrc} 471 | 472 | if _, err := s.s3Client.HeadBucket(headBucketinput); err != nil { 473 | return nil, fmt.Errorf("failed to CopyObject from bucket, %s, does not exist: %v", *bucketSrc, err) 474 | } 475 | 476 | // Check if the object exists 477 | sourceObjectResponse, err := s.HeadObject(ctx, keySrc, bucketSrc) 478 | if err != nil { 479 | return nil, fmt.Errorf("failed to CopyObject from bucket, %s, because object %s does not exist: %v", *bucketSrc, *keySrc, err) 480 | } 481 | 482 | // Check if the destination bucket exist 483 | if err := s.ensureBucketExists(ctx, bucketDest); err != nil { 484 | return nil, fmt.Errorf("failed to CopyObject from bucket, %s, does not exist: %v", *bucketSrc, err) 485 | } 486 | 487 | // Parse the location of the object to URL 488 | objectToCopy := url.QueryEscape(*bucketSrc + "/" + *keySrc) 489 | 490 | copyObjectinput := &s3.CopyObjectInput{ 491 | Bucket: bucketDest, 492 | CopySource: aws.String(objectToCopy), // (CopySource field expected url) 493 | Key: keyDest, 494 | } 495 | 496 | copyObjectResponse, err := s.s3Client.CopyObjectWithContext(ctx, copyObjectinput) 497 | if err != nil { 498 | return nil, fmt.Errorf("failed to copy object: %v", err) 499 | } 500 | 501 | // Compare the ETags between the source and the destination buckets (kind of a checksum) 502 | if *sourceObjectResponse.ETag != *copyObjectResponse.CopyObjectResult.ETag { 503 | return nil, fmt.Errorf( 504 | "failed to copy object %s from source bucket, %s, because something went wrong in the process of copying the object to bucket %s and the ETag has changed : %v", 505 | *keySrc, 506 | *bucketSrc, 507 | *bucketDest, 508 | err) 509 | } 510 | 511 | 512 | return keySrc, nil 513 | } 514 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 2 | cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= 4 | github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 5 | github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= 6 | github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 7 | github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= 8 | github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= 9 | github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 10 | github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 11 | github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= 12 | github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= 13 | github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= 14 | github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= 15 | github.com/aws/aws-sdk-go v1.19.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 16 | github.com/aws/aws-sdk-go v1.23.21 h1:eVJT2C99cAjZlBY8+CJovf6AwrSANzAcYNuxdCB+SPk= 17 | 
github.com/aws/aws-sdk-go v1.23.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 18 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 19 | github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 20 | github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 21 | github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 22 | github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= 23 | github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= 24 | github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 25 | github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= 26 | github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 27 | github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 28 | github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 29 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 30 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 31 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 32 | github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 33 | github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= 34 | github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 35 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 36 | github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= 37 | github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= 38 | github.com/elastic/go-sysinfo v1.1.0 h1:FiOJvd3KSHa8ALx/7EPsFcJFsMMhCfgG7NPUZwm3ybk= 39 | github.com/elastic/go-sysinfo v1.1.0/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= 40 | github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= 41 | github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= 42 | github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= 43 | github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 44 | github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= 45 | github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= 46 | github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= 47 | github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= 48 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 49 | github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= 50 | github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 51 | github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= 52 | 
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= 53 | github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 54 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 55 | github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 56 | github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= 57 | github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 58 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= 59 | github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 60 | github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 61 | github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= 62 | github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 63 | github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= 64 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 65 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 66 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 67 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 68 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 69 | github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 70 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 71 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 72 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 73 | github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= 74 | github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= 75 | github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= 76 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 77 | github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= 78 | github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= 79 | github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= 80 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 81 | github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= 82 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 83 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 84 | github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= 85 | github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 86 | github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= 87 | github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 88 | github.com/gorilla/websocket v1.4.0/go.mod 
h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= 89 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 90 | github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg= 91 | github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= 92 | github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 93 | github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= 94 | github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 95 | github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 96 | github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= 97 | github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= 98 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 99 | github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 100 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= 101 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 102 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= 103 | github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= 104 | github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= 105 | github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= 106 | github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= 107 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 108 | github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= 109 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 110 | github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 111 | github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= 112 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 113 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 114 | github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= 115 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 116 | github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= 117 | github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= 118 | github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w= 119 | github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 120 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 121 | github.com/meateam/elasticsearch-logger v1.1.3-0.20190901111807-4e8b84fb9fda h1:lxFVxa9uF+QA/D1NPA3rXqY7vqEhJrrXqRVdd5eQHUE= 122 | github.com/meateam/elasticsearch-logger 
v1.1.3-0.20190901111807-4e8b84fb9fda/go.mod h1:aLGQxX9uf7lJ2Kr1J7+qq3IeO/NP7YAXe3IzVLTAIeM= 123 | github.com/meateam/elogrus/v4 v4.0.2 h1:X+fpps3Ti9vIoqo/wnPcuaB+wxkZwkmTppXnnBe1Un4= 124 | github.com/meateam/elogrus/v4 v4.0.2/go.mod h1:O+KJPmbnEV80u+V8tCYTvcBpzp/HZa7XR4nqDtDMzYg= 125 | github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= 126 | github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 127 | github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 128 | github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 129 | github.com/olivere/elastic/v7 v7.0.0 h1:iw29D/OSXdR2loC4qPNddvWjuQqN7Co/uALVD4Si+D4= 130 | github.com/olivere/elastic/v7 v7.0.0/go.mod h1:h2vSaBKzz7eL+VsYPtIOXOURZlXmp+yY5MgyIW3Y/M0= 131 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 132 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 133 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 134 | github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 135 | github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= 136 | github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= 137 | github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= 138 | github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= 139 | github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= 140 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 141 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 142 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 143 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 144 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 145 | github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 146 | github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= 147 | github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= 148 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 149 | github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 150 | github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 151 | github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= 152 | github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 153 | github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 154 | github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= 155 | github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 156 | github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 157 | github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 158 | github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= 159 | github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 160 | github.com/prometheus/procfs v0.0.4 h1:w8DjqFMJDjuVwdZBQoOozr4MVWOnwF7RcL/7uxBjY78= 161 | github.com/prometheus/procfs v0.0.4/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= 162 | github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= 163 | github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 164 | github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= 165 | github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= 166 | github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= 167 | github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 168 | github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= 169 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 170 | github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= 171 | github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 172 | github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 173 | github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= 174 | github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= 175 | github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= 176 | github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= 177 | github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= 178 | github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= 179 | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= 180 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 181 | github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 182 | github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= 183 | github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= 184 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 185 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 186 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 187 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 188 | github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= 189 | github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 190 | github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 191 | github.com/ugorji/go v1.1.4/go.mod 
h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= 192 | github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= 193 | github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= 194 | github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= 195 | go.elastic.co/apm v1.5.0 h1:arba7i+CVc36Jptww3R1ttW+O10ydvnBtidyd85DLpg= 196 | go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= 197 | go.elastic.co/apm/module/apmgrpc v1.5.0 h1:HBuetQVE+oT29EAvo2dIiv/jZjJ7TuMYofBYEfGa54c= 198 | go.elastic.co/apm/module/apmgrpc v1.5.0/go.mod h1:yMMbkQ9QGlMtV5Mw3TImQVFkQLS7oi+eHmY2YPQLy3c= 199 | go.elastic.co/apm/module/apmhttp v1.5.0 h1:sxntP97oENyWWi+6GAwXUo05oEpkwbiarZLqrzLRA4o= 200 | go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= 201 | go.elastic.co/fastjson v1.0.0 h1:ooXV/ABvf+tBul26jcVViPT3sBir0PvXgibYB1IQQzg= 202 | go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= 203 | go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= 204 | go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= 205 | go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= 206 | go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 207 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 208 | go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 209 | golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 210 | golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 211 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 212 | golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 213 | golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 214 | golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 215 | golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 216 | golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= 217 | golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 218 | golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 219 | golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 220 | golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 221 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 222 | golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 223 | golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 224 | golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 225 | golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 226 | golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 227 | golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 228 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 229 | golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= 230 | golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 231 | golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2 h1:4dVFTC832rPn4pomLSz1vA+are2+dU19w1H8OngV7nc= 232 | golang.org/x/net v0.0.0-20190912160710-24e19bdeb0f2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 233 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 234 | golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 235 | golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 236 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 237 | golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 238 | golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 239 | golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 240 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 241 | golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 242 | golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 243 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 244 | golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 245 | golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 246 | golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 247 | golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 248 | golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 249 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 250 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 251 | golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 252 | golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 253 | golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 254 | golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0 h1:7z820YPX9pxWR59qM7BE5+fglp4D/mKqAwCvGt11b+8= 255 | golang.org/x/sys v0.0.0-20190830142957-1e83adbbebd0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 256 | golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 257 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 258 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 259 | golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 260 | golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 261 | golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 262 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 263 | golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 264 | golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 265 | golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= 266 | golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 267 | golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 268 | golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 269 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= 270 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 271 | google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= 272 | google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= 273 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 274 | google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 275 | google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 276 | google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 277 | google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= 278 | google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 279 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= 280 | google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= 281 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= 282 | google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= 283 | google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= 284 | google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= 285 | google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 286 | google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= 287 | google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= 288 | google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= 289 | google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 290 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 291 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 292 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 293 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 294 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 295 | google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 296 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 297 | google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 298 | google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= 299 | google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= 300 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 301 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 302 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= 303 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 304 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 305 | gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= 306 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 307 | gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= 308 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 309 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 310 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 311 | honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 312 | honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 313 | honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 314 | honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= 315 | howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= 316 | howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= 317 | -------------------------------------------------------------------------------- /object/object_test.go: -------------------------------------------------------------------------------- 1 | package object_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "crypto/rand" 7 | "fmt" 8 | "io" 9 | "io/ioutil" 10 | "log" 11 | "net" 12 | "reflect" 13 | "testing" 14 | 15 | "github.com/aws/aws-sdk-go/aws" 16 | "github.com/aws/aws-sdk-go/service/s3" 17 | "github.com/google/go-cmp/cmp" 18 | "github.com/google/go-cmp/cmp/cmpopts" 19 | "github.com/meateam/upload-service/internal/test" 20 | "github.com/meateam/upload-service/object" 21 | pb 
"github.com/meateam/upload-service/proto" 22 | "github.com/meateam/upload-service/server" 23 | "github.com/sirupsen/logrus" 24 | "google.golang.org/grpc" 25 | "google.golang.org/grpc/test/bufconn" 26 | ) 27 | 28 | const bufSize = 1024 * 1024 29 | 30 | // Declaring global variable. 31 | var ( 32 | logger = logrus.New() 33 | lis *bufconn.Listener 34 | s3Client *s3.S3 35 | s3Endpoint string 36 | ) 37 | 38 | func init() { 39 | lis = bufconn.Listen(bufSize) 40 | 41 | // Disable log output. 42 | logger.SetOutput(ioutil.Discard) 43 | uploadServer := server.NewServer(logger) 44 | 45 | s3Client = uploadServer.GetHandler().GetService().GetS3Client() 46 | s3Endpoint = s3Client.Endpoint 47 | 48 | go uploadServer.Serve(lis) 49 | 50 | if err := test.EmptyAndDeleteBucket(s3Client, "testbucket"); err != nil { 51 | log.Printf("test.EmptyAndDeleteBucket failed with error: %v", err) 52 | } 53 | if err := test.EmptyAndDeleteBucket(s3Client, "testbucket1"); err != nil { 54 | log.Printf("test.EmptyAndDeleteBucket failed with error: %v", err) 55 | } 56 | } 57 | 58 | func bufDialer(context.Context, string) (net.Conn, error) { 59 | return lis.Dial() 60 | } 61 | 62 | func TestService_UploadFile(t *testing.T) { 63 | metadata := make(map[string]*string) 64 | metadata["test"] = aws.String("testt") 65 | type fields struct { 66 | s3Client *s3.S3 67 | } 68 | type args struct { 69 | file io.Reader 70 | key *string 71 | bucket *string 72 | contentType *string 73 | metadata map[string]*string 74 | ctx context.Context 75 | } 76 | 77 | tests := []struct { 78 | name string 79 | fields fields 80 | args args 81 | want *string 82 | wantErr bool 83 | }{ 84 | { 85 | name: "upload text file", 86 | fields: fields{s3Client: s3Client}, 87 | args: args{ 88 | key: aws.String("testfile.txt"), 89 | bucket: aws.String("testbucket"), 90 | file: bytes.NewReader([]byte("Hello, World!")), 91 | contentType: aws.String("text/plain"), 92 | metadata: nil, 93 | ctx: context.Background(), 94 | }, 95 | wantErr: false, 96 | want: aws.String(fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint)), 97 | }, 98 | { 99 | name: "upload text file in a folder", 100 | fields: fields{s3Client: s3Client}, 101 | args: args{ 102 | key: aws.String("testfolder/testfile.txt"), 103 | bucket: aws.String("testbucket"), 104 | file: bytes.NewReader([]byte("Hello, World!")), 105 | contentType: aws.String("text/plain"), 106 | metadata: metadata, 107 | ctx: context.Background(), 108 | }, 109 | wantErr: false, 110 | want: aws.String(fmt.Sprintf("%s/testbucket/testfolder/testfile.txt", s3Endpoint)), 111 | }, 112 | { 113 | name: "upload text file with empty key", 114 | fields: fields{s3Client: s3Client}, 115 | args: args{ 116 | key: aws.String(""), 117 | bucket: aws.String("testbucket"), 118 | file: bytes.NewReader([]byte("Hello, World!")), 119 | contentType: aws.String("text/plain"), 120 | metadata: nil, 121 | ctx: context.Background(), 122 | }, 123 | wantErr: true, 124 | }, 125 | { 126 | name: "upload text file with empty bucket", 127 | fields: fields{s3Client: s3Client}, 128 | args: args{ 129 | key: aws.String("testfile.txt"), 130 | bucket: aws.String(""), 131 | file: bytes.NewReader([]byte("Hello, World!")), 132 | contentType: aws.String("text/plain"), 133 | metadata: nil, 134 | ctx: context.Background(), 135 | }, 136 | wantErr: true, 137 | }, 138 | { 139 | name: "upload text file with nil key", 140 | fields: fields{s3Client: s3Client}, 141 | args: args{ 142 | key: nil, 143 | bucket: aws.String("testbucket"), 144 | file: bytes.NewReader([]byte("Hello, World!")), 145 | 
contentType: aws.String("text/plain"), 146 | metadata: nil, 147 | ctx: context.Background(), 148 | }, 149 | wantErr: true, 150 | }, 151 | { 152 | name: "upload text file with nil bucket", 153 | fields: fields{s3Client: s3Client}, 154 | args: args{ 155 | key: aws.String("testfile.txt"), 156 | bucket: nil, 157 | file: bytes.NewReader([]byte("Hello, World!")), 158 | contentType: aws.String("text/plain"), 159 | metadata: nil, 160 | ctx: context.Background(), 161 | }, 162 | wantErr: true, 163 | }, 164 | { 165 | name: "upload nil file", 166 | fields: fields{s3Client: s3Client}, 167 | args: args{ 168 | key: aws.String("testfile.txt"), 169 | bucket: aws.String("testbucket"), 170 | contentType: aws.String("text/plain"), 171 | file: nil, 172 | metadata: nil, 173 | ctx: context.Background(), 174 | }, 175 | wantErr: true, 176 | }, 177 | } 178 | 179 | for _, tt := range tests { 180 | t.Run(tt.name, func(t *testing.T) { 181 | s := object.NewService(tt.fields.s3Client) 182 | 183 | got, err := s.UploadFile( 184 | tt.args.ctx, 185 | tt.args.file, 186 | tt.args.key, 187 | tt.args.bucket, 188 | tt.args.contentType, 189 | tt.args.metadata, 190 | ) 191 | if (err != nil) != tt.wantErr { 192 | t.Errorf("UploadService.UploadFile() error = %v, wantErr %v", err, tt.wantErr) 193 | return 194 | } 195 | 196 | if got != nil && *got != *tt.want { 197 | t.Errorf("UploadService.UploadFile() = %v, want %v", got, tt.want) 198 | } 199 | }) 200 | } 201 | } 202 | 203 | func TestHandler_UploadMedia(t *testing.T) { 204 | hugefile := make([]byte, 5<<20) 205 | if _, err := rand.Read(hugefile); err != nil { 206 | t.Errorf("Could not generate file with error: %v", err) 207 | } 208 | 209 | uploadservice := object.NewService(s3Client) 210 | 211 | type fields struct { 212 | UploadService *object.Service 213 | } 214 | type args struct { 215 | ctx context.Context 216 | request *pb.UploadMediaRequest 217 | } 218 | tests := []struct { 219 | name string 220 | fields fields 221 | args args 222 | want *pb.UploadMediaResponse 223 | wantErr bool 224 | }{ 225 | { 226 | name: "UploadMedia - text file", 227 | fields: fields{UploadService: uploadservice}, 228 | args: args{ 229 | ctx: context.Background(), 230 | request: &pb.UploadMediaRequest{ 231 | Key: "testfile.txt", 232 | ContentType: "text/plain", 233 | Bucket: "testbucket", 234 | File: []byte("Hello, World!"), 235 | }, 236 | }, 237 | wantErr: false, 238 | want: &pb.UploadMediaResponse{ 239 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 240 | }, 241 | }, 242 | { 243 | name: "UploadMedia - text file - without key", 244 | fields: fields{UploadService: uploadservice}, 245 | args: args{ 246 | ctx: context.Background(), 247 | request: &pb.UploadMediaRequest{ 248 | Key: "", 249 | ContentType: "text/plain", 250 | Bucket: "testbucket", 251 | File: []byte("Hello, World!"), 252 | }, 253 | }, 254 | wantErr: true, 255 | }, 256 | { 257 | name: "UploadMedia - text file - without bucket", 258 | fields: fields{UploadService: uploadservice}, 259 | args: args{ 260 | ctx: context.Background(), 261 | request: &pb.UploadMediaRequest{ 262 | Key: "testfile.txt", 263 | ContentType: "text/plain", 264 | Bucket: "", 265 | File: []byte("Hello, World!"), 266 | }, 267 | }, 268 | wantErr: true, 269 | }, 270 | { 271 | name: "UploadMedia - text file - with nil file", 272 | fields: fields{UploadService: uploadservice}, 273 | args: args{ 274 | ctx: context.Background(), 275 | request: &pb.UploadMediaRequest{ 276 | Key: "testfile.txt", 277 | ContentType: "text/plain", 278 | Bucket: "testbucket", 279 | File: 
nil, 280 | }, 281 | }, 282 | wantErr: false, 283 | want: &pb.UploadMediaResponse{ 284 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 285 | }, 286 | }, 287 | { 288 | name: "UploadMedia - text file - huge file", 289 | fields: fields{UploadService: uploadservice}, 290 | args: args{ 291 | ctx: context.Background(), 292 | request: &pb.UploadMediaRequest{ 293 | Key: "testfile.txt", 294 | ContentType: "text/plain", 295 | Bucket: "testbucket", 296 | File: hugefile, 297 | }, 298 | }, 299 | wantErr: false, 300 | want: &pb.UploadMediaResponse{ 301 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 302 | }, 303 | }, 304 | } 305 | 306 | // Create connection to server 307 | ctx := context.Background() 308 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 309 | if err != nil { 310 | t.Fatalf("Failed to dial bufnet: %v", err) 311 | } 312 | defer conn.Close() 313 | 314 | // Create client 315 | client := pb.NewUploadClient(conn) 316 | 317 | // Iterate over test cases 318 | for _, tt := range tests { 319 | t.Run(tt.name, func(t *testing.T) { 320 | got, err := client.UploadMedia(tt.args.ctx, tt.args.request) 321 | if (err != nil) != tt.wantErr { 322 | t.Errorf("UploadHandler.UploadMedia() error = %v, wantErr %v", err, tt.wantErr) 323 | return 324 | } 325 | if got != nil && !reflect.DeepEqual(got, tt.want) { 326 | t.Errorf("UploadHandler.UploadMedia() = %v, want %v", got, tt.want) 327 | } 328 | }) 329 | } 330 | } 331 | 332 | func TestService_UploadInit(t *testing.T) { 333 | metadata := make(map[string]*string) 334 | metadata["test"] = aws.String("testt") 335 | type fields struct { 336 | s3Client *s3.S3 337 | } 338 | type args struct { 339 | key *string 340 | bucket *string 341 | contentType *string 342 | metadata map[string]*string 343 | ctx context.Context 344 | } 345 | tests := []struct { 346 | name string 347 | fields fields 348 | args args 349 | want *s3.CreateMultipartUploadOutput 350 | wantErr bool 351 | }{ 352 | { 353 | name: "init upload", 354 | fields: fields{s3Client: s3Client}, 355 | args: args{ 356 | key: aws.String("testfile.txt"), 357 | bucket: aws.String("testbucket"), 358 | contentType: aws.String("text/plain"), 359 | metadata: metadata, 360 | ctx: context.Background(), 361 | }, 362 | wantErr: false, 363 | want: &s3.CreateMultipartUploadOutput{ 364 | Bucket: aws.String("testbucket"), 365 | Key: aws.String("testfile.txt"), 366 | }, 367 | }, 368 | { 369 | name: "init upload in folder", 370 | fields: fields{s3Client: s3Client}, 371 | args: args{ 372 | key: aws.String("testfolder/testfile.txt"), 373 | contentType: aws.String("text/plain"), 374 | bucket: aws.String("testbucket"), 375 | metadata: metadata, 376 | ctx: context.Background(), 377 | }, 378 | wantErr: false, 379 | want: &s3.CreateMultipartUploadOutput{ 380 | Bucket: aws.String("testbucket"), 381 | Key: aws.String("testfolder/testfile.txt"), 382 | }, 383 | }, 384 | { 385 | name: "init upload with missing key", 386 | fields: fields{s3Client: s3Client}, 387 | args: args{ 388 | key: aws.String(""), 389 | bucket: aws.String("testbucket"), 390 | contentType: aws.String("text/plain"), 391 | metadata: metadata, 392 | ctx: context.Background(), 393 | }, 394 | wantErr: true, 395 | }, 396 | { 397 | name: "init upload with nil key", 398 | fields: fields{s3Client: s3Client}, 399 | args: args{ 400 | key: nil, 401 | bucket: aws.String("testbucket"), 402 | contentType: aws.String("text/plain"), 403 | metadata: metadata, 404 | ctx: context.Background(), 405 | }, 406 | 
wantErr: true, 407 | }, 408 | { 409 | name: "init upload with missing bucket", 410 | fields: fields{s3Client: s3Client}, 411 | args: args{ 412 | key: aws.String("testfile.txt"), 413 | bucket: aws.String(""), 414 | contentType: aws.String("text/plain"), 415 | metadata: metadata, 416 | ctx: context.Background(), 417 | }, 418 | wantErr: true, 419 | }, 420 | { 421 | name: "init upload with nil bucket", 422 | fields: fields{s3Client: s3Client}, 423 | args: args{ 424 | key: aws.String("testfile.txt"), 425 | bucket: nil, 426 | contentType: aws.String("text/plain"), 427 | metadata: metadata, 428 | ctx: context.Background(), 429 | }, 430 | wantErr: true, 431 | }, 432 | { 433 | name: "init upload with empty metadata", 434 | fields: fields{s3Client: s3Client}, 435 | args: args{ 436 | key: aws.String("testfile.txt"), 437 | bucket: aws.String("testbucket"), 438 | contentType: aws.String("text/plain"), 439 | metadata: aws.StringMap(make(map[string]string)), 440 | ctx: context.Background(), 441 | }, 442 | wantErr: false, 443 | want: &s3.CreateMultipartUploadOutput{ 444 | Bucket: aws.String("testbucket"), 445 | Key: aws.String("testfile.txt"), 446 | }, 447 | }, 448 | { 449 | name: "init upload with nil metadata", 450 | fields: fields{s3Client: s3Client}, 451 | args: args{ 452 | key: aws.String("testfile.txt"), 453 | bucket: aws.String("testbucket"), 454 | contentType: aws.String("text/plain"), 455 | metadata: nil, 456 | ctx: context.Background(), 457 | }, 458 | wantErr: false, 459 | want: &s3.CreateMultipartUploadOutput{ 460 | Bucket: aws.String("testbucket"), 461 | Key: aws.String("testfile.txt"), 462 | }, 463 | }, 464 | } 465 | for _, tt := range tests { 466 | t.Run(tt.name, func(t *testing.T) { 467 | s := object.NewService(tt.fields.s3Client) 468 | 469 | got, err := s.UploadInit(tt.args.ctx, tt.args.key, tt.args.bucket, tt.args.contentType, tt.args.metadata) 470 | if (err != nil) != tt.wantErr { 471 | t.Errorf("UploadService.UploadInit() error = %v, wantErr %v", err, tt.wantErr) 472 | return 473 | } 474 | if !reflect.DeepEqual(got, tt.want) && got.UploadId == nil { 475 | t.Errorf("UploadService.UploadInit() = %v, want %v", got, tt.want) 476 | } 477 | }) 478 | } 479 | } 480 | 481 | func TestHandler_UploadInit(t *testing.T) { 482 | metadata := make(map[string]string) 483 | metadata["test"] = "testt" 484 | uploadservice := object.NewService(s3Client) 485 | 486 | type fields struct { 487 | UploadService *object.Service 488 | } 489 | type args struct { 490 | ctx context.Context 491 | request *pb.UploadInitRequest 492 | } 493 | tests := []struct { 494 | name string 495 | fields fields 496 | args args 497 | want *pb.UploadInitResponse 498 | wantErr bool 499 | }{ 500 | { 501 | name: "UploadInit", 502 | fields: fields{UploadService: uploadservice}, 503 | args: args{ 504 | ctx: context.Background(), 505 | request: &pb.UploadInitRequest{ 506 | Key: "testfile.txt", 507 | ContentType: "text/plain", 508 | Bucket: "testbucket", 509 | Metadata: metadata, 510 | }, 511 | }, 512 | wantErr: false, 513 | want: &pb.UploadInitResponse{ 514 | Key: "testfile.txt", 515 | Bucket: "testbucket", 516 | }, 517 | }, 518 | { 519 | name: "UploadInit folder", 520 | fields: fields{UploadService: uploadservice}, 521 | args: args{ 522 | ctx: context.Background(), 523 | request: &pb.UploadInitRequest{ 524 | Key: "testfolder/testfile.txt", 525 | ContentType: "text/plain", 526 | Bucket: "testbucket", 527 | Metadata: metadata, 528 | }, 529 | }, 530 | wantErr: false, 531 | want: &pb.UploadInitResponse{ 532 | Key: "testfolder/testfile.txt", 533 
| Bucket: "testbucket", 534 | }, 535 | }, 536 | { 537 | name: "UploadInit with empty key", 538 | fields: fields{UploadService: uploadservice}, 539 | args: args{ 540 | ctx: context.Background(), 541 | request: &pb.UploadInitRequest{ 542 | Key: "", 543 | ContentType: "text/plain", 544 | Bucket: "testbucket", 545 | Metadata: metadata, 546 | }, 547 | }, 548 | wantErr: true, 549 | }, 550 | { 551 | name: "UploadInit with empty bucket", 552 | fields: fields{UploadService: uploadservice}, 553 | args: args{ 554 | ctx: context.Background(), 555 | request: &pb.UploadInitRequest{ 556 | ContentType: "text/plain", 557 | Key: "testfile.txt", 558 | Bucket: "", 559 | Metadata: metadata, 560 | }, 561 | }, 562 | wantErr: true, 563 | }, 564 | { 565 | name: "UploadInit with empty metadata", 566 | fields: fields{UploadService: uploadservice}, 567 | args: args{ 568 | ctx: context.Background(), 569 | request: &pb.UploadInitRequest{ 570 | ContentType: "text/plain", 571 | Key: "testfile.txt", 572 | Bucket: "testbucket", 573 | Metadata: make(map[string]string), 574 | }, 575 | }, 576 | wantErr: false, 577 | want: &pb.UploadInitResponse{ 578 | Key: "testfile.txt", 579 | Bucket: "testbucket", 580 | }, 581 | }, 582 | { 583 | name: "UploadInit with nil metadata", 584 | fields: fields{UploadService: uploadservice}, 585 | args: args{ 586 | ctx: context.Background(), 587 | request: &pb.UploadInitRequest{ 588 | ContentType: "text/plain", 589 | Key: "testfile.txt", 590 | Bucket: "testbucket", 591 | Metadata: nil, 592 | }, 593 | }, 594 | wantErr: false, 595 | want: &pb.UploadInitResponse{ 596 | Key: "testfile.txt", 597 | Bucket: "testbucket", 598 | }, 599 | }, 600 | } 601 | 602 | // Create connection to server 603 | ctx := context.Background() 604 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 605 | if err != nil { 606 | t.Fatalf("Failed to dial bufnet: %v", err) 607 | } 608 | defer conn.Close() 609 | 610 | // Create client 611 | client := pb.NewUploadClient(conn) 612 | 613 | for _, tt := range tests { 614 | t.Run(tt.name, func(t *testing.T) { 615 | got, err := client.UploadInit(tt.args.ctx, tt.args.request) 616 | if (err != nil) != tt.wantErr { 617 | t.Errorf("UploadHandler.UploadInit() error = %v, wantErr %v", err, tt.wantErr) 618 | return 619 | } 620 | if !reflect.DeepEqual(got, tt.want) && got.UploadId == "" { 621 | t.Errorf("UploadHandler.UploadInit() = %v, want %v", got, tt.want) 622 | } 623 | }) 624 | } 625 | } 626 | 627 | //nolint:gocyclo 628 | func TestService_UploadPart(t *testing.T) { 629 | metadata := make(map[string]*string) 630 | metadata["test"] = aws.String("meta") 631 | file := make([]byte, 50<<20) 632 | if _, err := rand.Read(file); err != nil { 633 | t.Errorf("Could not generate file with error: %v", err) 634 | } 635 | fileReader := bytes.NewReader(file) 636 | 637 | type fields struct { 638 | s3Client *s3.S3 639 | } 640 | type args struct { 641 | initKey *string 642 | initBucket *string 643 | key *string 644 | bucket *string 645 | partNumber *int64 646 | body io.ReadSeeker 647 | ctx context.Context 648 | } 649 | tests := []struct { 650 | name string 651 | fields fields 652 | args args 653 | wantErr bool 654 | }{ 655 | { 656 | name: "upload part", 657 | fields: fields{s3Client: s3Client}, 658 | args: args{ 659 | initKey: aws.String("partfile.txt"), 660 | initBucket: aws.String("testbucket"), 661 | key: aws.String("partfile.txt"), 662 | bucket: aws.String("testbucket"), 663 | partNumber: aws.Int64(1), 664 | body: fileReader, 665 | ctx: 
context.Background(), 666 | }, 667 | wantErr: false, 668 | }, 669 | { 670 | name: "upload part in folder", 671 | fields: fields{s3Client: s3Client}, 672 | args: args{ 673 | initKey: aws.String("testfolder/partfile.txt"), 674 | initBucket: aws.String("testbucket"), 675 | key: aws.String("testfolder/partfile.txt"), 676 | bucket: aws.String("testbucket"), 677 | partNumber: aws.Int64(1), 678 | body: fileReader, 679 | ctx: context.Background(), 680 | }, 681 | wantErr: false, 682 | }, 683 | { 684 | name: "upload part with empty key", 685 | fields: fields{s3Client: s3Client}, 686 | args: args{ 687 | initKey: aws.String("partfile1.txt"), 688 | initBucket: aws.String("testbucket"), 689 | key: aws.String(""), 690 | bucket: aws.String("testbucket"), 691 | partNumber: aws.Int64(1), 692 | body: fileReader, 693 | ctx: context.Background(), 694 | }, 695 | wantErr: true, 696 | }, 697 | { 698 | name: "upload part with nil key", 699 | fields: fields{s3Client: s3Client}, 700 | args: args{ 701 | initKey: aws.String("partfile2.txt"), 702 | initBucket: aws.String("testbucket"), 703 | key: nil, 704 | bucket: aws.String("testbucket"), 705 | partNumber: aws.Int64(1), 706 | body: fileReader, 707 | ctx: context.Background(), 708 | }, 709 | wantErr: true, 710 | }, 711 | { 712 | name: "upload part with key mismatch", 713 | fields: fields{s3Client: s3Client}, 714 | args: args{ 715 | initKey: aws.String("partfile3.txt"), 716 | initBucket: aws.String("testbucket"), 717 | key: aws.String("partfile.txt"), 718 | bucket: aws.String("testbucket"), 719 | partNumber: aws.Int64(1), 720 | body: fileReader, 721 | ctx: context.Background(), 722 | }, 723 | wantErr: true, 724 | }, 725 | { 726 | name: "upload part with empty bucket", 727 | fields: fields{s3Client: s3Client}, 728 | args: args{ 729 | initKey: aws.String("partfile4.txt"), 730 | initBucket: aws.String("testbucket"), 731 | key: aws.String("partfile4.txt"), 732 | bucket: aws.String(""), 733 | partNumber: aws.Int64(1), 734 | body: fileReader, 735 | ctx: context.Background(), 736 | }, 737 | wantErr: true, 738 | }, 739 | { 740 | name: "upload part with nil bucket", 741 | fields: fields{s3Client: s3Client}, 742 | args: args{ 743 | initKey: aws.String("partfile5.txt"), 744 | initBucket: aws.String("testbucket"), 745 | key: aws.String("partfile5.txt"), 746 | bucket: nil, 747 | partNumber: aws.Int64(1), 748 | body: fileReader, 749 | ctx: context.Background(), 750 | }, 751 | wantErr: true, 752 | }, 753 | { 754 | name: "upload part with bucket mismatch", 755 | fields: fields{s3Client: s3Client}, 756 | args: args{ 757 | initKey: aws.String("partfile6.txt"), 758 | initBucket: aws.String("testbucket"), 759 | key: aws.String("partfile6.txt"), 760 | bucket: aws.String("testbucket1"), 761 | partNumber: aws.Int64(1), 762 | body: fileReader, 763 | ctx: context.Background(), 764 | }, 765 | wantErr: true, 766 | }, 767 | { 768 | name: "upload part with nil body", 769 | fields: fields{s3Client: s3Client}, 770 | args: args{ 771 | initKey: aws.String("partfile6.txt"), 772 | initBucket: aws.String("testbucket"), 773 | key: aws.String("partfile6.txt"), 774 | bucket: aws.String("testbucket"), 775 | partNumber: aws.Int64(1), 776 | body: nil, 777 | ctx: context.Background(), 778 | }, 779 | wantErr: true, 780 | }, 781 | { 782 | name: "upload part without part number", 783 | fields: fields{s3Client: s3Client}, 784 | args: args{ 785 | initKey: aws.String("partfile8.txt"), 786 | initBucket: aws.String("testbucket"), 787 | key: aws.String("partfile8.txt"), 788 | bucket: aws.String("testbucket"), 789 | 
partNumber: nil, 790 | body: fileReader, 791 | ctx: context.Background(), 792 | }, 793 | wantErr: true, 794 | }, 795 | { 796 | name: "upload part with part number lower than 1", 797 | fields: fields{s3Client: s3Client}, 798 | args: args{ 799 | initKey: aws.String("partfile7.txt"), 800 | initBucket: aws.String("testbucket"), 801 | key: aws.String("partfile7.txt"), 802 | bucket: aws.String("testbucket"), 803 | partNumber: aws.Int64(0), 804 | body: fileReader, 805 | ctx: context.Background(), 806 | }, 807 | wantErr: true, 808 | }, 809 | { 810 | name: "upload part with part number greater than 10000", 811 | fields: fields{s3Client: s3Client}, 812 | args: args{ 813 | initKey: aws.String("partfile7.txt"), 814 | initBucket: aws.String("testbucket"), 815 | key: aws.String("partfile7.txt"), 816 | bucket: aws.String("testbucket"), 817 | partNumber: aws.Int64(10001), 818 | body: fileReader, 819 | ctx: context.Background(), 820 | }, 821 | wantErr: true, 822 | }, 823 | } 824 | for _, tt := range tests { 825 | t.Run(tt.name, func(t *testing.T) { 826 | s := object.NewService(tt.fields.s3Client) 827 | 828 | initOutput, err := s.UploadInit( 829 | tt.args.ctx, 830 | tt.args.initKey, 831 | tt.args.initBucket, 832 | aws.String("text/plain"), 833 | metadata, 834 | ) 835 | if err != nil { 836 | t.Errorf("UploadService.UploadInit() error = %v, wantErr %v", err, tt.wantErr) 837 | return 838 | } 839 | got, err := s.UploadPart( 840 | tt.args.ctx, 841 | initOutput.UploadId, 842 | tt.args.key, 843 | tt.args.bucket, 844 | tt.args.partNumber, 845 | tt.args.body, 846 | ) 847 | if (err != nil) != tt.wantErr { 848 | t.Errorf("UploadService.UploadPart() error = %v, wantErr %v", err, tt.wantErr) 849 | return 850 | } 851 | if (got == nil || got.ETag == nil || *got.ETag == "") != tt.wantErr { 852 | t.Errorf("UploadService.UploadPart() = %v", got) 853 | } 854 | }) 855 | } 856 | 857 | t.Run("UploadPart - nil UploadID", func(t *testing.T) { 858 | s := object.NewService(s3Client) 859 | 860 | ctx := context.Background() 861 | got, err := s.UploadPart( 862 | ctx, 863 | nil, 864 | aws.String("testfile10.txt"), 865 | aws.String("testbucket"), 866 | aws.Int64(1), 867 | fileReader, 868 | ) 869 | if err == nil { 870 | t.Errorf("UploadService.UploadPart() error = %v, wantErr %v", err, true) 871 | return 872 | } 873 | if got != nil && (got.ETag != nil || *got.ETag != "") { 874 | t.Errorf("UploadService.UploadPart() = %v", got) 875 | } 876 | }) 877 | t.Run("UploadPart - empty UploadID", func(t *testing.T) { 878 | s := object.NewService(s3Client) 879 | 880 | ctx := context.Background() 881 | got, err := s.UploadPart( 882 | ctx, 883 | aws.String(""), 884 | aws.String("testfile10.txt"), 885 | aws.String("testbucket"), 886 | aws.Int64(1), 887 | fileReader, 888 | ) 889 | if err == nil { 890 | t.Errorf("UploadService.UploadPart() error = %v, wantErr %v", err, true) 891 | return 892 | } 893 | if got != nil && (got.ETag != nil || *got.ETag != "") { 894 | t.Errorf("UploadService.UploadPart() = %v", got) 895 | } 896 | }) 897 | } 898 | 899 | func TestService_UploadComplete(t *testing.T) { 900 | metadata := make(map[string]*string) 901 | metadata["test"] = aws.String("meta") 902 | file := make([]byte, 50<<20) 903 | if _, err := rand.Read(file); err != nil { 904 | t.Errorf("Could not generate file with error: %v", err) 905 | } 906 | fileReader := bytes.NewReader(file) 907 | 908 | type fields struct { 909 | s3Client *s3.S3 910 | } 911 | type args struct { 912 | initKey *string 913 | initBucket *string 914 | key *string 915 | bucket *string 916 | ctx 
context.Context 917 | } 918 | tests := []struct { 919 | name string 920 | fields fields 921 | args args 922 | wantErr bool 923 | }{ 924 | { 925 | name: "Upload Complete", 926 | fields: fields{s3Client: s3Client}, 927 | args: args{ 928 | initKey: aws.String("file.txt"), 929 | initBucket: aws.String("testbucket"), 930 | key: aws.String("file.txt"), 931 | bucket: aws.String("testbucket"), 932 | ctx: context.Background(), 933 | }, 934 | wantErr: false, 935 | }, 936 | { 937 | name: "Upload Complete to folder", 938 | fields: fields{s3Client: s3Client}, 939 | args: args{ 940 | initKey: aws.String("testfolder/file.txt"), 941 | initBucket: aws.String("testbucket"), 942 | key: aws.String("testfolder/file.txt"), 943 | bucket: aws.String("testbucket"), 944 | ctx: context.Background(), 945 | }, 946 | wantErr: false, 947 | }, 948 | { 949 | name: "Upload Complete with empty key", 950 | fields: fields{s3Client: s3Client}, 951 | args: args{ 952 | initKey: aws.String("file.txt"), 953 | initBucket: aws.String("testbucket"), 954 | key: aws.String(""), 955 | bucket: aws.String("testbucket"), 956 | ctx: context.Background(), 957 | }, 958 | wantErr: true, 959 | }, 960 | { 961 | name: "Upload Complete with nil key", 962 | fields: fields{s3Client: s3Client}, 963 | args: args{ 964 | initKey: aws.String("file.txt"), 965 | initBucket: aws.String("testbucket"), 966 | key: nil, 967 | bucket: aws.String("testbucket"), 968 | ctx: context.Background(), 969 | }, 970 | wantErr: true, 971 | }, 972 | { 973 | name: "Upload Complete with key mismatch", 974 | fields: fields{s3Client: s3Client}, 975 | args: args{ 976 | initKey: aws.String("file.txt"), 977 | initBucket: aws.String("testbucket"), 978 | key: aws.String("file1.txt"), 979 | bucket: aws.String("testbucket"), 980 | ctx: context.Background(), 981 | }, 982 | wantErr: true, 983 | }, 984 | { 985 | name: "Upload Complete with empty bucket", 986 | fields: fields{s3Client: s3Client}, 987 | args: args{ 988 | initKey: aws.String("file.txt"), 989 | initBucket: aws.String("testbucket"), 990 | key: aws.String("file.txt"), 991 | bucket: aws.String(""), 992 | ctx: context.Background(), 993 | }, 994 | wantErr: true, 995 | }, 996 | { 997 | name: "Upload Complete with nil bucket", 998 | fields: fields{s3Client: s3Client}, 999 | args: args{ 1000 | initKey: aws.String("file.txt"), 1001 | initBucket: aws.String("testbucket"), 1002 | key: aws.String("file.txt"), 1003 | bucket: nil, 1004 | ctx: context.Background(), 1005 | }, 1006 | wantErr: true, 1007 | }, 1008 | { 1009 | name: "Upload Complete with bucket mismatch", 1010 | fields: fields{s3Client: s3Client}, 1011 | args: args{ 1012 | initKey: aws.String("file.txt"), 1013 | initBucket: aws.String("testbucket"), 1014 | key: aws.String("file1.txt"), 1015 | bucket: aws.String("testbucket1"), 1016 | ctx: context.Background(), 1017 | }, 1018 | wantErr: true, 1019 | }, 1020 | } 1021 | for _, tt := range tests { 1022 | t.Run(tt.name, func(t *testing.T) { 1023 | s := object.NewService(tt.fields.s3Client) 1024 | 1025 | initOutput, err := s.UploadInit( 1026 | tt.args.ctx, 1027 | tt.args.initKey, 1028 | tt.args.initBucket, 1029 | aws.String("text/plain"), 1030 | metadata, 1031 | ) 1032 | if err != nil { 1033 | t.Errorf("UploadService.UploadInit() error = %v, wantErr %v", err, tt.wantErr) 1034 | return 1035 | } 1036 | 1037 | _, err = s.UploadPart( 1038 | tt.args.ctx, 1039 | initOutput.UploadId, 1040 | tt.args.initKey, 1041 | tt.args.initBucket, 1042 | aws.Int64(1), 1043 | fileReader) 1044 | 1045 | if err != nil { 1046 | 
t.Errorf("UploadService.UploadPart() error = %v, wantErr %v", err, tt.wantErr) 1047 | return 1048 | } 1049 | 1050 | got, err := s.UploadComplete(tt.args.ctx, initOutput.UploadId, tt.args.key, tt.args.bucket) 1051 | if (err != nil) != tt.wantErr { 1052 | t.Errorf("UploadService.UploadComplete() error = %v, wantErr %v", err, tt.wantErr) 1053 | return 1054 | } 1055 | if (got == nil) != tt.wantErr { 1056 | t.Errorf("UploadService.UploadComplete() = %v", got) 1057 | } 1058 | }) 1059 | } 1060 | t.Run("UploadComplete - empty uploadID ", func(t *testing.T) { 1061 | s := object.NewService(s3Client) 1062 | 1063 | ctx := context.Background() 1064 | got, err := s.UploadComplete(ctx, aws.String(""), aws.String("tests.txt"), aws.String("testbucket")) 1065 | if err == nil { 1066 | t.Errorf("UploadService.UploadComplete() error = %v, wantErr %v", err, true) 1067 | return 1068 | } 1069 | if got != nil { 1070 | t.Errorf("UploadService.UploadComplete() = %v", got) 1071 | return 1072 | } 1073 | }) 1074 | 1075 | t.Run("UploadComplete - nil uploadID ", func(t *testing.T) { 1076 | s := object.NewService(s3Client) 1077 | 1078 | ctx := context.Background() 1079 | got, err := s.UploadComplete(ctx, nil, aws.String("tests.txt"), aws.String("testbucket")) 1080 | if err == nil { 1081 | t.Errorf("UploadService.UploadComplete() error = %v, wantErr %v", err, true) 1082 | return 1083 | } 1084 | if got != nil { 1085 | t.Errorf("UploadService.UploadComplete() = %v", got) 1086 | return 1087 | } 1088 | }) 1089 | } 1090 | 1091 | func TestHandler_UploadMultipart(t *testing.T) { 1092 | // Init global values to use in tests. 1093 | file := make([]byte, 5<<20) 1094 | if _, err := rand.Read(file); err != nil { 1095 | t.Errorf("Could not generate file with error: %v", err) 1096 | } 1097 | metadata := make(map[string]string) 1098 | metadata["test"] = "testt" 1099 | 1100 | type args struct { 1101 | ctx context.Context 1102 | request *pb.UploadMultipartRequest 1103 | } 1104 | tests := []struct { 1105 | name string 1106 | args args 1107 | want *pb.UploadMultipartResponse 1108 | wantErr bool 1109 | }{ 1110 | { 1111 | name: "Upload Multipart", 1112 | args: args{ 1113 | ctx: context.Background(), 1114 | request: &pb.UploadMultipartRequest{ 1115 | Key: "testfile.txt", 1116 | Bucket: "testbucket", 1117 | ContentType: "text/plain", 1118 | File: file, 1119 | Metadata: metadata, 1120 | }, 1121 | }, 1122 | wantErr: false, 1123 | want: &pb.UploadMultipartResponse{ 1124 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 1125 | }, 1126 | }, 1127 | { 1128 | name: "Upload Multipart to folder", 1129 | args: args{ 1130 | ctx: context.Background(), 1131 | request: &pb.UploadMultipartRequest{ 1132 | Key: "testfolder/testfile.txt", 1133 | ContentType: "text/plain", 1134 | Bucket: "testbucket", 1135 | File: file, 1136 | Metadata: metadata, 1137 | }, 1138 | }, 1139 | wantErr: false, 1140 | want: &pb.UploadMultipartResponse{ 1141 | Location: fmt.Sprintf("%s/testbucket/testfolder/testfile.txt", s3Endpoint), 1142 | }, 1143 | }, 1144 | { 1145 | name: "Upload Multipart with empty key", 1146 | args: args{ 1147 | ctx: context.Background(), 1148 | request: &pb.UploadMultipartRequest{ 1149 | Key: "", 1150 | ContentType: "text/plain", 1151 | Bucket: "testbucket", 1152 | File: file, 1153 | Metadata: metadata, 1154 | }, 1155 | }, 1156 | wantErr: true, 1157 | }, 1158 | { 1159 | name: "Upload Multipart with empty bucket", 1160 | args: args{ 1161 | ctx: context.Background(), 1162 | request: &pb.UploadMultipartRequest{ 1163 | ContentType: "text/plain", 1164 
| Key: "testfile.txt", 1165 | Bucket: "", 1166 | File: file, 1167 | Metadata: metadata, 1168 | }, 1169 | }, 1170 | wantErr: true, 1171 | }, 1172 | { 1173 | name: "Upload Multipart with nil file", 1174 | args: args{ 1175 | ctx: context.Background(), 1176 | request: &pb.UploadMultipartRequest{ 1177 | ContentType: "text/plain", 1178 | Key: "testfile.txt", 1179 | Bucket: "testbucket", 1180 | File: nil, 1181 | Metadata: metadata, 1182 | }, 1183 | }, 1184 | wantErr: false, 1185 | want: &pb.UploadMultipartResponse{ 1186 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 1187 | }, 1188 | }, 1189 | { 1190 | name: "Upload Multipart with empty file", 1191 | args: args{ 1192 | ctx: context.Background(), 1193 | request: &pb.UploadMultipartRequest{ 1194 | ContentType: "text/plain", 1195 | Key: "testfile.txt", 1196 | Bucket: "testbucket", 1197 | File: make([]byte, 0), 1198 | Metadata: metadata, 1199 | }, 1200 | }, 1201 | wantErr: false, 1202 | want: &pb.UploadMultipartResponse{ 1203 | Location: fmt.Sprintf("%s/testbucket/testfile.txt", s3Endpoint), 1204 | }, 1205 | }, 1206 | { 1207 | name: "Upload Multipart with nil metadata", 1208 | args: args{ 1209 | ctx: context.Background(), 1210 | request: &pb.UploadMultipartRequest{ 1211 | Key: "testfile.txt", 1212 | ContentType: "text/plain", 1213 | Bucket: "testbucket", 1214 | File: file, 1215 | Metadata: nil, 1216 | }, 1217 | }, 1218 | wantErr: true, 1219 | }, 1220 | { 1221 | name: "Upload Multipart with empty metadata", 1222 | args: args{ 1223 | ctx: context.Background(), 1224 | request: &pb.UploadMultipartRequest{ 1225 | Key: "testfile.txt", 1226 | ContentType: "text/plain", 1227 | Bucket: "testbucket", 1228 | File: file, 1229 | Metadata: make(map[string]string), 1230 | }, 1231 | }, 1232 | wantErr: true, 1233 | }, 1234 | } 1235 | 1236 | // Create connection to server 1237 | ctx := context.Background() 1238 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 1239 | if err != nil { 1240 | t.Fatalf("Failed to dial bufnet: %v", err) 1241 | } 1242 | defer conn.Close() 1243 | 1244 | // Create client 1245 | client := pb.NewUploadClient(conn) 1246 | 1247 | for _, tt := range tests { 1248 | t.Run(tt.name, func(t *testing.T) { 1249 | got, err := client.UploadMultipart(tt.args.ctx, tt.args.request) 1250 | if (err != nil) != tt.wantErr { 1251 | t.Errorf("UploadHandler.UploadMultipart() error = %v, wantErr %v", err, tt.wantErr) 1252 | return 1253 | } 1254 | if !reflect.DeepEqual(got, tt.want) { 1255 | t.Errorf("UploadHandler.UploadMultipart() = %v, want %v", got, tt.want) 1256 | } 1257 | }) 1258 | } 1259 | } 1260 | 1261 | func TestService_UploadAbort(t *testing.T) { 1262 | metadata := make(map[string]*string) 1263 | metadata["test"] = aws.String("testt") 1264 | file := make([]byte, 50<<20) 1265 | if _, err := rand.Read(file); err != nil { 1266 | t.Errorf("Could not generate file with error: %v", err) 1267 | } 1268 | fileReader := bytes.NewReader(file) 1269 | type fields struct { 1270 | s3Client *s3.S3 1271 | } 1272 | type args struct { 1273 | ctx aws.Context 1274 | key *string 1275 | bucket *string 1276 | } 1277 | tests := []struct { 1278 | name string 1279 | fields fields 1280 | args args 1281 | want bool 1282 | wantErr bool 1283 | }{ 1284 | { 1285 | name: "init upload", 1286 | fields: fields{s3Client: s3Client}, 1287 | args: args{ 1288 | key: aws.String("testfile.txt"), 1289 | bucket: aws.String("testbucket"), 1290 | ctx: context.Background(), 1291 | }, 1292 | wantErr: false, 1293 | want: true, 1294 | }, 
1295 | } 1296 | for _, tt := range tests { 1297 | t.Run(tt.name, func(t *testing.T) { 1298 | s := object.NewService(tt.fields.s3Client) 1299 | 1300 | initOutput, err := s.UploadInit(tt.args.ctx, tt.args.key, tt.args.bucket, aws.String("text/plain"), metadata) 1301 | if err != nil { 1302 | t.Errorf("UploadService.UploadInit() error = %v, wantErr %v", err, tt.wantErr) 1303 | return 1304 | } 1305 | 1306 | _, err = s.UploadPart( 1307 | tt.args.ctx, 1308 | initOutput.UploadId, 1309 | tt.args.key, 1310 | tt.args.bucket, 1311 | aws.Int64(1), 1312 | fileReader, 1313 | ) 1314 | if (err != nil) != tt.wantErr { 1315 | t.Errorf("UploadService.UploadPart() error = %v, wantErr %v", err, tt.wantErr) 1316 | return 1317 | } 1318 | 1319 | got, err := s.UploadAbort(tt.args.ctx, initOutput.UploadId, tt.args.key, tt.args.bucket) 1320 | if (err != nil) != tt.wantErr { 1321 | t.Errorf("UploadService.UploadAbort() error = %v, wantErr %v", err, tt.wantErr) 1322 | return 1323 | } 1324 | if got != tt.want { 1325 | t.Errorf("UploadService.UploadAbort() = %v, want %v", got, tt.want) 1326 | } 1327 | }) 1328 | } 1329 | } 1330 | 1331 | func TestService_DeleteObjects(t *testing.T) { 1332 | uploadservice := object.NewService(s3Client) 1333 | key1, err := uploadservice.UploadFile( 1334 | context.Background(), 1335 | bytes.NewReader([]byte("Hello, World!")), 1336 | aws.String("file1"), 1337 | aws.String("testbucket"), 1338 | aws.String("text/plain"), 1339 | nil, 1340 | ) 1341 | if err != nil { 1342 | t.Errorf("Could not create file with error: %v", err) 1343 | } 1344 | key2, err := uploadservice.UploadFile( 1345 | context.Background(), 1346 | bytes.NewReader([]byte("Hello, World!")), 1347 | aws.String("file2"), 1348 | aws.String("testbucket"), 1349 | aws.String("text/plain"), 1350 | nil, 1351 | ) 1352 | if err != nil { 1353 | t.Errorf("Could not create file with error: %v", err) 1354 | } 1355 | key3, err := uploadservice.UploadFile( 1356 | context.Background(), 1357 | bytes.NewReader([]byte("Hello, World!")), 1358 | aws.String("file3"), 1359 | aws.String("testbucket"), 1360 | aws.String("text/plain"), 1361 | nil, 1362 | ) 1363 | if err != nil { 1364 | t.Errorf("Could not create file with error: %v", err) 1365 | } 1366 | key4, err := uploadservice.UploadFile( 1367 | context.Background(), 1368 | bytes.NewReader([]byte("Hello, World!")), 1369 | aws.String("file4"), 1370 | aws.String("testbucket"), 1371 | aws.String("text/plain"), 1372 | nil, 1373 | ) 1374 | if err != nil { 1375 | t.Errorf("Could not create file with error: %v", err) 1376 | } 1377 | type fields struct { 1378 | s3Client *s3.S3 1379 | } 1380 | type args struct { 1381 | ctx aws.Context 1382 | bucket *string 1383 | keys []*string 1384 | } 1385 | tests := []struct { 1386 | name string 1387 | fields fields 1388 | args args 1389 | wantSuccess []string 1390 | wantFailed []string 1391 | wantErr bool 1392 | }{ 1393 | { 1394 | name: "delete only one object", 1395 | fields: fields{ 1396 | s3Client: s3Client, 1397 | }, 1398 | args: args{ 1399 | keys: []*string{key1}, 1400 | bucket: aws.String("testbucket"), 1401 | ctx: context.Background(), 1402 | }, 1403 | wantSuccess: []string{*key1}, 1404 | wantFailed: []string{}, 1405 | wantErr: false, 1406 | }, 1407 | { 1408 | name: "delete two objects", 1409 | fields: fields{ 1410 | s3Client: s3Client, 1411 | }, 1412 | args: args{ 1413 | keys: []*string{key2, key3}, 1414 | bucket: aws.String("testbucket"), 1415 | ctx: context.Background(), 1416 | }, 1417 | wantSuccess: []string{*key2, *key3}, 1418 | wantFailed: []string{}, 1419 | 
wantErr: false, 1420 | }, 1421 | { 1422 | name: "delete valid and invalid objects", 1423 | fields: fields{ 1424 | s3Client: s3Client, 1425 | }, 1426 | args: args{ 1427 | keys: []*string{key4, aws.String("oneoneone")}, 1428 | bucket: aws.String("testbucket"), 1429 | ctx: context.Background(), 1430 | }, 1431 | wantSuccess: []string{*key4, "oneoneone"}, 1432 | wantFailed: []string{}, 1433 | wantErr: false, 1434 | }, 1435 | { 1436 | name: "delete invalid object", 1437 | fields: fields{ 1438 | s3Client: s3Client, 1439 | }, 1440 | args: args{ 1441 | keys: []*string{aws.String("oneoneone")}, 1442 | bucket: aws.String("testbucket"), 1443 | ctx: context.Background(), 1444 | }, 1445 | // as S3 behave, if the key doesnt exist it'll be returned as removed. 1446 | wantSuccess: []string{"oneoneone"}, 1447 | wantFailed: []string{}, 1448 | wantErr: false, 1449 | }, 1450 | { 1451 | name: "delete nil keys", 1452 | fields: fields{ 1453 | s3Client: s3Client, 1454 | }, 1455 | args: args{ 1456 | keys: nil, 1457 | bucket: aws.String("testbucket"), 1458 | ctx: context.Background(), 1459 | }, 1460 | wantSuccess: []string{}, 1461 | wantFailed: []string{}, 1462 | wantErr: true, 1463 | }, 1464 | { 1465 | name: "delete nil bucket", 1466 | fields: fields{ 1467 | s3Client: s3Client, 1468 | }, 1469 | args: args{ 1470 | keys: []*string{aws.String("valid")}, 1471 | bucket: nil, 1472 | ctx: context.Background(), 1473 | }, 1474 | wantSuccess: []string{}, 1475 | wantFailed: []string{}, 1476 | wantErr: true, 1477 | }, 1478 | { 1479 | name: "delete empty bucket", 1480 | fields: fields{ 1481 | s3Client: s3Client, 1482 | }, 1483 | args: args{ 1484 | keys: []*string{aws.String("valid")}, 1485 | bucket: aws.String(""), 1486 | ctx: context.Background(), 1487 | }, 1488 | wantSuccess: []string{}, 1489 | wantFailed: []string{}, 1490 | wantErr: true, 1491 | }, 1492 | { 1493 | name: "delete empty key", 1494 | fields: fields{ 1495 | s3Client: s3Client, 1496 | }, 1497 | args: args{ 1498 | keys: []*string{}, 1499 | bucket: aws.String("testbucket"), 1500 | ctx: context.Background(), 1501 | }, 1502 | wantSuccess: []string{}, 1503 | wantFailed: []string{}, 1504 | wantErr: true, 1505 | }, 1506 | } 1507 | for _, tt := range tests { 1508 | t.Run(tt.name, func(t *testing.T) { 1509 | s := object.NewService(tt.fields.s3Client) 1510 | got, err := s.DeleteObjects(tt.args.ctx, tt.args.bucket, tt.args.keys) 1511 | if (err != nil) != tt.wantErr { 1512 | t.Errorf("Service.DeleteObjects() error = %v, wantErr %v", err, tt.wantErr) 1513 | return 1514 | } 1515 | deletedKeys := make([]string, 0) 1516 | if got != nil && got.Deleted != nil { 1517 | for _, deletedObject := range got.Deleted { 1518 | deletedKeys = append(deletedKeys, *(deletedObject.Key)) 1519 | } 1520 | } 1521 | failedKeys := make([]string, 0) 1522 | if got != nil && got.Errors != nil { 1523 | for _, erroredObject := range got.Errors { 1524 | failedKeys = append(failedKeys, *(erroredObject.Key)) 1525 | } 1526 | } 1527 | if !(reflect.DeepEqual(deletedKeys, tt.wantSuccess) && reflect.DeepEqual(failedKeys, tt.wantFailed)) { 1528 | t.Errorf("Service.DeleteObjects() got unexpected output") 1529 | } 1530 | }) 1531 | } 1532 | } 1533 | 1534 | func TestHandler_DeleteObjects(t *testing.T) { 1535 | uploadservice := object.NewService(s3Client) 1536 | key1, err := uploadservice.UploadFile( 1537 | context.Background(), 1538 | bytes.NewReader([]byte("Hello, World!")), 1539 | aws.String("file1"), 1540 | aws.String("testbucket"), 1541 | aws.String("text/plain"), 1542 | nil, 1543 | ) 1544 | if err != nil { 1545 | 
t.Errorf("Could not create file with error: %v", err) 1546 | } 1547 | key2, err := uploadservice.UploadFile( 1548 | context.Background(), 1549 | bytes.NewReader([]byte("Hello, World!")), 1550 | aws.String("file2"), 1551 | aws.String("testbucket"), 1552 | aws.String("text/plain"), 1553 | nil, 1554 | ) 1555 | if err != nil { 1556 | t.Errorf("Could not create file with error: %v", err) 1557 | } 1558 | key3, err := uploadservice.UploadFile( 1559 | context.Background(), 1560 | bytes.NewReader([]byte("Hello, World!")), 1561 | aws.String("file3"), 1562 | aws.String("testbucket"), 1563 | aws.String("text/plain"), 1564 | nil, 1565 | ) 1566 | if err != nil { 1567 | t.Errorf("Could not create file with error: %v", err) 1568 | } 1569 | key4, err := uploadservice.UploadFile( 1570 | context.Background(), 1571 | bytes.NewReader([]byte("Hello, World!")), 1572 | aws.String("file4"), 1573 | aws.String("testbucket"), 1574 | aws.String("text/plain"), 1575 | nil, 1576 | ) 1577 | if err != nil { 1578 | t.Errorf("Could not create file with error: %v", err) 1579 | } 1580 | type args struct { 1581 | ctx context.Context 1582 | request *pb.DeleteObjectsRequest 1583 | } 1584 | tests := []struct { 1585 | name string 1586 | args args 1587 | want *pb.DeleteObjectsResponse 1588 | wantErr bool 1589 | }{ 1590 | { 1591 | name: "delete only one object", 1592 | args: args{ 1593 | ctx: context.Background(), 1594 | request: &pb.DeleteObjectsRequest{ 1595 | Bucket: "testbucket", 1596 | Keys: []string{*key1}, 1597 | }, 1598 | }, 1599 | want: &pb.DeleteObjectsResponse{ 1600 | Deleted: []string{*key1}, 1601 | Failed: []string{}, 1602 | }, 1603 | wantErr: false, 1604 | }, 1605 | { 1606 | name: "delete two objects", 1607 | args: args{ 1608 | ctx: context.Background(), 1609 | request: &pb.DeleteObjectsRequest{ 1610 | Bucket: "testbucket", 1611 | Keys: []string{*key2, *key3}, 1612 | }, 1613 | }, 1614 | want: &pb.DeleteObjectsResponse{ 1615 | Deleted: []string{*key2, *key3}, 1616 | Failed: []string{}, 1617 | }, 1618 | wantErr: false, 1619 | }, 1620 | { 1621 | name: "delete valid and invalid objects", 1622 | args: args{ 1623 | ctx: context.Background(), 1624 | request: &pb.DeleteObjectsRequest{ 1625 | Bucket: "testbucket", 1626 | Keys: []string{*key4, "oneoneone"}, 1627 | }, 1628 | }, 1629 | want: &pb.DeleteObjectsResponse{ 1630 | Deleted: []string{*key4, "oneoneone"}, 1631 | Failed: []string{}, 1632 | }, 1633 | wantErr: false, 1634 | }, 1635 | { 1636 | name: "delete invalid object", 1637 | args: args{ 1638 | ctx: context.Background(), 1639 | request: &pb.DeleteObjectsRequest{ 1640 | Bucket: "testbucket", 1641 | Keys: []string{"oneoneone"}, 1642 | }, 1643 | }, 1644 | want: &pb.DeleteObjectsResponse{ 1645 | Deleted: []string{"oneoneone"}, 1646 | Failed: []string{}, 1647 | }, 1648 | wantErr: false, 1649 | }, 1650 | { 1651 | name: "delete empty bucket", 1652 | args: args{ 1653 | ctx: context.Background(), 1654 | request: &pb.DeleteObjectsRequest{ 1655 | Bucket: "", 1656 | Keys: []string{"valid"}, 1657 | }, 1658 | }, 1659 | want: &pb.DeleteObjectsResponse{ 1660 | Deleted: []string{}, 1661 | Failed: []string{}, 1662 | }, 1663 | wantErr: true, 1664 | }, 1665 | { 1666 | name: "delete empty key", 1667 | args: args{ 1668 | ctx: context.Background(), 1669 | request: &pb.DeleteObjectsRequest{ 1670 | Bucket: "testbucket", 1671 | Keys: []string{""}, 1672 | }, 1673 | }, 1674 | want: &pb.DeleteObjectsResponse{ 1675 | Deleted: []string{}, 1676 | Failed: []string{}, 1677 | }, 1678 | wantErr: true, 1679 | }, 1680 | } 1681 | 1682 | // Create connection to 
server 1683 | ctx := context.Background() 1684 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 1685 | if err != nil { 1686 | t.Fatalf("Failed to dial bufnet: %v", err) 1687 | } 1688 | defer conn.Close() 1689 | 1690 | // Create client 1691 | client := pb.NewUploadClient(conn) 1692 | 1693 | for _, tt := range tests { 1694 | t.Run(tt.name, func(t *testing.T) { 1695 | got, err := client.DeleteObjects(tt.args.ctx, tt.args.request) 1696 | if (err != nil) != tt.wantErr { 1697 | t.Errorf("Handler.DeleteObjects() error = %v, wantErr %v", err, tt.wantErr) 1698 | return 1699 | } 1700 | 1701 | // It's needed, don't know why. 1702 | gotFailed := append([]string{}, got.GetFailed()...) 1703 | wantFailed := append([]string{}, tt.want.GetFailed()...) 1704 | gotDeleted := append([]string{}, got.GetDeleted()...) 1705 | wantDeleted := append([]string{}, tt.want.GetDeleted()...) 1706 | 1707 | if !(reflect.DeepEqual(gotDeleted, wantDeleted) && reflect.DeepEqual(gotFailed, wantFailed)) { 1708 | t.Errorf("Handler.DeleteObjects() = %v, want %v", got, tt.want) 1709 | } 1710 | }) 1711 | } 1712 | } 1713 | 1714 | func TestHandler_CopyObject(t *testing.T) { 1715 | // Upload files for testing 1716 | uploadservice := object.NewService(s3Client) 1717 | _, err := uploadservice.UploadFile( 1718 | context.Background(), 1719 | bytes.NewReader([]byte("Hello, World!")), 1720 | aws.String("file1"), 1721 | aws.String("testbucket"), 1722 | aws.String("text/plain"), 1723 | nil, 1724 | ) 1725 | if err != nil { 1726 | t.Errorf("Could not create file with error: %v", err) 1727 | } 1728 | 1729 | // Init structs 1730 | type fields struct { 1731 | s3Client *s3.S3 1732 | } 1733 | 1734 | type args struct { 1735 | ctx aws.Context 1736 | request *pb.CopyObjectRequest 1737 | } 1738 | 1739 | tests := []struct { 1740 | name string 1741 | fields fields 1742 | args args 1743 | want *pb.CopyObjectResponse 1744 | wantErr bool 1745 | }{ 1746 | { 1747 | name: "copy one object", 1748 | fields: fields{ 1749 | s3Client: s3Client, 1750 | }, 1751 | args: args{ 1752 | ctx: context.Background(), 1753 | request: &pb.CopyObjectRequest{ 1754 | BucketSrc: "testbucket", 1755 | BucketDest: "testbucket1", 1756 | KeySrc: "file1", 1757 | KeyDest: "newfile1", 1758 | }, 1759 | }, 1760 | want: &pb.CopyObjectResponse{Copied: "file1"}, 1761 | wantErr: false, 1762 | }, 1763 | { 1764 | name: "source bucket doesnt exist", 1765 | fields: fields{ 1766 | s3Client: s3Client, 1767 | }, 1768 | args: args{ 1769 | ctx: context.Background(), 1770 | request: &pb.CopyObjectRequest{ 1771 | BucketSrc: "notexistbucket", 1772 | BucketDest: "testbucket1", 1773 | KeySrc: "file1", 1774 | KeyDest: "newfile1", 1775 | }, 1776 | }, 1777 | wantErr: true, 1778 | }, 1779 | { 1780 | name: "source object doesnt exist", 1781 | fields: fields{ 1782 | s3Client: s3Client, 1783 | }, 1784 | args: args{ 1785 | ctx: context.Background(), 1786 | request: &pb.CopyObjectRequest{ 1787 | BucketSrc: "testbucket", 1788 | BucketDest: "testbucket1", 1789 | KeySrc: "notexistobject", 1790 | KeyDest: "newfile1", 1791 | }, 1792 | }, 1793 | wantErr: true, 1794 | }, 1795 | { 1796 | name: "dest bucket is null", 1797 | fields: fields{ 1798 | s3Client: s3Client, 1799 | }, 1800 | args: args{ 1801 | ctx: context.Background(), 1802 | request: &pb.CopyObjectRequest{ 1803 | BucketSrc: "testbucket", 1804 | BucketDest: "", 1805 | KeySrc: "notexistobject", 1806 | KeyDest: "newfile1", 1807 | }, 1808 | }, 1809 | wantErr: true, 1810 | }, 1811 | { 1812 | name: "source bucket is 
null", 1813 | fields: fields{ 1814 | s3Client: s3Client, 1815 | }, 1816 | args: args{ 1817 | ctx: context.Background(), 1818 | request: &pb.CopyObjectRequest{ 1819 | BucketSrc: "", 1820 | BucketDest: "testbucket1", 1821 | KeySrc: "notexistobject", 1822 | KeyDest: "newfile1", 1823 | }, 1824 | }, 1825 | wantErr: true, 1826 | }, 1827 | { 1828 | name: "source key is null", 1829 | fields: fields{ 1830 | s3Client: s3Client, 1831 | }, 1832 | args: args{ 1833 | ctx: context.Background(), 1834 | request: &pb.CopyObjectRequest{ 1835 | BucketSrc: "testbucket", 1836 | BucketDest: "testbucket1", 1837 | KeySrc: "", 1838 | KeyDest: "newfile1", 1839 | }, 1840 | }, 1841 | wantErr: true, 1842 | }, 1843 | { 1844 | name: "dest key is null", 1845 | fields: fields{ 1846 | s3Client: s3Client, 1847 | }, 1848 | args: args{ 1849 | ctx: context.Background(), 1850 | request: &pb.CopyObjectRequest{ 1851 | BucketSrc: "testbucket", 1852 | BucketDest: "testbucket1", 1853 | KeySrc: "notexistobject", 1854 | KeyDest: "", 1855 | }, 1856 | }, 1857 | wantErr: true, 1858 | }, 1859 | } 1860 | 1861 | // Create connection to server 1862 | ctx := context.Background() 1863 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 1864 | if err != nil { 1865 | t.Fatalf("Failed to dial bufnet: %v", err) 1866 | } 1867 | defer conn.Close() 1868 | 1869 | // Create client 1870 | client := pb.NewUploadClient(conn) 1871 | 1872 | // Iterate over test cases 1873 | for _, tt := range tests { 1874 | t.Run(tt.name, func(t *testing.T) { 1875 | got, err := client.CopyObject(tt.args.ctx, tt.args.request) 1876 | if (err != nil) != tt.wantErr { 1877 | t.Errorf("Service.CopyObject() error = %v, wantErr %v", err, tt.wantErr) 1878 | return 1879 | } 1880 | 1881 | // TODO: check if deepequal or cmp.equal 1882 | // reflect.DeepEqual is often incorrectly used to compare two like structs 1883 | // cmp.Equal is a better tool for comparing structs. 
1884 | // https://golang.org/pkg/reflect/#DeepEqual 1885 | if got != nil && !cmp.Equal(tt.want, got, cmpopts.IgnoreUnexported(pb.CopyObjectResponse{})) { 1886 | t.Errorf("UploadHandler.CopyObject() = %v, want %v", got, tt.want) 1887 | } 1888 | }) 1889 | } 1890 | } 1891 | 1892 | func TestService_CopyObject(t *testing.T) { 1893 | // Upload files for testing 1894 | // file size less than 5GB 1895 | uploadservice := object.NewService(s3Client) 1896 | _, err := uploadservice.UploadFile( 1897 | context.Background(), 1898 | bytes.NewReader([]byte("Hello, World!")), 1899 | aws.String("file1"), 1900 | aws.String("testbucket"), 1901 | aws.String("text/plain"), 1902 | nil, 1903 | ) 1904 | if err != nil { 1905 | t.Errorf("Could not create file with error: %v", err) 1906 | } 1907 | 1908 | // file larger than 5GB 1909 | hugefile := make([]byte, 5<<25) 1910 | if _, err := rand.Read(hugefile); err != nil { 1911 | t.Errorf("Could not generate file with error: %v", err) 1912 | } 1913 | 1914 | _, err = uploadservice.UploadFile( 1915 | context.Background(), 1916 | bytes.NewReader(hugefile), 1917 | aws.String("file2"), 1918 | aws.String("testbucket"), 1919 | aws.String("text/plain"), 1920 | nil, 1921 | ) 1922 | 1923 | if err != nil { 1924 | t.Errorf("Could not create file with error: %v", err) 1925 | } 1926 | 1927 | // Init structs 1928 | type fields struct { 1929 | s3Client *s3.S3 1930 | } 1931 | 1932 | type args struct { 1933 | ctx context.Context // TODO: check if context.context or aws.context 1934 | bucketSrc *string 1935 | bucketDest *string 1936 | keySrc *string 1937 | keyDest *string 1938 | } 1939 | 1940 | tests := []struct { 1941 | name string 1942 | fields fields 1943 | args args 1944 | want *string 1945 | wantErr bool 1946 | }{ 1947 | { 1948 | name: "copy small object", 1949 | fields: fields{ 1950 | s3Client: s3Client, 1951 | }, 1952 | args: args{ 1953 | ctx: context.Background(), 1954 | bucketSrc: aws.String("testbucket"), 1955 | bucketDest: aws.String("testbucket1"), 1956 | keySrc: aws.String("file1"), 1957 | keyDest: aws.String("newfile1"), 1958 | }, 1959 | want: aws.String("file1"), 1960 | wantErr: false, 1961 | }, 1962 | { 1963 | name: "copy large object", 1964 | fields: fields{ 1965 | s3Client: s3Client, 1966 | }, 1967 | args: args{ 1968 | ctx: context.Background(), 1969 | bucketSrc: aws.String("testbucket"), 1970 | bucketDest: aws.String("testbucket1"), 1971 | keySrc: aws.String("file2"), 1972 | keyDest: aws.String("newfile2"), 1973 | }, 1974 | wantErr: true, 1975 | }, 1976 | { 1977 | name: "source bucket doesnt exist", 1978 | fields: fields{ 1979 | s3Client: s3Client, 1980 | }, 1981 | args: args{ 1982 | ctx: context.Background(), 1983 | bucketSrc: aws.String("notexistbucket"), 1984 | bucketDest: aws.String("testbucket1"), 1985 | keySrc: aws.String("file1"), 1986 | keyDest: aws.String("newfile1"), 1987 | }, 1988 | wantErr: true, 1989 | }, 1990 | { 1991 | name: "source object doesnt exist", 1992 | fields: fields{ 1993 | s3Client: s3Client, 1994 | }, 1995 | args: args{ 1996 | ctx: context.Background(), 1997 | bucketSrc: aws.String("testbucket"), 1998 | bucketDest: aws.String("testbucket1"), 1999 | keySrc: aws.String("notexistobject"), 2000 | keyDest: aws.String("newfile1"), 2001 | }, 2002 | wantErr: true, 2003 | }, 2004 | { 2005 | name: "dest bucket is null", 2006 | fields: fields{ 2007 | s3Client: s3Client, 2008 | }, 2009 | args: args{ 2010 | ctx: context.Background(), 2011 | bucketSrc: aws.String("testbucket"), 2012 | bucketDest: aws.String(""), 2013 | keySrc: aws.String("notexistobject"), 
2014 | keyDest: aws.String("newfile1"), 2015 | }, 2016 | wantErr: true, 2017 | }, 2018 | { 2019 | name: "source bucket is null", 2020 | fields: fields{ 2021 | s3Client: s3Client, 2022 | }, 2023 | args: args{ 2024 | ctx: context.Background(), 2025 | bucketSrc: aws.String(""), 2026 | bucketDest: aws.String("testbucket1"), 2027 | keySrc: aws.String("notexistobject"), 2028 | keyDest: aws.String("newfile1"), 2029 | }, 2030 | wantErr: true, 2031 | }, 2032 | { 2033 | name: "source key is null", 2034 | fields: fields{ 2035 | s3Client: s3Client, 2036 | }, 2037 | args: args{ 2038 | ctx: context.Background(), 2039 | bucketSrc: aws.String("testbucket"), 2040 | bucketDest: aws.String("testbucket1"), 2041 | keySrc: aws.String(""), 2042 | keyDest: aws.String("newfile1"), 2043 | }, 2044 | wantErr: true, 2045 | }, 2046 | { 2047 | name: "dest key is null", 2048 | fields: fields{ 2049 | s3Client: s3Client, 2050 | }, 2051 | args: args{ 2052 | ctx: context.Background(), 2053 | bucketSrc: aws.String("testbucket"), 2054 | bucketDest: aws.String("testbucket1"), 2055 | keySrc: aws.String("notexistobject"), 2056 | keyDest: aws.String(""), 2057 | }, 2058 | wantErr: true, 2059 | }, 2060 | } 2061 | 2062 | // Iterate over test cases 2063 | for _, tt := range tests { 2064 | t.Run(tt.name, func(t *testing.T) { 2065 | s := object.NewService(s3Client) 2066 | 2067 | got, err := s.CopyObject( 2068 | tt.args.ctx, 2069 | tt.args.bucketSrc, 2070 | tt.args.bucketDest, 2071 | tt.args.keySrc, 2072 | tt.args.keyDest, 2073 | ) 2074 | 2075 | if (err != nil) != tt.wantErr { 2076 | t.Errorf("Service.CopyObject() error = %v, wantErr %v", err, tt.wantErr) 2077 | return 2078 | } 2079 | 2080 | // reflect.DeepEqual is often incorrectly used to compare two like structs 2081 | // cmp.Equal is a better tool for comparing structs. 
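// (In this service-level test got and tt.want are *string, so cmp.Equal simply follows the pointers and compares the string values; the IgnoreUnexported(pb.CopyObjectResponse{}) option appears to be carried over from the handler test and has no effect here.)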
2082 | // https://golang.org/pkg/reflect/#DeepEqual 2083 | if got != nil && !cmp.Equal(tt.want, got, cmpopts.IgnoreUnexported(pb.CopyObjectResponse{})) { 2084 | t.Errorf("UploadHandler.CopyObject() = %v, want %v", got, tt.want) 2085 | } 2086 | }) 2087 | } 2088 | } 2089 | 2090 | func TestHandler_MoveObject(t *testing.T) { 2091 | // Upload files for testing 2092 | uploadservice := object.NewService(s3Client) 2093 | 2094 | _, err := uploadservice.UploadFile( 2095 | context.Background(), 2096 | bytes.NewReader([]byte("Hello, World!")), 2097 | aws.String("file1"), 2098 | aws.String("testbucket"), 2099 | aws.String("text/plain"), 2100 | nil, 2101 | ) 2102 | if err != nil { 2103 | t.Errorf("Could not create file with error: %v", err) 2104 | } 2105 | 2106 | // Init structs 2107 | type fields struct { 2108 | s3Client *s3.S3 2109 | } 2110 | 2111 | type args struct { 2112 | ctx aws.Context 2113 | request *pb.MoveObjectRequest 2114 | } 2115 | 2116 | tests := []struct { 2117 | name string 2118 | fields fields 2119 | args args 2120 | want *pb.MoveObjectResponse 2121 | wantErr bool 2122 | }{ 2123 | { 2124 | name: "move one object", 2125 | fields: fields{ 2126 | s3Client: s3Client, 2127 | }, 2128 | args: args{ 2129 | ctx: context.Background(), 2130 | request: &pb.MoveObjectRequest{ 2131 | BucketSrc: "testbucket", 2132 | BucketDest: "testbucket1", 2133 | KeySrc: "file1", 2134 | KeyDest: "newfile1", 2135 | }, 2136 | }, 2137 | want: &pb.MoveObjectResponse{Moved: "file1"}, 2138 | wantErr: false, 2139 | }, 2140 | { 2141 | name: "source bucket doesnt exist", 2142 | fields: fields{ 2143 | s3Client: s3Client, 2144 | }, 2145 | args: args{ 2146 | ctx: context.Background(), 2147 | request: &pb.MoveObjectRequest{ 2148 | BucketSrc: "notexistbucket", 2149 | BucketDest: "testbucket1", 2150 | KeySrc: "file1", 2151 | KeyDest: "newfile1", 2152 | }, 2153 | }, 2154 | wantErr: true, 2155 | }, 2156 | { 2157 | name: "source object doesnt exist", 2158 | fields: fields{ 2159 | s3Client: s3Client, 2160 | }, 2161 | args: args{ 2162 | ctx: context.Background(), 2163 | request: &pb.MoveObjectRequest{ 2164 | BucketSrc: "testbucket", 2165 | BucketDest: "testbucket1", 2166 | KeySrc: "notexistobject", 2167 | KeyDest: "newfile1", 2168 | }, 2169 | }, 2170 | wantErr: true, 2171 | }, 2172 | { 2173 | name: "dest bucket is null", 2174 | fields: fields{ 2175 | s3Client: s3Client, 2176 | }, 2177 | args: args{ 2178 | ctx: context.Background(), 2179 | request: &pb.MoveObjectRequest{ 2180 | BucketSrc: "testbucket", 2181 | BucketDest: "", 2182 | KeySrc: "notexistobject", 2183 | KeyDest: "newfile1", 2184 | }, 2185 | }, 2186 | wantErr: true, 2187 | }, 2188 | { 2189 | name: "source bucket is null", 2190 | fields: fields{ 2191 | s3Client: s3Client, 2192 | }, 2193 | args: args{ 2194 | ctx: context.Background(), 2195 | request: &pb.MoveObjectRequest{ 2196 | BucketSrc: "", 2197 | BucketDest: "testbucket1", 2198 | KeySrc: "notexistobject", 2199 | KeyDest: "newfile1", 2200 | }, 2201 | }, 2202 | wantErr: true, 2203 | }, 2204 | { 2205 | name: "source key is null", 2206 | fields: fields{ 2207 | s3Client: s3Client, 2208 | }, 2209 | args: args{ 2210 | ctx: context.Background(), 2211 | request: &pb.MoveObjectRequest{ 2212 | BucketSrc: "testbucket", 2213 | BucketDest: "testbucket1", 2214 | KeySrc: "", 2215 | KeyDest: "newfile1", 2216 | }, 2217 | }, 2218 | wantErr: true, 2219 | }, 2220 | { 2221 | name: "dest key is null", 2222 | fields: fields{ 2223 | s3Client: s3Client, 2224 | }, 2225 | args: args{ 2226 | ctx: context.Background(), 2227 | request: 
&pb.MoveObjectRequest{ 2228 | BucketSrc: "testbucket", 2229 | BucketDest: "testbucket1", 2230 | KeySrc: "notexistobject", 2231 | KeyDest: "", 2232 | }, 2233 | }, 2234 | wantErr: true, 2235 | }, 2236 | } 2237 | 2238 | // Create connection to server 2239 | ctx := context.Background() 2240 | conn, err := grpc.DialContext(ctx, "bufnet", grpc.WithContextDialer(bufDialer), grpc.WithInsecure()) 2241 | if err != nil { 2242 | t.Fatalf("Failed to dial bufnet: %v", err) 2243 | } 2244 | defer conn.Close() 2245 | 2246 | // Create client 2247 | client := pb.NewUploadClient(conn) 2248 | 2249 | // Iterate over test cases 2250 | for _, tt := range tests { 2251 | t.Run(tt.name, func(t *testing.T) { 2252 | got, err := client.MoveObject(tt.args.ctx, tt.args.request) 2253 | if (err != nil) != tt.wantErr { 2254 | t.Errorf("Service.MoveObject() error = %v, wantErr %v", err, tt.wantErr) 2255 | return 2256 | } 2257 | 2258 | // TODO: check if deepequal or cmp.equal 2259 | // reflect.DeepEqual is often incorrectly used to compare two like structs 2260 | // cmp.Equal is a better tool for comparing structs. 2261 | // https://golang.org/pkg/reflect/#DeepEqual 2262 | if got != nil && !cmp.Equal(tt.want, got, cmpopts.IgnoreUnexported(pb.MoveObjectResponse{})) { 2263 | t.Errorf("UploadHandler.MoveObject() = %v, want %v", got, tt.want) 2264 | } 2265 | }) 2266 | } 2267 | } 2268 | 2269 | // TODO: TestHandler_UploadAbort 2270 | // TODO: TestHandler_UploadComplete 2271 | // TODO: TestHandler_UploadPart 2272 | --------------------------------------------------------------------------------