├── examples ├── python │ ├── requirements.txt │ ├── README.md │ └── nats_s3_example.py ├── credentials.json ├── docker-compose.yml └── README.md ├── scripts ├── benchmark │ ├── credentials.json │ ├── benchmark.sh │ └── README.md ├── ci-conformance.sh └── ci-conformance-full.sh ├── .dockerignore ├── credentials.example.json ├── .github ├── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── pull_request_template.md ├── workflows │ ├── release.yml │ ├── coverage.yml │ ├── release-image.yml │ └── conformance.yml └── dependabot.yml ├── internal ├── s3api │ ├── monitoring_handlers.go │ ├── s3_options_handlers.go │ ├── monitoring_handlers_test.go │ ├── s3_object_cancellation_test.go │ ├── s3_bucket_handlers.go │ ├── s3_bucket_handlers_test.go │ ├── s3_object_tag_handlers.go │ ├── s3_multipart_handlers.go │ └── s3_multipart_handlers_test.go ├── interceptor │ ├── cancellation.go │ └── request_validation.go ├── metrics │ ├── metrics_test.go │ └── metrics.go ├── client │ ├── nats_client_test.go │ ├── nats_client.go │ ├── nats_object_client_metrics.go │ └── nats_object_client_test.go ├── credential │ ├── credential_test.go │ ├── credential.go │ ├── static_file_store.go │ └── static_file_store_test.go ├── testutil │ └── nats_server.go ├── logging │ └── logger.go ├── util │ ├── validation.go │ └── validation_test.go ├── server │ ├── opts.go │ └── gateway_server.go ├── streams │ └── sigv4_streams.go ├── model │ └── s3_responses.go └── auth │ └── s3_auth.go ├── .gitignore ├── CODE_OF_CONDUCT.md ├── Dockerfile ├── .goreleaser.yaml ├── Makefile ├── go.mod ├── CONTRIBUTING.md ├── ROADMAP.md ├── cmd └── nats-s3 │ └── main.go ├── CONFORMANCE.md ├── go.sum ├── LICENSE └── README.md /examples/python/requirements.txt: -------------------------------------------------------------------------------- 1 | boto3>=1.28.0 2 | -------------------------------------------------------------------------------- /scripts/benchmark/credentials.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "credentials": [ 3 | { 4 | "accessKey": "testtest", 5 | "secretKey": "testtest" 6 | } 7 | ] 8 | } -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Build output 2 | bin/ 3 | 4 | # VCS 5 | .git 6 | .gitignore 7 | 8 | # IDE 9 | .vscode/ 10 | .idea/ 11 | 12 | # CI coverage 13 | coverage.out 14 | coverage.html 15 | 16 | # Modules cache (not needed in image) 17 | **/node_modules 18 | 19 | -------------------------------------------------------------------------------- /examples/credentials.json: -------------------------------------------------------------------------------- 1 | { 2 | "credentials": [ 3 | { 4 | "accessKey": "demo-access-key", 5 | "secretKey": "demo-secret-key" 6 | }, 7 | { 8 | "accessKey": "admin", 9 | "secretKey": "admin-password-123" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /credentials.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "credentials": [ 3 | { 4 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 5 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 6 | }, 7 | { 8 | "accessKey": "AKIAI44QH8DHBEXAMPLE", 9 | "secretKey": "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | labels: enhancement 5 | --- 6 | 7 | ### Problem 8 | What problem would this feature solve? Who benefits? 9 | 10 | ### Proposal 11 | Describe the solution you'd like. 12 | 13 | ### Alternatives 14 | What alternatives have you considered? 
15 | 16 | ### Additional context 17 | 18 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | Briefly describe the change and why it’s needed. 3 | 4 | ## Changes 5 | - 6 | 7 | ## Testing 8 | How did you test this change? Include commands/output when relevant. 9 | 10 | ## Checklist 11 | - [ ] Code formatted (`gofmt -s`) 12 | - [ ] Tests added/updated and passing (`go test ./...`) 13 | - [ ] Docs updated (README/ROADMAP/CONTRIBUTING) 14 | 15 | -------------------------------------------------------------------------------- /internal/s3api/monitoring_handlers.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "github.com/wpnpeiris/nats-s3/internal/model" 5 | "net/http" 6 | ) 7 | 8 | // Healthz returns 200 OK when the gateway has an active connection to NATS. 9 | // Returns 503 Service Unavailable when disconnected. 10 | func (s *S3Gateway) Healthz(w http.ResponseWriter, r *http.Request) { 11 | if s.client.IsConnected() { 12 | model.WriteEmptyResponse(w, r, http.StatusOK) 13 | return 14 | } 15 | model.WriteEmptyResponse(w, r, http.StatusServiceUnavailable) 16 | } 17 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | labels: bug 5 | --- 6 | 7 | ### Describe the bug 8 | A clear and concise description of what the bug is. 9 | 10 | ### To Reproduce 11 | Steps to reproduce the behavior: 12 | 1. 13 | 2. 
14 | 15 | ### Expected behavior 16 | 17 | ### Logs / Output 18 | ``` 19 | (paste relevant logs) 20 | ``` 21 | 22 | ### Environment 23 | - nats-s3 version: 24 | - Go version: 25 | - OS/Arch: 26 | - NATS version: 27 | 28 | ### Additional context 29 | 30 | -------------------------------------------------------------------------------- /internal/s3api/s3_options_handlers.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "github.com/wpnpeiris/nats-s3/internal/model" 5 | "net/http" 6 | ) 7 | 8 | // SetOptionHeaders responds to CORS preflight and generic OPTIONS requests with 9 | // permissive headers to simplify development and testing. 10 | func (s *S3Gateway) SetOptionHeaders(w http.ResponseWriter, r *http.Request) { 11 | w.Header().Set("Access-Control-Allow-Origin", "*") 12 | w.Header().Set("Access-Control-Expose-Headers", "*") 13 | w.Header().Set("Access-Control-Allow-Methods", "*") 14 | w.Header().Set("Access-Control-Allow-Headers", "*") 15 | 16 | model.WriteEmptyResponse(w, r, http.StatusOK) 17 | } 18 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | - 'v*.*' 8 | 9 | permissions: 10 | contents: write 11 | 12 | jobs: 13 | goreleaser: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v6 18 | 19 | - name: Setup Go 20 | uses: actions/setup-go@v6 21 | with: 22 | go-version: stable 23 | 24 | - name: GoReleaser 25 | uses: goreleaser/goreleaser-action@v6 26 | with: 27 | version: latest 28 | args: release --clean 29 | env: 30 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 
# If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | coverage.* 16 | 17 | 18 | # Dependency directories (remove the comment below to include it) 19 | # vendor/ 20 | 21 | # Go workspace file 22 | go.work 23 | 24 | .idea 25 | 26 | .vscode 27 | data 28 | 29 | .DS_Store 30 | 31 | # Go build target folder 32 | bin/ 33 | dist/ 34 | -------------------------------------------------------------------------------- /internal/interceptor/cancellation.go: -------------------------------------------------------------------------------- 1 | package interceptor 2 | 3 | import ( 4 | "net/http" 5 | ) 6 | 7 | // RequestCancellation short-circuits requests whose contexts are already 8 | // canceled (e.g., client disconnected) before invoking the handler. This avoids 9 | // kicking off backend work for requests that cannot complete. 10 | type RequestCancellation struct{} 11 | 12 | func (m *RequestCancellation) CancelIfDone(next http.Handler) http.Handler { 13 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 14 | select { 15 | case <-r.Context().Done(): 16 | // Client canceled or server timed out: stop early. 17 | return 18 | default: 19 | } 20 | next.ServeHTTP(w, r) 21 | }) 22 | } 23 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | We are committed to a friendly, respectful, and inclusive community. 4 | 5 | - Be respectful and considerate. 6 | - Use welcoming and inclusive language. 
7 | - No harassment, discrimination, or personal attacks. 8 | - Assume good intent; disagree constructively. 9 | - Keep discussions focused on the project. 10 | 11 | Scope: This applies to all project spaces and interactions (issues, PRs, reviews, chats, and events). 12 | 13 | Reporting: If you experience or witness unacceptable behavior, open an issue or privately contact a maintainer. 14 | 15 | Enforcement: Maintainers may edit, remove, or reject content and contributions that violate this policy and may take further action as appropriate. 16 | 17 | Thank you for helping make this project welcoming to everyone. 18 | -------------------------------------------------------------------------------- /internal/s3api/monitoring_handlers_test.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "net/http/httptest" 5 | "testing" 6 | 7 | "github.com/wpnpeiris/nats-s3/internal/logging" 8 | "github.com/wpnpeiris/nats-s3/internal/testutil" 9 | 10 | "github.com/gorilla/mux" 11 | ) 12 | 13 | func TestHealthz_OK(t *testing.T) { 14 | s := testutil.StartJSServer(t) 15 | defer s.Shutdown() 16 | 17 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 18 | gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil) 19 | if err != nil { 20 | t.Fatalf("failed to create S3 gateway: %v", err) 21 | } 22 | r := mux.NewRouter() 23 | gw.RegisterRoutes(r) 24 | 25 | req := httptest.NewRequest("GET", "/healthz", nil) 26 | rr := httptest.NewRecorder() 27 | r.ServeHTTP(rr, req) 28 | 29 | if rr.Code != 200 { 30 | t.Fatalf("expected 200 from /healthz, got %d", rr.Code) 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GO_VERSION=1.24 2 | 3 | FROM golang:${GO_VERSION}-alpine AS build 4 | WORKDIR /src 5 | 6 | # Speed up module download with BuildKit caches when available 
7 | COPY go.mod go.sum ./ 8 | RUN --mount=type=cache,target=/go/pkg/mod \ 9 | --mount=type=cache,target=/root/.cache/go-build \ 10 | go mod download 11 | 12 | # Copy source and build directly with go build 13 | COPY . . 14 | ENV CGO_ENABLED=0 15 | RUN --mount=type=cache,target=/go/pkg/mod \ 16 | --mount=type=cache,target=/root/.cache/go-build \ 17 | go build -o /out/nats-s3 ./cmd/nats-s3 18 | 19 | FROM alpine:3.23 20 | # Create non-root user to run the service 21 | RUN adduser -D -H -s /sbin/nologin app 22 | USER app 23 | WORKDIR /home/app 24 | 25 | COPY --from=build /out/nats-s3 /usr/local/bin/nats-s3 26 | 27 | EXPOSE 5222 28 | ENTRYPOINT ["nats-s3"] 29 | CMD ["--listen","0.0.0.0:5222","--natsServers","nats://127.0.0.1:4222"] 30 | -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | project_name: nats-s3 2 | 3 | builds: 4 | - id: nats-s3 5 | main: ./cmd/nats-s3 6 | binary: nats-s3 7 | env: 8 | - CGO_ENABLED=0 9 | ldflags: 10 | - -s -w -X main.Version={{.Version}} 11 | goos: [linux, darwin, windows] 12 | goarch: [amd64, arm64] 13 | mod_timestamp: '{{ .CommitTimestamp }}' 14 | 15 | archives: 16 | - id: nats-s3-archives 17 | builds: [nats-s3] 18 | format_overrides: 19 | - goos: windows 20 | format: zip 21 | files: 22 | - LICENSE 23 | - README.md 24 | 25 | checksum: 26 | name_template: "SHA256SUMS" 27 | algorithm: sha256 28 | 29 | release: 30 | github: 31 | owner: '{{ envOrDefault "GITHUB_REPOSITORY_OWNER" "nats-s3" }}' 32 | name: '{{ envOrDefault "GITHUB_REPOSITORY_NAME" "nats-s3" }}' 33 | name_template: "Release {{.Tag}}" 34 | draft: true 35 | 36 | changelog: 37 | disable: true 38 | -------------------------------------------------------------------------------- /.github/workflows/coverage.yml: -------------------------------------------------------------------------------- 1 | name: NATS-S3 Code Coverage 2 | 3 | on: 4 | 
workflow_dispatch: {} 5 | 6 | jobs: 7 | coverage: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v6 11 | 12 | - name: Setup Go 13 | uses: actions/setup-go@v6 14 | with: 15 | go-version: stable 16 | 17 | - name: Go fmt check 18 | run: | 19 | files=$(gofmt -s -l .) 20 | if [ -n "$files" ]; then 21 | echo "Unformatted files:" && echo "$files" && exit 1 22 | fi 23 | 24 | - name: Go vet 25 | run: go vet ./... 26 | 27 | - name: Test with coverage 28 | run: go test -race -covermode=atomic -coverprofile=coverage.out ./... 29 | 30 | - name: Coverage summary 31 | run: go tool cover -func=coverage.out | tail -n 1 32 | 33 | - name: Upload coverage artifact 34 | uses: actions/upload-artifact@v6 35 | with: 36 | name: coverage 37 | path: coverage.out 38 | -------------------------------------------------------------------------------- /internal/metrics/metrics_test.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "github.com/gorilla/mux" 6 | "io" 7 | "net/http" 8 | "net/http/httptest" 9 | "strings" 10 | "testing" 11 | "time" 12 | ) 13 | 14 | func TestMetricsEndpoint(t *testing.T) { 15 | t.Parallel() 16 | router := mux.NewRouter() 17 | RegisterMetricEndpoint(router) 18 | ts := httptest.NewServer(router) 19 | defer ts.Close() 20 | 21 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 22 | defer cancel() 23 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, ts.URL+path, nil) 24 | if err != nil { 25 | t.Fatal(err) 26 | } 27 | 28 | res, err := http.DefaultClient.Do(req) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | 33 | defer res.Body.Close() 34 | 35 | if res.StatusCode != http.StatusOK { 36 | t.Errorf("Expected to be response 200 OK") 37 | } 38 | 39 | if !strings.Contains(res.Header.Get("Content-type"), "text/plain") { 40 | t.Errorf("Expected to be response content type 'text/plain'") 41 | } 42 | 43 | data, err := io.ReadAll(res.Body) 
44 | if err != nil { 45 | t.Errorf("Expected to be no errors when reading the body %v", err) 46 | } 47 | 48 | if data == nil { 49 | t.Errorf("Expected to be nonempty response") 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | BINARY_NAME := nats-s3 2 | BUILD_DIR := bin 3 | VERSION ?= $(shell git describe --always --tags 2>/dev/null || echo dev) 4 | COVERPROFILE := coverage.out 5 | 6 | .PHONY: all build fmt fmt-fix format vet test coverage coverage-report coverage-html run clean clean-coverage 7 | 8 | all: build test 9 | 10 | $(BUILD_DIR): 11 | @mkdir -p $(BUILD_DIR) 12 | 13 | build: $(BUILD_DIR) 14 | go build -ldflags="-X main.Version=$(VERSION)" -o $(BUILD_DIR)/$(BINARY_NAME) ./cmd/nats-s3 15 | 16 | coverage: 17 | go test -race -covermode=atomic -coverprofile=$(COVERPROFILE) ./... 18 | 19 | coverage-report: coverage 20 | @go tool cover -func=$(COVERPROFILE) | tail -n 1 21 | 22 | coverage-html: coverage 23 | @go tool cover -html=$(COVERPROFILE) -o coverage.html 24 | 25 | fmt: 26 | @files=$$(gofmt -s -l .); if [ -n "$$files" ]; then \ 27 | echo "The following files are not gofmt-ed:"; echo "$$files"; exit 1; \ 28 | else echo "gofmt check passed"; fi 29 | 30 | format fmt-fix: 31 | gofmt -s -w . 32 | 33 | vet: 34 | go vet ./... 35 | 36 | test: build fmt vet 37 | go test -race -count=1 ./... 
38 | 39 | run: build 40 | $(BUILD_DIR)/$(BINARY_NAME) --listen 0.0.0.0:5222 --natsServers nats://127.0.0.1:4222 41 | 42 | clean: 43 | rm -rf $(BUILD_DIR) 44 | 45 | clean-coverage: 46 | rm -f $(COVERPROFILE) coverage.html 47 | -------------------------------------------------------------------------------- /internal/client/nats_client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | "github.com/nats-io/nats-server/v2/server" 6 | "testing" 7 | 8 | nservertest "github.com/nats-io/nats-server/v2/test" 9 | "github.com/nats-io/nats.go" 10 | ) 11 | 12 | // TestClientSetupConnectionToNATS verifies that a Client can connect to a 13 | // locally started NATS Server, and that connection options (Name) are applied. 14 | func TestClientSetupConnectionToNATS(t *testing.T) { 15 | opts := nservertest.DefaultTestOptions 16 | opts.Port = server.RANDOM_PORT 17 | s := nservertest.RunServer(&opts) 18 | defer s.Shutdown() 19 | 20 | c := NewClient("unit-test") 21 | if c.ID() == "" { 22 | t.Fatalf("expected non-empty client ID") 23 | } 24 | 25 | if err := c.SetupConnectionToNATS(fmt.Sprintf("nats://127.0.0.1:%d", opts.Port)); err != nil { 26 | t.Fatalf("SetupConnectionToNATS failed: %v", err) 27 | } 28 | 29 | nc := c.NATS() 30 | if nc == nil { 31 | t.Fatalf("expected non-nil NATS connection") 32 | } 33 | if !nc.IsConnected() { 34 | t.Fatalf("expected NATS connection to be connected") 35 | } 36 | if got, want := nc.Opts.Name, c.Name(); got != want { 37 | t.Errorf("unexpected connection name: got %q, want %q", got, want) 38 | } 39 | 40 | // Avoid test panics from the client's closed handler by overriding it 41 | // before closing the connection in this test. 
42 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 43 | nc.Close() 44 | } 45 | -------------------------------------------------------------------------------- /internal/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/gorilla/mux" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "github.com/prometheus/client_golang/prometheus/collectors" 7 | "github.com/prometheus/client_golang/prometheus/promhttp" 8 | ) 9 | 10 | var ( 11 | // path is the HTTP route where Prometheus metrics are exposed. 12 | path = "/metrics" 13 | // registry is the private Prometheus registry used by the gateway server. 14 | registry = prometheus.NewRegistry() 15 | // runtimeMetrics collects Go runtime stats (GC, goroutines, mem, etc.). 16 | runtimeMetrics = collectors.NewGoCollector() 17 | // processMetrics collects process-level stats (CPU, RSS, fds, etc.). 18 | processMetrics = collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) 19 | ) 20 | 21 | func init() { 22 | registry.MustRegister(runtimeMetrics) 23 | registry.MustRegister(processMetrics) 24 | } 25 | 26 | // RegisterMetricEndpoint mounts the Prometheus handler for private registry at the standard metrics path 27 | // on the provided router. 28 | func RegisterMetricEndpoint(router *mux.Router) { 29 | r := router.PathPrefix("/").Subrouter() 30 | r.Handle(path, promhttp.HandlerFor( 31 | registry, 32 | promhttp.HandlerOpts{ 33 | EnableOpenMetrics: true, 34 | }, 35 | )) 36 | } 37 | 38 | // RegisterPrometheusCollector registers a custom collector with registry. 
39 | func RegisterPrometheusCollector(c prometheus.Collector) error { 40 | return registry.Register(c) 41 | } 42 | -------------------------------------------------------------------------------- /internal/interceptor/request_validation.go: -------------------------------------------------------------------------------- 1 | package interceptor 2 | 3 | import ( 4 | "github.com/gorilla/mux" 5 | "github.com/wpnpeiris/nats-s3/internal/model" 6 | "github.com/wpnpeiris/nats-s3/internal/util" 7 | "log" 8 | "net/http" 9 | ) 10 | 11 | // RequestValidator validates bucket names and object keys from route parameters 12 | // before passing the request to the handler. 13 | type RequestValidator struct{} 14 | 15 | // Validate is a middleware that extracts and validates bucket and key parameters 16 | // from the request path. It validates bucket names against S3 naming rules and object 17 | // keys against security constraints (path traversal, control characters, etc.). 18 | func (v *RequestValidator) Validate(next http.Handler) http.Handler { 19 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 20 | vars := mux.Vars(r) 21 | 22 | // Validate bucket name if present in route 23 | if bucket, ok := vars["bucket"]; ok && bucket != "" { 24 | if err := util.ValidateBucketName(bucket); err != nil { 25 | log.Printf("Invalid bucket name: %s, error: %v", bucket, err) 26 | model.WriteErrorResponse(w, r, model.ErrInvalidBucketName) 27 | return 28 | } 29 | } 30 | 31 | // Validate object key if present in route 32 | if key, ok := vars["key"]; ok && key != "" { 33 | if err := util.ValidateObjectKey(key); err != nil { 34 | log.Printf("Invalid object key: %s, error: %v", key, err) 35 | model.WriteErrorResponse(w, r, model.ErrInvalidRequest) 36 | return 37 | } 38 | } 39 | 40 | // Validation passed, proceed to handler 41 | next.ServeHTTP(w, r) 42 | }) 43 | } 44 | -------------------------------------------------------------------------------- /go.mod: 
-------------------------------------------------------------------------------- 1 | module github.com/wpnpeiris/nats-s3 2 | 3 | go 1.24.0 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.55.8 7 | github.com/go-kit/log v0.2.1 8 | github.com/google/uuid v1.6.0 9 | github.com/gorilla/mux v1.8.1 10 | github.com/nats-io/nats-server/v2 v2.12.3 11 | github.com/nats-io/nats.go v1.48.0 12 | github.com/nats-io/nuid v1.0.1 13 | github.com/prometheus/client_golang v1.23.2 14 | ) 15 | 16 | require ( 17 | github.com/antithesishq/antithesis-sdk-go v0.5.0 // indirect 18 | github.com/beorn7/perks v1.0.1 // indirect 19 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 20 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 21 | github.com/go-logfmt/logfmt v0.6.1 // indirect 22 | github.com/google/go-tpm v0.9.7 // indirect 23 | github.com/jmespath/go-jmespath v0.4.0 // indirect 24 | github.com/klauspost/compress v1.18.2 // indirect 25 | github.com/kr/text v0.2.0 // indirect 26 | github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 // indirect 27 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 28 | github.com/nats-io/jwt/v2 v2.8.0 // indirect 29 | github.com/nats-io/nkeys v0.4.12 // indirect 30 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 31 | github.com/prometheus/client_model v0.6.2 // indirect 32 | github.com/prometheus/common v0.67.4 // indirect 33 | github.com/prometheus/procfs v0.19.2 // indirect 34 | go.yaml.in/yaml/v2 v2.4.3 // indirect 35 | golang.org/x/crypto v0.46.0 // indirect 36 | golang.org/x/sys v0.39.0 // indirect 37 | golang.org/x/time v0.14.0 // indirect 38 | google.golang.org/protobuf v1.36.11 // indirect 39 | ) 40 | -------------------------------------------------------------------------------- /examples/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | # NATS-S3 Docker Compose 
Example 4 | # 5 | # This compose file provides a complete NATS-S3 setup with: 6 | # - NATS server with JetStream enabled 7 | # - nats-s3 gateway 8 | # - Pre-configured credentials 9 | # 10 | # Usage: 11 | # docker-compose up -d 12 | # 13 | # Test with AWS CLI: 14 | # export AWS_ACCESS_KEY_ID=demo-access-key 15 | # export AWS_SECRET_ACCESS_KEY=demo-secret-key 16 | # export AWS_DEFAULT_REGION=us-east-1 17 | # aws s3 ls --endpoint-url=http://localhost:5222 18 | 19 | services: 20 | # NATS server with JetStream for object storage 21 | nats: 22 | image: nats:latest 23 | container_name: nats-server 24 | command: 25 | - "-js" 26 | - "--user=nats-user" 27 | - "--pass=nats-password" 28 | ports: 29 | - "4222:4222" 30 | networks: 31 | - nats-s3-network 32 | 33 | # NATS-S3 Gateway 34 | nats-s3: 35 | image: wpnpeiris/nats-s3:latest 36 | container_name: nats-s3-gateway 37 | depends_on: 38 | - nats 39 | restart: on-failure 40 | ports: 41 | - "5222:5222" # S3 API endpoint 42 | volumes: 43 | - ./credentials.json:/credentials.json:ro 44 | command: 45 | - "--listen=0.0.0.0:5222" 46 | - "--natsServers=nats://nats:4222" 47 | - "--natsUser=nats-user" 48 | - "--natsPassword=nats-password" 49 | - "--s3.credentials=/credentials.json" 50 | environment: 51 | - LOG_LEVEL=info 52 | healthcheck: 53 | test: ["CMD", "wget", "--spider", "-q", "http://localhost:5222/healthz"] 54 | interval: 10s 55 | timeout: 5s 56 | retries: 3 57 | networks: 58 | - nats-s3-network 59 | 60 | networks: 61 | nats-s3-network: 62 | driver: bridge 63 | -------------------------------------------------------------------------------- /.github/workflows/release-image.yml: -------------------------------------------------------------------------------- 1 | name: Release Image 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*.*.*' 7 | - 'v*.*' 8 | workflow_dispatch: {} 9 | 10 | permissions: 11 | contents: read 12 | packages: write 13 | 14 | jobs: 15 | build-and-push: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout 19 | 
uses: actions/checkout@v6 20 | 21 | - name: Set up QEMU 22 | uses: docker/setup-qemu-action@v3 23 | 24 | - name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v3 26 | 27 | - name: Log in to GHCR 28 | uses: docker/login-action@v3 29 | with: 30 | registry: ghcr.io 31 | username: ${{ github.actor }} 32 | password: ${{ secrets.GITHUB_TOKEN }} 33 | 34 | - name: Log in to Docker Hub 35 | uses: docker/login-action@v3 36 | with: 37 | username: ${{ secrets.DOCKERHUB_USERNAME }} 38 | password: ${{ secrets.DOCKERHUB_TOKEN }} 39 | 40 | - name: Extract metadata (tags, labels) 41 | id: meta 42 | uses: docker/metadata-action@v5 43 | with: 44 | images: | 45 | ghcr.io/${{ github.repository }} 46 | ${{ secrets.DOCKERHUB_USERNAME }}/nats-s3 47 | tags: | 48 | type=ref,event=tag 49 | type=semver,pattern={{version}} 50 | type=semver,pattern={{major}}.{{minor}} 51 | type=semver,pattern={{major}} 52 | type=raw,value=latest,enable={{is_default_branch}} 53 | 54 | - name: Build and push 55 | uses: docker/build-push-action@v6 56 | with: 57 | context: . 
58 | platforms: linux/amd64,linux/arm64 59 | push: true 60 | tags: ${{ steps.meta.outputs.tags }} 61 | labels: ${{ steps.meta.outputs.labels }} 62 | -------------------------------------------------------------------------------- /internal/credential/credential_test.go: -------------------------------------------------------------------------------- 1 | package credential 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func TestEntry_Validate(t *testing.T) { 8 | tests := []struct { 9 | name string 10 | entry Entry 11 | wantError bool 12 | errorType error 13 | }{ 14 | { 15 | name: "valid credential", 16 | entry: Entry{ 17 | AccessKey: "AKIAIOSFODNN7EXAMPLE", 18 | SecretKey: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 19 | }, 20 | wantError: false, 21 | }, 22 | { 23 | name: "minimum length credentials", 24 | entry: Entry{ 25 | AccessKey: "ABC", 26 | SecretKey: "12345678", 27 | }, 28 | wantError: false, 29 | }, 30 | { 31 | name: "empty credentials", 32 | entry: Entry{ 33 | AccessKey: "", 34 | SecretKey: "", 35 | }, 36 | wantError: true, 37 | errorType: ErrEmptyCredentials, 38 | }, 39 | { 40 | name: "access key too short", 41 | entry: Entry{ 42 | AccessKey: "AB", 43 | SecretKey: "validSecretKey123", 44 | }, 45 | wantError: true, 46 | errorType: ErrInvalidAccessKeyLength, 47 | }, 48 | { 49 | name: "secret key too short", 50 | entry: Entry{ 51 | AccessKey: "validAccessKey", 52 | SecretKey: "short", 53 | }, 54 | wantError: true, 55 | errorType: ErrInvalidSecretKeyLength, 56 | }, 57 | { 58 | name: "access key contains equals sign", 59 | entry: Entry{ 60 | AccessKey: "ACCESS=KEY", 61 | SecretKey: "validSecretKey123", 62 | }, 63 | wantError: true, 64 | errorType: ErrContainsReservedChars, 65 | }, 66 | { 67 | name: "access key contains comma", 68 | entry: Entry{ 69 | AccessKey: "ACCESS,KEY", 70 | SecretKey: "validSecretKey123", 71 | }, 72 | wantError: true, 73 | errorType: ErrContainsReservedChars, 74 | }, 75 | } 76 | 77 | for _, tt := range tests { 78 | t.Run(tt.name, func(t 
*testing.T) { 79 | err := tt.entry.Validate() 80 | if (err != nil) != tt.wantError { 81 | t.Errorf("Validate() error = %v, wantError %v", err, tt.wantError) 82 | return 83 | } 84 | }) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Dependabot configuration for automated dependency updates 2 | # See: https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 3 | 4 | version: 2 5 | updates: 6 | # Go modules - Keep dependencies up to date 7 | - package-ecosystem: "gomod" 8 | directory: "/" 9 | schedule: 10 | interval: "weekly" 11 | day: "monday" 12 | time: "09:00" 13 | timezone: "UTC" 14 | # Group all Go dependency updates into a single PR 15 | groups: 16 | go-dependencies: 17 | patterns: 18 | - "*" 19 | update-types: 20 | - "minor" 21 | - "patch" 22 | # Keep major version updates separate for review 23 | open-pull-requests-limit: 5 24 | labels: 25 | - "dependencies" 26 | - "go" 27 | commit-message: 28 | prefix: "deps" 29 | include: "scope" 30 | reviewers: 31 | - "wpnpeiris" 32 | # Allow both direct and indirect dependency updates 33 | allow: 34 | - dependency-type: "all" 35 | 36 | # GitHub Actions - Keep workflow actions up to date 37 | - package-ecosystem: "github-actions" 38 | directory: "/" 39 | schedule: 40 | interval: "weekly" 41 | day: "monday" 42 | time: "09:00" 43 | timezone: "UTC" 44 | groups: 45 | github-actions: 46 | patterns: 47 | - "*" 48 | open-pull-requests-limit: 3 49 | labels: 50 | - "dependencies" 51 | - "github-actions" 52 | commit-message: 53 | prefix: "ci" 54 | include: "scope" 55 | reviewers: 56 | - "wpnpeiris" 57 | 58 | # Docker - Keep base images up to date 59 | - package-ecosystem: "docker" 60 | directory: "/" 61 | schedule: 62 | interval: "weekly" 63 | day: "monday" 64 | time: "09:00" 65 | timezone: "UTC" 66 | 
open-pull-requests-limit: 2 67 | labels: 68 | - "dependencies" 69 | - "docker" 70 | commit-message: 71 | prefix: "docker" 72 | include: "scope" 73 | reviewers: 74 | - "wpnpeiris" 75 | -------------------------------------------------------------------------------- /internal/client/nats_client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "fmt" 5 | "github.com/nats-io/nats.go/jetstream" 6 | "log" 7 | 8 | "github.com/nats-io/nats.go" 9 | "github.com/nats-io/nuid" 10 | ) 11 | 12 | const ( 13 | MetaStoreName = "mp_meta" 14 | PartMetaStoreName = "mp_part_meta" 15 | TempStoreName = "mp_temp" 16 | ) 17 | 18 | // Client wraps a NATS connection and metadata used by gateway components. 19 | type Client struct { 20 | id string 21 | kind string 22 | nc *nats.Conn 23 | } 24 | 25 | // NewClient creates a new Client with a generated ID and the provided kind 26 | // used in connection names and logs. 27 | func NewClient(kind string) *Client { 28 | id := nuid.Next() 29 | return &Client{ 30 | id: id, 31 | kind: kind, 32 | } 33 | } 34 | 35 | // SetupConnectionToNATS establishes a connection to the given NATS servers 36 | // applying the provided options and sets standard event handlers. 37 | func (c *Client) SetupConnectionToNATS(servers string, options ...nats.Option) error { 38 | 39 | options = append(options, nats.Name(c.Name())) 40 | 41 | nc, err := nats.Connect(servers, options...) 42 | if err != nil { 43 | return err 44 | } 45 | c.nc = nc 46 | 47 | nc.SetErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) { 48 | log.Printf("NATS error: %s\n", err) 49 | }) 50 | nc.SetReconnectHandler(func(_ *nats.Conn) { 51 | log.Println("Reconnected to NATS!") 52 | }) 53 | nc.SetClosedHandler(func(_ *nats.Conn) { 54 | log.Fatal("Connection to NATS is closed! Service cannot continue.") 55 | }) 56 | 57 | return err 58 | } 59 | 60 | // NATS returns the underlying NATS connection. 
61 | func (c *Client) NATS() *nats.Conn { 62 | return c.nc 63 | } 64 | 65 | // Jetstream returns a new JetStream instance backed by the client's connection. 66 | func (c *Client) Jetstream() (jetstream.JetStream, error) { 67 | return jetstream.New(c.nc) 68 | } 69 | 70 | // ID returns the client's stable unique identifier. 71 | func (c *Client) ID() string { 72 | return c.id 73 | } 74 | 75 | // Name returns a human-readable connection name used for NATS. 76 | func (c *Client) Name() string { 77 | return fmt.Sprintf("%s:%s", c.kind, c.id) 78 | } 79 | -------------------------------------------------------------------------------- /internal/testutil/nats_server.go: -------------------------------------------------------------------------------- 1 | package testutil 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | "testing" 7 | "time" 8 | 9 | srvlog "github.com/nats-io/nats-server/v2/logger" 10 | "github.com/nats-io/nats-server/v2/server" 11 | nservertest "github.com/nats-io/nats-server/v2/test" 12 | ) 13 | 14 | // StartJSServer starts an in-process NATS Server with JetStream enabled 15 | // and a per-test StoreDir to avoid cross-test persistence. 16 | func StartJSServer(t *testing.T) *server.Server { 17 | t.Helper() 18 | opts := nservertest.DefaultTestOptions 19 | opts.Port = server.RANDOM_PORT 20 | opts.JetStream = true 21 | opts.StoreDir = t.TempDir() 22 | s := nservertest.RunServer(&opts) 23 | return s 24 | } 25 | 26 | // StartJSServerCluster starts 3 in-process NATS Servers with JetStream enabled 27 | // and a per-test StoreDir to avoid cross-test persistence.
28 | func StartJSServerCluster(t *testing.T) []*server.Server { 29 | t.Helper() 30 | clusterPorts := []int{16222, 17222, 18222} // fixed ports for cluster nodes, please ensure they are free 31 | 32 | servers := make([]*server.Server, 3) 33 | for i := 0; i < 3; i++ { 34 | opts := nservertest.DefaultTestOptions 35 | opts.ServerName = fmt.Sprintf("S-%d", i+1) 36 | opts.Port = server.RANDOM_PORT 37 | opts.JetStream = true 38 | opts.StoreDir = filepath.Join(t.TempDir(), fmt.Sprintf("%d", i)) 39 | opts.Cluster.Name = "test-cluster" 40 | opts.Cluster.Host = "127.0.0.1" 41 | opts.Cluster.Port = clusterPorts[i] 42 | 43 | opts.Routes = server.RoutesFromStr(fmt.Sprintf("nats://127.0.0.1:%d", clusterPorts[0])) 44 | s := nservertest.RunServerCallback(&opts, func(s *server.Server) { 45 | s.SetLogger(srvlog.NewStdLogger(opts.Logtime, opts.Debug, opts.Trace, false, true, srvlog.LogUTC(opts.LogtimeUTC)), false, false) 46 | }) 47 | t.Logf("Started server %d\n", i) 48 | servers[i] = s 49 | } 50 | now := time.Now() 51 | t.Logf("Waiting for cluster to be ready\n") 52 | // Wait for cluster to form 53 | for { 54 | time.Sleep(200 * time.Millisecond) 55 | for _, s := range servers { 56 | if s.JetStreamIsLeader() { 57 | t.Logf("Cluster is ready in %s\n", time.Since(now)) 58 | return servers 59 | } 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /internal/credential/credential.go: -------------------------------------------------------------------------------- 1 | package credential 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strings" 7 | ) 8 | 9 | // Common errors for credential validation and retrieval 10 | var ( 11 | ErrAccessKeyNotFound = errors.New("access key not found") 12 | ErrInvalidAccessKeyLength = errors.New("access key must be at least 3 characters") 13 | ErrInvalidSecretKeyLength = errors.New("secret key must be at least 8 characters") 14 | ErrContainsReservedChars = errors.New("access key contains reserved characters 
'=' or ','") 15 | ErrEmptyCredentials = errors.New("access key and secret key cannot be empty") 16 | ) 17 | 18 | const ( 19 | // Minimum length for access key (following MinIO conventions) 20 | accessKeyMinLen = 3 21 | 22 | // Minimum length for secret key (following MinIO conventions) 23 | secretKeyMinLen = 8 24 | 25 | // Reserved characters that cannot be used in access keys 26 | reservedChars = "=," 27 | ) 28 | 29 | // Entry represents a single AWS-style credential pair. 30 | type Entry struct { 31 | AccessKey string `json:"accessKey"` 32 | SecretKey string `json:"secretKey"` 33 | } 34 | 35 | // Validate checks if the credential entry is valid. 36 | func (e *Entry) Validate() error { 37 | if e.AccessKey == "" && e.SecretKey == "" { 38 | return ErrEmptyCredentials 39 | } 40 | 41 | if len(e.AccessKey) < accessKeyMinLen { 42 | return fmt.Errorf("%w: got %d characters", ErrInvalidAccessKeyLength, len(e.AccessKey)) 43 | } 44 | 45 | if len(e.SecretKey) < secretKeyMinLen { 46 | return fmt.Errorf("%w: got %d characters", ErrInvalidSecretKeyLength, len(e.SecretKey)) 47 | } 48 | 49 | if strings.ContainsAny(e.AccessKey, reservedChars) { 50 | return ErrContainsReservedChars 51 | } 52 | 53 | return nil 54 | } 55 | 56 | // Store defines the interface for credential storage and retrieval. 57 | // Implementations can use different backends (file, memory, database, etc.) 58 | type Store interface { 59 | // Get retrieves the secret key for the given access key. 60 | // Returns the secret key and true if found, empty string and false otherwise. 61 | Get(accessKey string) (secretKey string, found bool) 62 | 63 | // GetName returns a descriptive name of the store implementation. 64 | GetName() string 65 | } 66 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to nats-s3 2 | 3 | Thanks for your interest in improving nats-s3! 
This project is in an early stage, so the process is intentionally lightweight. 4 | 5 | ## Getting Started 6 | - Prerequisites: 7 | - Go 1.22+ 8 | - Optional: a local NATS server with JetStream enabled for manual testing (`nats-server -js`) 9 | - Build: `go build ./cmd/nats-s3` 10 | - Run: `./nats-s3 --listen 0.0.0.0:5222 --natsServers nats://127.0.0.1:4222` 11 | - Test: `go test ./...` 12 | 13 | ## How We Work 14 | - Prefer opening an issue or discussion before large changes to align on direction. 15 | - Keep pull requests small, focused, and easy to review. 16 | - Include tests for new behavior or bug fixes when reasonable. 17 | - Update docs when you change user-facing behavior. 18 | 19 | ## Branching & Commits 20 | - Branch naming suggestions: 21 | - Features: `feat/` 22 | - Fixes: `fix/` 23 | - Chores/Docs: `chore/`, `docs/` 24 | - Commit messages: prefer conventional-style prefixes, for example: 25 | - `feat: add DELETE object route` 26 | - `fix: correct ETag and Last-Modified headers` 27 | - `docs: improve README usage examples` 28 | - `test: add unit tests for object handlers` 29 | 30 | ## Coding Style 31 | - Run the formatter before committing: `gofmt -s -w .` 32 | - Keep changes minimal and focused on the task at hand. 33 | - Be careful with logging; avoid logging full payloads or sensitive data. 34 | - Favor clear, concise comments and Go-doc comments for exported items. 35 | 36 | ## Testing 37 | - Run all tests: `go test ./...` 38 | - For NATS-dependent tests, use `github.com/nats-io/nats-server/v2/test` helpers to start an in-process server with JetStream enabled. 39 | - Aim for table-driven tests where practical and prefer deterministic behavior. 
40 | 41 | ## Pull Request Checklist 42 | - [ ] Code is formatted (`gofmt -s`) 43 | - [ ] Unit tests added/updated and passing (`go test ./...`) 44 | - [ ] Behavior and public APIs documented (README or comments) 45 | - [ ] PR description explains the problem and the solution briefly 46 | 47 | We appreciate your contributions and feedback! If something is unclear, please open an issue and we’ll help clarify and improve the docs and process. 48 | -------------------------------------------------------------------------------- /scripts/ci-conformance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Suppress Python BrokenPipeError warnings from AWS CLI 5 | export PYTHONWARNINGS="ignore::BrokenPipeError" 6 | 7 | ENDPOINT="http://localhost:5222" 8 | BUCKET="conformance-bucket-$(date +%s)" 9 | TMPDIR="$(mktemp -d)" 10 | PASS_COUNT=0 11 | FAIL_COUNT=0 12 | 13 | cleanup() { 14 | rm -rf "$TMPDIR" || true 15 | } 16 | trap cleanup EXIT 17 | 18 | pass() { echo "PASS: $1"; PASS_COUNT=$((PASS_COUNT+1)); } 19 | fail() { echo "FAIL: $1"; FAIL_COUNT=$((FAIL_COUNT+1)); } 20 | run_or_fail() { if "$@"; then pass "$1 ${2-}"; else fail "$1 ${2-}"; fi } 21 | 22 | echo "Endpoint: $ENDPOINT" 23 | echo "Bucket: $BUCKET" 24 | 25 | # 1) Make bucket 26 | if aws s3 mb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null; then 27 | pass "aws s3 mb" 28 | else 29 | fail "aws s3 mb"; exit 1 30 | fi 31 | 32 | # 2) List buckets should include our bucket 33 | buckets_output=$(aws s3 ls --endpoint-url="$ENDPOINT" 2>/dev/null) 34 | if echo "$buckets_output" | grep -q "$BUCKET"; then 35 | pass "aws s3 ls (service)" 36 | else 37 | fail "aws s3 ls (service)"; exit 1 38 | fi 39 | 40 | # 3) Put object 41 | echo "hello world" > "$TMPDIR/file.txt" 42 | if aws s3 cp "$TMPDIR/file.txt" "s3://$BUCKET/hello.txt" --endpoint-url="$ENDPOINT" >/dev/null; then 43 | pass "aws s3 cp put" 44 | else 45 | fail "aws s3 cp put"; exit 1 
46 | fi 47 | 48 | # 4) List bucket contents 49 | if aws s3 ls "s3://$BUCKET" --endpoint-url="$ENDPOINT" 2>/dev/null | grep -q "hello.txt"; then 50 | pass "aws s3 ls (bucket)" 51 | else 52 | fail "aws s3 ls (bucket)"; exit 1 53 | fi 54 | 55 | # 5) Head object via s3api 56 | if aws s3api head-object --bucket "$BUCKET" --key "hello.txt" --endpoint-url="$ENDPOINT" >/dev/null; then 57 | pass "aws s3api head-object" 58 | else 59 | fail "aws s3api head-object"; exit 1 60 | fi 61 | 62 | # 6) Get object and compare content 63 | if aws s3 cp "s3://$BUCKET/hello.txt" "$TMPDIR/out.txt" --endpoint-url="$ENDPOINT" >/dev/null; then 64 | if diff -q "$TMPDIR/file.txt" "$TMPDIR/out.txt" >/dev/null; then 65 | pass "aws s3 cp get + content match" 66 | else 67 | fail "content mismatch after get"; exit 1 68 | fi 69 | else 70 | fail "aws s3 cp get"; exit 1 71 | fi 72 | 73 | # 7) Delete object 74 | if aws s3 rm "s3://$BUCKET/hello.txt" --endpoint-url="$ENDPOINT" >/dev/null; then 75 | pass "aws s3 rm" 76 | else 77 | fail "aws s3 rm"; exit 1 78 | fi 79 | 80 | # 8) Remove bucket 81 | if aws s3 rb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null; then 82 | pass "aws s3 rb" 83 | else 84 | fail "aws s3 rb"; exit 1 85 | fi 86 | 87 | echo "Conformance subset summary: PASS=$PASS_COUNT FAIL=$FAIL_COUNT" 88 | if [ "$FAIL_COUNT" != "0" ]; then exit 1; fi 89 | 90 | -------------------------------------------------------------------------------- /internal/logging/logger.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "fmt" 5 | "github.com/go-kit/log" 6 | "github.com/go-kit/log/level" 7 | "os" 8 | ) 9 | 10 | // Supported logger formats and levels. 11 | const ( 12 | // FormatJSON encodes logs as JSON to stdout. 13 | FormatJSON = "json" 14 | 15 | // LevelDebug enables debug and above. 16 | LevelDebug = "debug" 17 | // LevelInfo enables info and above. 
18 | LevelInfo = "info" 19 | // LevelWarning enables warning and error only. 20 | LevelWarning = "warning" 21 | // LevelError enables error only. 22 | LevelError = "error" 23 | ) 24 | 25 | // Config declares runtime logging preferences. 26 | type Config struct { 27 | // Format selects encoder. When set to "json" logs are emitted as JSON to 28 | // stdout; any other value uses logfmt to stderr. 29 | Format string 30 | // Level selects the minimum level to log. Valid values are 31 | // "debug", "info", "warn", "warning", or "error". 32 | Level string 33 | } 34 | 35 | // NewLogger builds a go-kit/log Logger using the provided configuration. 36 | func NewLogger(cfg Config) log.Logger { 37 | var lvl level.Option 38 | switch cfg.Level { 39 | case LevelError: 40 | lvl = level.AllowError() 41 | case LevelWarning, "warn": 42 | // Accept "warn" as an alias: the CLI help text for --log.level 43 | // documents "debug, info, warn, error", so "warn" must not fall 44 | // through to the unknown-level default below. 45 | lvl = level.AllowWarn() 46 | case LevelInfo: 47 | lvl = level.AllowInfo() 48 | case LevelDebug: 49 | lvl = level.AllowDebug() 50 | default: 51 | // Default to info level for unknown/invalid configurations 52 | fmt.Fprintf(os.Stderr, "WARNING: Unknown log level '%s', defaulting to 'info'\n", cfg.Level) 53 | lvl = level.AllowInfo() 54 | } 55 | 56 | var logger log.Logger 57 | switch cfg.Format { 58 | case FormatJSON: 59 | logger = log.NewJSONLogger(log.NewSyncWriter(os.Stdout)) 60 | default: 61 | logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) 62 | } 63 | 64 | logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) 65 | logger = level.NewFilter(logger, lvl) 66 | 67 | return logger 68 | } 69 | 70 | // Info logs at info level. 71 | func Info(l log.Logger, kv ...any) { 72 | if err := level.Info(l).Log(kv...); err != nil { 73 | fmt.Fprintf(os.Stderr, "log info write failed: %v\n", err) 74 | } 75 | } 76 | 77 | // Warn logs at warning level.
75 | func Warn(l log.Logger, kv ...any) { 76 | if err := level.Warn(l).Log(kv...); err != nil { 77 | fmt.Fprintf(os.Stderr, "log warn write failed: %v\n", err) 78 | } 79 | } 80 | 81 | // Error logs at error level. 82 | func Error(l log.Logger, kv ...any) { 83 | if err := level.Error(l).Log(kv...); err != nil { 84 | fmt.Fprintf(os.Stderr, "log error write failed: %v\n", err) 85 | } 86 | } 87 | 88 | // Debug logs at debug level. 89 | func Debug(l log.Logger, kv ...any) { 90 | if err := level.Debug(l).Log(kv...); err != nil { 91 | fmt.Fprintf(os.Stderr, "log debug write failed: %v\n", err) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /scripts/benchmark/benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ################################## 4 | # Benchmark Script for NATS-S3 Gateway 5 | # 6 | # This script sets up a NATS server and a NATS-S3 gateway, 7 | # runs a benchmark using the 'warp' tool, and ensures cleanup 8 | # of all resources upon completion or interruption. 9 | # 10 | # This script assumes that Docker, NATS server 11 | # and warp (https://github.com/minio/warp) are installed 12 | # and available in the system's PATH. 13 | # 14 | # Also, ensure that you have a valid 'credentials.json' file in your working directory. 15 | # The Access Key and Secret Key in the credentials file should both be set to 'testtest'. 16 | ################################## 17 | 18 | set -e # Exit on error 19 | 20 | # Cleanup function to be called on exit 21 | cleanup() { 22 | echo "Cleaning up..." 23 | 24 | # Stop NATS-S3 container if running 25 | if [ ! -z "$NATS_S3_CONTAINER_ID" ]; then 26 | echo "Stopping NATS-S3 container..." 27 | docker stop "$NATS_S3_CONTAINER_ID" 2>/dev/null || true 28 | docker rm "$NATS_S3_CONTAINER_ID" 2>/dev/null || true 29 | fi 30 | 31 | # Stop NATS server if running 32 | if [ !
-z "$NATS_PID" ]; then 33 | echo "Stopping NATS server (PID: $NATS_PID)..." 34 | kill "$NATS_PID" 2>/dev/null || true 35 | wait "$NATS_PID" 2>/dev/null || true 36 | fi 37 | 38 | # Remove NATS JetStream data directory 39 | if [ -d "/tmp/nats/jetstream" ]; then 40 | echo "Removing NATS JetStream data directory..." 41 | rm -rf /tmp/nats/jetstream 42 | fi 43 | 44 | echo "Cleanup complete." 45 | } 46 | 47 | # Register cleanup function to run on script exit 48 | trap cleanup EXIT INT TERM 49 | 50 | echo "Pulling latest NATS-S3 gateway Docker image..." 51 | docker pull wpnpeiris/nats-s3:latest 52 | 53 | echo "Starting NATS server..." 54 | nats-server -js & 55 | NATS_PID=$! 56 | echo "NATS server started with PID: $NATS_PID" 57 | 58 | # Wait for NATS to be ready 59 | sleep 2 60 | 61 | echo "Starting NATS-S3 gateway..." 62 | NATS_S3_CONTAINER_ID=$(docker run -d --network host -p 5222:5222 \ 63 | -v $(pwd)/credentials.json:/credentials.json \ 64 | wpnpeiris/nats-s3:latest \ 65 | --listen 0.0.0.0:5222 \ 66 | --natsServers nats://127.0.0.1:4222 \ 67 | --s3.credentials /credentials.json) 68 | echo "NATS-S3 gateway started with container ID: $NATS_S3_CONTAINER_ID" 69 | 70 | # Wait for NATS-S3 to be ready 71 | sleep 3 72 | 73 | echo "Running benchmark..." 74 | warp mixed --host=localhost:5222 \ 75 | --access-key=testtest \ 76 | --secret-key=testtest \ 77 | --duration 20s \ 78 | --obj.size 1M \ 79 | --concurrent=1 \ 80 | --objects=10 \ 81 | --get-distrib 1 \ 82 | --stat-distrib 1 \ 83 | --put-distrib 2 \ 84 | --delete-distrib 1 85 | 86 | echo "Benchmark completed successfully!" 
87 | -------------------------------------------------------------------------------- /internal/util/validation.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "regexp" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | // MaxBucketNameLength is the maximum allowed length for S3 bucket names 11 | MaxBucketNameLength = 63 12 | // MinBucketNameLength is the minimum allowed length for S3 bucket names 13 | MinBucketNameLength = 3 14 | // MaxObjectKeyLength is the maximum allowed length for S3 object keys 15 | MaxObjectKeyLength = 1024 16 | ) 17 | 18 | // S3 bucket name rules per https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html 19 | var bucketNameRegex = regexp.MustCompile(`^[a-z0-9][a-z0-9.-]*[a-z0-9]$`) 20 | 21 | // ValidateBucketName validates an S3 bucket name according to AWS S3 naming rules 22 | func ValidateBucketName(bucket string) error { 23 | if bucket == "" { 24 | return fmt.Errorf("bucket name cannot be empty") 25 | } 26 | 27 | if len(bucket) < MinBucketNameLength { 28 | return fmt.Errorf("bucket name too short (min %d characters)", MinBucketNameLength) 29 | } 30 | 31 | if len(bucket) > MaxBucketNameLength { 32 | return fmt.Errorf("bucket name too long (max %d characters)", MaxBucketNameLength) 33 | } 34 | 35 | // Check for control characters or null bytes 36 | for i, r := range bucket { 37 | if r < 32 || r == 127 { 38 | return fmt.Errorf("bucket name contains control character at position %d", i) 39 | } 40 | } 41 | 42 | // Check for path traversal sequences 43 | if strings.Contains(bucket, "..") { 44 | return fmt.Errorf("bucket name contains path traversal sequence") 45 | } 46 | 47 | // Validate against S3 naming rules 48 | if !bucketNameRegex.MatchString(bucket) { 49 | return fmt.Errorf("bucket name does not conform to S3 naming rules (must be lowercase alphanumeric with dots and hyphens, starting and ending with alphanumeric)") 50 | } 51 | 52 | // Reject 
IP address format (simple check for patterns like x.x.x.x) 53 | if regexp.MustCompile(`^\d+\.\d+\.\d+\.\d+$`).MatchString(bucket) { 54 | return fmt.Errorf("bucket name must not be formatted as an IP address") 55 | } 56 | 57 | return nil 58 | } 59 | 60 | // ValidateObjectKey validates an S3 object key 61 | func ValidateObjectKey(key string) error { 62 | if key == "" { 63 | return fmt.Errorf("object key cannot be empty") 64 | } 65 | 66 | if len(key) > MaxObjectKeyLength { 67 | return fmt.Errorf("object key too long (max %d characters)", MaxObjectKeyLength) 68 | } 69 | 70 | // Check for control characters (except tab which is sometimes used) 71 | for i, r := range key { 72 | if (r < 32 && r != '\t') || r == 127 { 73 | return fmt.Errorf("object key contains invalid control character at position %d", i) 74 | } 75 | } 76 | 77 | // Check for path traversal sequences 78 | // This catches "../" but also standalone ".." which could be problematic 79 | if strings.Contains(key, "..") { 80 | return fmt.Errorf("object key contains path traversal sequence") 81 | } 82 | 83 | return nil 84 | } 85 | -------------------------------------------------------------------------------- /internal/client/nats_object_client_metrics.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-kit/log" 7 | "github.com/nats-io/nats.go" 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/wpnpeiris/nats-s3/internal/logging" 10 | ) 11 | 12 | const ( 13 | namespace = "nats" 14 | clientSubsystem = "client" 15 | objectStoreSubsystem = "objectstore" 16 | ) 17 | 18 | type MetricCollector struct { 19 | logger log.Logger 20 | client *NatsObjectClient 21 | 22 | // Total buckets metrics 23 | totalBucketsDesc *prometheus.Desc 24 | 25 | // Current connection state to NATS metrics 26 | connectStateDesc *prometheus.Desc 27 | 28 | // Total reconnect metrics to NATS 29 | totalReconnectDesc 
*prometheus.Desc 30 | } 31 | 32 | // NewMetricCollector creates a collector that reports the total bucket count 33 | // and NATS connection health (state, reconnects) for the given client. 34 | func NewMetricCollector(logger log.Logger, client *NatsObjectClient) *MetricCollector { 35 | return &MetricCollector{ 36 | logger: logger, 37 | client: client, 38 | 39 | totalBucketsDesc: prometheus.NewDesc( 40 | prometheus.BuildFQName(namespace, objectStoreSubsystem, "buckets_total"), 41 | "The total number of buckets.", 42 | nil, 43 | nil, 44 | ), 45 | connectStateDesc: prometheus.NewDesc( 46 | prometheus.BuildFQName(namespace, clientSubsystem, "state"), 47 | "The state of connection to NATS.", 48 | nil, 49 | nil, 50 | ), 51 | 52 | totalReconnectDesc: prometheus.NewDesc( 53 | prometheus.BuildFQName(namespace, clientSubsystem, "reconnects_total"), 54 | "The total number of reconnects.", 55 | nil, 56 | nil, 57 | ), 58 | } 59 | } 60 | 61 | // Describe implements the prometheus.Collector interface. Per the Collector 62 | // contract it must send every descriptor that Collect can emit, so all three 63 | // descriptors are sent here (not just the bucket total). 64 | func (c MetricCollector) Describe(ch chan<- *prometheus.Desc) { 65 | ch <- c.totalBucketsDesc 66 | ch <- c.connectStateDesc 67 | ch <- c.totalReconnectDesc 68 | } 69 | 70 | // Collect implements the prometheus.Collector interface.
65 | func (c MetricCollector) Collect(ch chan<- prometheus.Metric) { 66 | connected, stats := c.clientStats() 67 | 68 | ch <- prometheus.MustNewConstMetric( 69 | c.totalBucketsDesc, 70 | prometheus.GaugeValue, 71 | c.countBuckets(), 72 | ) 73 | ch <- prometheus.MustNewConstMetric( 74 | c.connectStateDesc, 75 | prometheus.GaugeValue, 76 | float64(connected), 77 | ) 78 | ch <- prometheus.MustNewConstMetric( 79 | c.totalReconnectDesc, 80 | prometheus.CounterValue, 81 | float64(stats.Reconnects), 82 | ) 83 | } 84 | 85 | // countBuckets return number of buckets 86 | func (c MetricCollector) countBuckets() float64 { 87 | buckets := 0.0 88 | ch, err := c.client.ListBuckets(context.Background()) 89 | if err != nil { 90 | logging.Warn(c.logger, "msg", "Error at listing buckets for metrics", "err", err) 91 | } else { 92 | for range ch { 93 | buckets++ 94 | } 95 | } 96 | return buckets 97 | } 98 | 99 | // clientStats return nats client's statistics 100 | func (c MetricCollector) clientStats() (int, nats.Statistics) { 101 | if c.client.IsConnected() { 102 | return 1, c.client.Stats() 103 | } 104 | return 0, nats.Statistics{} 105 | } 106 | -------------------------------------------------------------------------------- /internal/server/opts.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "flag" 5 | "github.com/nats-io/nats.go" 6 | "time" 7 | ) 8 | 9 | // Options holds all configuration options for the NATS S3 server. 10 | type Options struct { 11 | ServerListen string 12 | NatsServers string 13 | User string 14 | Password string 15 | Token string 16 | NkeyFile string 17 | CredsFile string 18 | Replicas int 19 | CredentialsFile string 20 | LogFormat string 21 | LogLevel string 22 | ReadTimeout time.Duration 23 | WriteTimeout time.Duration 24 | IdleTimeout time.Duration 25 | ReadHeaderTimeout time.Duration 26 | } 27 | 28 | // ConfigureOptions parses command-line arguments and returns an Options struct. 
29 | // It handles -h/--help and -v/--version flags by calling the provided callbacks. 30 | // Returns nil options and nil error when help or version flags are used. 31 | func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp func()) (*Options, error) { 32 | opts := &Options{} 33 | var ( 34 | showVersion bool 35 | showHelp bool 36 | ) 37 | fs.BoolVar(&showVersion, "v", false, "Print version information.") 38 | fs.BoolVar(&showVersion, "version", false, "Print version information.") 39 | fs.BoolVar(&showHelp, "h", false, "Print usage.") 40 | fs.BoolVar(&showHelp, "help", false, "Print usage.") 41 | 42 | fs.StringVar(&opts.ServerListen, "listen", "0.0.0.0:5222", "Network host:port to listen on") 43 | fs.StringVar(&opts.NatsServers, "natsServers", nats.DefaultURL, "List of NATS Servers to connect") 44 | fs.StringVar(&opts.User, "natsUser", "", "NATS server username (basic auth)") 45 | fs.StringVar(&opts.Password, "natsPassword", "", "NATS server password (basic auth)") 46 | fs.StringVar(&opts.Token, "natsToken", "", "NATS server token (token auth)") 47 | fs.StringVar(&opts.NkeyFile, "natsNKeyFile", "", "NATS server NKey seed file path (nkey auth)") 48 | fs.StringVar(&opts.CredsFile, "natsCredsFile", "", "NATS server credentials file path (JWT auth)") 49 | fs.IntVar(&opts.Replicas, "replicas", 1, "Number of replicas for each jetstream element") 50 | fs.StringVar(&opts.CredentialsFile, "s3.credentials", "", "Path to S3 credentials file (JSON format)") 51 | fs.StringVar(&opts.LogFormat, "log.format", "logfmt", "log output format: logfmt or json") 52 | fs.StringVar(&opts.LogLevel, "log.level", "info", "log level: debug, info, warn, error") 53 | fs.DurationVar(&opts.ReadTimeout, "http.read-timeout", 15*time.Minute, "HTTP server read timeout (for large uploads)") 54 | fs.DurationVar(&opts.WriteTimeout, "http.write-timeout", 15*time.Minute, "HTTP server write timeout (for large downloads)") 55 | fs.DurationVar(&opts.IdleTimeout, "http.idle-timeout", 
120*time.Second, "HTTP server idle timeout") 56 | fs.DurationVar(&opts.ReadHeaderTimeout, "http.read-header-timeout", 30*time.Second, "HTTP server read header timeout (slowloris protection)") 57 | if err := fs.Parse(args); err != nil { 58 | return nil, err 59 | } 60 | 61 | if showVersion { 62 | printVersion() 63 | return nil, nil 64 | } 65 | 66 | if showHelp { 67 | printHelp() 68 | return nil, nil 69 | } 70 | 71 | return opts, nil 72 | } 73 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | # Roadmap 2 | 3 | _Last updated: 2025-10-18_ 4 | 5 | ## Vision & Goals 6 | 7 | nats-s3 is a gateway to allow tools, SDKs, and CLI utilities that expect an S3-compatible interface to operate on 8 | NATS JetStream Object Store. 9 | It aims to provide compatibility, security (authentication/signature) with reasonable performance to use familiar 10 | S3 workflows with NATS Object Store. 11 | 12 | 13 | ## Current Status 14 | 15 | | Feature | Status | Notes | 16 | |---|---|----------------------------------------------------------------------------------| 17 | | Basic S3 operations (list buckets, list objects, put, get, delete object) | ✅ Implemented | Works with AWS CLI. | 18 | | SigV4 authentication (header & presigned URLs) | ✅ Implemented | Multi-user credential store with JSON file-based configuration. | 19 | | Multipart uploads (initiate/upload part/list parts/complete/abort) | ✅ Implemented | Follows S3 semantics incl. ETag and part pagination. | 20 | | Basic monitoring endpoints (/healthz, /metrics, /stats) | ✅ Implemented | Prometheus text metrics and JSON stats. | 21 | | Credential store | ✅ Implemented | JSON file-based store supporting multiple AWS-style access/secret key pairs. | 22 | 23 | 24 | ## Milestones & Phases 25 | 26 | Below are planned features grouped in phases. Priorities may change based on user feedback or community contributions.
27 | 28 | ## v0.1 - MVP 29 | - ✅ S3 basics: ListBuckets, ListObjects, GetObject, HeadObject, PutObject, DeleteObject 30 | - ✅ S3-compatible headers (ETag, Last-Modified RFC1123, Content-Length) 31 | - ✅ Configurable auth (NATS creds) 32 | - ✅ Docker image and simple CI 33 | 34 | ## v0.2 – Multipart support and other improvements (Completed) 35 | - ✅ Multipart upload support (initiate, upload part, list parts, complete, abort) 36 | - ✅ Improve listing behavior for multipart parts (pagination, markers) 37 | - ✅ Add basic metrics / endpoints for monitoring (/healthz, /metrics, /stats) 38 | 39 | ## v0.3 – Credential Store and robustness (Completed) 40 | - ✅ Credential store for auth (first-class) 41 | - ✅ JSON file-based credential store supporting multiple users 42 | - ✅ AWS-style access/secret key pairs with validation 43 | - ✅ SigV4 verification against credential store 44 | - ✅ Thread-safe implementation with proper error handling 45 | - ✅ Mandatory `--s3.credentials` flag for enhanced security 46 | 47 | ## v0.4 – CI improvements and Compatibility 48 | - CI improvements 49 | - Integration tests that spin up NATS + nats-s3 + AWS CLI/SDK flows 50 | - Auth paths: header/presigned, allowed/denied policy cases 51 | - Support key rotation (overlapping secrets) 52 | - Add more examples / sample code for SDKs (Go, Python etc.) 53 | - Helm chart and K8s manifests 54 | - Detailed metrics & dashboards (Prometheus, Grafana) 55 | - Investigate non-S3 API compatibility / S3 API newer features (e.g. 
AWS S3 Select, event notifications) 56 | 57 | ## v1.0 – Production Ready 58 | - Formal release versioning (v1.0.0) 59 | - Comprehensive documentation and deployment guides 60 | - Performance benchmarks and optimization 61 | - Production deployment best practices 62 | -------------------------------------------------------------------------------- /cmd/nats-s3/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "github.com/wpnpeiris/nats-s3/internal/server" 7 | "os" 8 | ) 9 | 10 | var usageStr = ` 11 | Usage: nats-s3 [options] 12 | 13 | Server Options: 14 | --listen HTTP bind address for NATS S3 (default: 0.0.0.0:5222) 15 | 16 | NATS Connection Options: 17 | --natsServers Comma-separated NATS server URLs (default: nats://127.0.0.1:4222) 18 | 19 | NATS Object Store Options: 20 | --replicas Number of replicas for each JetStream element (default: 1) 21 | 22 | NATS Authentication Options (choose one): 23 | --natsUser NATS server username (basic auth) 24 | --natsPassword NATS server password (basic auth) 25 | --natsToken NATS server token (token-based auth) 26 | --natsNKeyFile NATS server NKey seed file path (NKey auth) 27 | --natsCredsFile NATS server credentials file path (JWT auth) 28 | 29 | S3 Options: 30 | --s3.credentials Path to S3 credentials file (JSON format, required) 31 | 32 | Logging Options: 33 | --log.format Log output format: logfmt or json (default: logfmt) 34 | --log.level Log level: debug, info, warn, error (default: info) 35 | 36 | HTTP Server Timeout Options: 37 | --http.read-timeout HTTP server read timeout (default: 15m) 38 | --http.write-timeout HTTP server write timeout (default: 15m) 39 | --http.idle-timeout HTTP server idle timeout (default: 120s) 40 | --http.read-header-timeout HTTP server read header timeout (default: 30s) 41 | 42 | Common Options: 43 | -h, --help Show this message 44 | -v, --version Show version 45 | 46 | Examples: 47 | # Start 
with basic configuration 48 | nats-s3 --natsServers nats://127.0.0.1:4222 --s3.credentials credentials.json 49 | 50 | # Start with NATS authentication 51 | nats-s3 --natsServers nats://127.0.0.1:4222 \ 52 | --natsUser admin --natsPassword secret \ 53 | --s3.credentials credentials.json 54 | 55 | # Start with custom listen address and debug logging 56 | nats-s3 --listen 0.0.0.0:8080 \ 57 | --natsServers nats://127.0.0.1:4222 \ 58 | --s3.credentials credentials.json \ 59 | --log.level debug 60 | ` 61 | 62 | // Version is set at build time via -ldflags. 63 | var Version string 64 | 65 | // printVersionAndExit will print our version and exit. 66 | func printVersionAndExit() { 67 | fmt.Printf("nats-s3: v%s\n", Version) 68 | os.Exit(0) 69 | } 70 | 71 | // usage will print out the flag options of nats-s3. 72 | func usage() { 73 | fmt.Printf("%s\n", usageStr) 74 | os.Exit(0) 75 | } 76 | 77 | func main() { 78 | fs := flag.NewFlagSet("nats-s3", flag.ExitOnError) 79 | fs.Usage = usage 80 | opts, err := server.ConfigureOptions(fs, os.Args[1:], printVersionAndExit, fs.Usage) 81 | if err != nil { 82 | fmt.Fprintln(os.Stderr, err) 83 | os.Exit(1) 84 | } 85 | 86 | gateway, err := server.NewGatewayServer(opts) 87 | if err != nil { 88 | server.LogAndExit(err.Error()) 89 | } 90 | 91 | err = gateway.Start() 92 | if err != nil { 93 | server.LogAndExit(err.Error()) 94 | } 95 | } 96 | -------------------------------------------------------------------------------- /internal/s3api/s3_object_cancellation_test.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "net/http/httptest" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | "github.com/gorilla/mux" 12 | "github.com/nats-io/nats.go" 13 | "github.com/wpnpeiris/nats-s3/internal/logging" 14 | "github.com/wpnpeiris/nats-s3/internal/testutil" 15 | ) 16 | 17 | // TestUploadCancellation_AbortsWrite verifies that canceling the request context 18 | // 
during a large PUT causes the backend JetStream put to abort and no object is created. 19 | func TestUploadCancellation_AbortsWrite(t *testing.T) { 20 | s := testutil.StartJSServer(t) 21 | defer s.Shutdown() 22 | 23 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 24 | gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil) 25 | if err != nil { 26 | t.Fatalf("failed to create S3 gateway: %v", err) 27 | } 28 | 29 | // Create Object Store bucket 30 | nc, err := nats.Connect(s.ClientURL()) 31 | if err != nil { 32 | t.Fatalf("connect failed: %v", err) 33 | } 34 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 35 | defer nc.Close() 36 | js, err := nc.JetStream() 37 | if err != nil { 38 | t.Fatalf("JetStream failed: %v", err) 39 | } 40 | bucket := "cancel-bucket" 41 | if _, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: bucket}); err != nil { 42 | t.Fatalf("create object store failed: %v", err) 43 | } 44 | 45 | r := mux.NewRouter() 46 | gw.RegisterRoutes(r) 47 | 48 | // Prepare a slow streaming body using an io.Pipe 49 | pr, pw := io.Pipe() 50 | total := int64(10 * 1024 * 1024) // 10 MiB body 51 | chunk := make([]byte, 64*1024) 52 | var wrote int64 53 | done := make(chan struct{}) 54 | go func() { 55 | defer close(done) 56 | defer pw.Close() 57 | for wrote < total { 58 | // write a chunk and sleep a bit 59 | n, err := pw.Write(chunk) 60 | if err != nil { 61 | return 62 | } 63 | wrote += int64(n) 64 | time.Sleep(5 * time.Millisecond) 65 | } 66 | }() 67 | 68 | // Build request with cancelable context and explicit Content-Length 69 | req := httptest.NewRequest("PUT", "/"+bucket+"/big-file.bin", pr) 70 | ctx, cancel := context.WithCancel(req.Context()) 71 | req = req.WithContext(ctx) 72 | req.ContentLength = total 73 | 74 | rec := httptest.NewRecorder() 75 | 76 | // Serve in a goroutine and cancel midway 77 | var wg sync.WaitGroup 78 | wg.Add(1) 79 | go func() { 80 | defer wg.Done() 81 | r.ServeHTTP(rec, req) 82 | }() 83 | 84 | // Allow some data to 
be written then cancel and close the body reader 85 | time.Sleep(50 * time.Millisecond) 86 | cancel() 87 | _ = pr.CloseWithError(context.Canceled) 88 | 89 | // Wait for handler to finish 90 | wg.Wait() 91 | <-done // ensure writer exited 92 | 93 | if rec.Code == 200 || rec.Code == 204 { 94 | t.Fatalf("expected non-success status on canceled upload, got %d", rec.Code) 95 | } 96 | 97 | // Verify object was not created 98 | os, err := js.ObjectStore(bucket) 99 | if err != nil { 100 | t.Fatalf("ObjectStore failed: %v", err) 101 | } 102 | if _, err := os.GetInfo("big-file.bin"); err == nil { 103 | t.Fatalf("object unexpectedly exists after canceled upload") 104 | } else if err != nats.ErrObjectNotFound { 105 | t.Fatalf("unexpected error when checking object: %v", err) 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /internal/credential/static_file_store.go: -------------------------------------------------------------------------------- 1 | package credential 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "sync" 8 | ) 9 | 10 | // CredentialsFile represents the JSON structure of the credentials file. 11 | type CredentialsFile struct { 12 | Credentials []Entry `json:"credentials"` 13 | } 14 | 15 | // StaticFileStore implements Store by loading credentials from a JSON file. 16 | // Credentials are loaded once at initialization and stored in memory. 17 | // This implementation is read-only and thread-safe. 18 | type StaticFileStore struct { 19 | mu sync.RWMutex 20 | credentials map[string]string // accessKey -> secretKey 21 | filePath string 22 | } 23 | 24 | // NewStaticFileStore creates a new StaticFileStore and loads credentials from the specified file. 25 | // The file should be in JSON format with a "credentials" array containing access/secret key pairs. 
26 | // 27 | // Example JSON format: 28 | // 29 | // { 30 | // "credentials": [ 31 | // { 32 | // "accessKey": "AKIAIOSFODNN7EXAMPLE", 33 | // "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 34 | // } 35 | // ] 36 | // } 37 | func NewStaticFileStore(filePath string) (*StaticFileStore, error) { 38 | if filePath == "" { 39 | return nil, fmt.Errorf("credentials file path cannot be empty") 40 | } 41 | 42 | store := &StaticFileStore{ 43 | credentials: make(map[string]string), 44 | filePath: filePath, 45 | } 46 | 47 | if err := store.load(); err != nil { 48 | return nil, fmt.Errorf("failed to load credentials from %s: %w", filePath, err) 49 | } 50 | 51 | return store, nil 52 | } 53 | 54 | // load reads and parses the credentials file. 55 | func (s *StaticFileStore) load() error { 56 | data, err := os.ReadFile(s.filePath) 57 | if err != nil { 58 | return fmt.Errorf("failed to read file: %w", err) 59 | } 60 | 61 | var credFile CredentialsFile 62 | if err := json.Unmarshal(data, &credFile); err != nil { 63 | return fmt.Errorf("failed to parse JSON: %w", err) 64 | } 65 | 66 | if len(credFile.Credentials) == 0 { 67 | return fmt.Errorf("no credentials found in file") 68 | } 69 | 70 | s.mu.Lock() 71 | defer s.mu.Unlock() 72 | 73 | // Validate and store each credential 74 | for i, entry := range credFile.Credentials { 75 | if err := entry.Validate(); err != nil { 76 | return fmt.Errorf("invalid credential at index %d: %w", i, err) 77 | } 78 | 79 | // Check for duplicate access keys 80 | if _, exists := s.credentials[entry.AccessKey]; exists { 81 | return fmt.Errorf("duplicate access key at index %d: %s", i, entry.AccessKey) 82 | } 83 | 84 | s.credentials[entry.AccessKey] = entry.SecretKey 85 | } 86 | 87 | return nil 88 | } 89 | 90 | // Get retrieves the secret key for the given access key. 91 | // This method is thread-safe. 
92 | func (s *StaticFileStore) Get(accessKey string) (string, bool) { 93 | s.mu.RLock() 94 | defer s.mu.RUnlock() 95 | 96 | secretKey, found := s.credentials[accessKey] 97 | return secretKey, found 98 | } 99 | 100 | // GetName returns the name of this store implementation. 101 | func (s *StaticFileStore) GetName() string { 102 | return "static_file" 103 | } 104 | 105 | // Count returns the number of credentials loaded. 106 | // This is useful for testing and monitoring. 107 | func (s *StaticFileStore) Count() int { 108 | s.mu.RLock() 109 | defer s.mu.RUnlock() 110 | 111 | return len(s.credentials) 112 | } 113 | -------------------------------------------------------------------------------- /.github/workflows/conformance.yml: -------------------------------------------------------------------------------- 1 | name: Conformance 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | ref: 7 | description: 'Branch or tag to run against' 8 | required: true 9 | default: 'main' 10 | pull_request: 11 | branches: [ main ] 12 | 13 | jobs: 14 | s3-compat: 15 | name: S3 API Conformance 16 | runs-on: ubuntu-latest 17 | timeout-minutes: 15 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v6 21 | with: 22 | ref: ${{ github.event.inputs.ref || github.ref }} 23 | 24 | - name: Setup Go 25 | uses: actions/setup-go@v6 26 | with: 27 | go-version-file: go.mod 28 | 29 | - name: Install AWS CLI v2 30 | run: | 31 | sudo apt-get update 32 | sudo apt-get install -y curl unzip 33 | curl -sSLo awscliv2.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip 34 | unzip -q awscliv2.zip 35 | sudo ./aws/install --update 36 | aws --version 37 | 38 | - name: Build gateway 39 | run: | 40 | make build 41 | 42 | - name: Start NATS (JetStream) 43 | run: | 44 | docker run -d --name nats-js -p 4222:4222 \ 45 | nats:latest -js --user ci-access --pass ci-secret 46 | echo "Waiting for NATS to be ready..." 
47 | for i in {1..30}; do 48 | (echo > /dev/tcp/127.0.0.1/4222) >/dev/null 2>&1 && { echo "NATS ready"; break; }; sleep 1; done 49 | shell: bash 50 | 51 | - name: Create credentials file 52 | run: | 53 | cat > credentials.json < gateway.log 2>&1 & 73 | echo $! > gateway.pid 74 | echo "Waiting for gateway to be ready..." 75 | for i in {1..30}; do 76 | (echo > /dev/tcp/127.0.0.1/5222) >/dev/null 2>&1 && { echo ready; break; }; sleep 1; done 77 | shell: bash 78 | 79 | - name: Run basic conformance tests 80 | env: 81 | AWS_ACCESS_KEY_ID: ci-access 82 | AWS_SECRET_ACCESS_KEY: ci-secret 83 | AWS_DEFAULT_REGION: us-east-1 84 | run: | 85 | bash scripts/ci-conformance.sh 86 | 87 | - name: Run full conformance tests 88 | env: 89 | AWS_ACCESS_KEY_ID: ci-access 90 | AWS_SECRET_ACCESS_KEY: ci-secret 91 | AWS_DEFAULT_REGION: us-east-1 92 | run: | 93 | bash scripts/ci-conformance-full.sh 94 | 95 | - name: Show gateway logs on failure 96 | if: failure() 97 | run: | 98 | echo '--- gateway.log ---' 99 | cat gateway.log || true 100 | 101 | - name: Cleanup 102 | if: always() 103 | run: | 104 | if [ -f gateway.pid ]; then kill $(cat gateway.pid) || true; fi 105 | docker rm -f nats-js || true 106 | -------------------------------------------------------------------------------- /examples/python/README.md: -------------------------------------------------------------------------------- 1 | # NATS-S3 Python Examples 2 | 3 | Python examples demonstrating how to use NATS-S3 with the boto3 library. 4 | 5 | ## Prerequisites 6 | 7 | 1. **Python 3.7+** installed 8 | 2. **NATS-S3 running** (via Docker Compose or binary) 9 | 3. **pip** package manager 10 | 11 | ## Quick Start 12 | 13 | ### Step 1: Start NATS-S3 14 | 15 | From the `examples/` directory: 16 | 17 | ```bash 18 | cd .. 
19 | docker-compose up -d 20 | ``` 21 | 22 | Verify it's running: 23 | ```bash 24 | docker-compose ps 25 | ``` 26 | 27 | ### Step 2: Set Up Python Environment 28 | 29 | ```bash 30 | cd python 31 | 32 | # Create a virtual environment (optional but recommended) 33 | python3 -m venv venv 34 | source venv/bin/activate # On Windows: venv\Scripts\activate 35 | 36 | # Install dependencies 37 | pip install -r requirements.txt 38 | ``` 39 | 40 | ### Step 3: Run the Example 41 | 42 | ```bash 43 | python nats_s3_example.py 44 | ``` 45 | 46 | ## What the Example Does 47 | 48 | The example demonstrates all major S3 operations: 49 | 50 | 1. ✅ Create bucket 51 | 2. ✅ List buckets 52 | 3. ✅ Upload objects 53 | 4. ✅ Upload objects with metadata 54 | 5. ✅ List objects 55 | 6. ✅ Get object metadata (HEAD) 56 | 7. ✅ Download objects 57 | 8. ✅ List objects with prefix (hierarchical) 58 | 9. ✅ Copy objects 59 | 10. ✅ Generate presigned URLs 60 | 11. ✅ Delete individual objects 61 | 12. ✅ Batch delete objects 62 | 13. ✅ Delete bucket 63 | 64 | ## Expected Output 65 | 66 | ``` 67 | ============================================================ 68 | NATS-S3 Python Example with Boto3 69 | ============================================================ 70 | 71 | 1. Creating bucket 'python-example-bucket'... 72 | ✓ Bucket created successfully 73 | 74 | 2. Listing all buckets... 75 | - python-example-bucket 76 | 77 | 3. Uploading object to 'python-example-bucket'... 78 | ✓ Uploaded 'test-file.txt' 79 | 80 | ... (more operations) 81 | 82 | ============================================================ 83 | ✓ All operations completed successfully! 
84 | ============================================================ 85 | ``` 86 | 87 | ## Troubleshooting 88 | 89 | ### Connection Refused 90 | 91 | Make sure NATS-S3 is running: 92 | ```bash 93 | curl http://localhost:5222/healthz 94 | ``` 95 | 96 | ### Authentication Errors 97 | 98 | Verify credentials match `examples/credentials.json`: 99 | - Access Key: `demo-access-key` 100 | - Secret Key: `demo-secret-key` 101 | 102 | ### Import Errors 103 | 104 | Install boto3: 105 | ```bash 106 | pip install boto3 107 | ``` 108 | 109 | ## Using with Your Own Application 110 | 111 | ```python 112 | import boto3 113 | 114 | # Configure client 115 | s3 = boto3.client( 116 | 's3', 117 | endpoint_url='http://localhost:5222', 118 | aws_access_key_id='demo-access-key', 119 | aws_secret_access_key='demo-secret-key', 120 | region_name='us-east-1' 121 | ) 122 | 123 | # Use standard boto3 operations 124 | s3.create_bucket(Bucket='my-bucket') 125 | s3.put_object(Bucket='my-bucket', Key='file.txt', Body=b'Hello!') 126 | obj = s3.get_object(Bucket='my-bucket', Key='file.txt') 127 | print(obj['Body'].read()) 128 | ``` 129 | 130 | ## Additional Resources 131 | 132 | - [Boto3 Documentation](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) 133 | - [AWS S3 API Reference](https://docs.aws.amazon.com/AmazonS3/latest/API/Welcome.html) 134 | - [NATS-S3 Conformance](../../CONFORMANCE.md) 135 | -------------------------------------------------------------------------------- /internal/util/validation_test.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | ) 7 | 8 | func TestValidateBucketName(t *testing.T) { 9 | tests := []struct { 10 | name string 11 | bucket string 12 | wantError bool 13 | }{ 14 | // Valid bucket names 15 | {"valid simple", "mybucket", false}, 16 | {"valid with dash", "my-bucket", false}, 17 | {"valid with dot", "my.bucket", false}, 18 | {"valid with numbers", 
"bucket123", false}, 19 | {"valid complex", "my-bucket.name-123", false}, 20 | {"valid minimum length", "abc", false}, 21 | {"valid maximum length", strings.Repeat("a", 63), false}, 22 | 23 | // Invalid bucket names 24 | {"empty", "", true}, 25 | {"too short", "ab", true}, 26 | {"too long", strings.Repeat("a", 64), true}, 27 | {"uppercase", "MyBucket", true}, 28 | {"starts with dash", "-mybucket", true}, 29 | {"ends with dash", "mybucket-", true}, 30 | {"starts with dot", ".mybucket", true}, 31 | {"ends with dot", "mybucket.", true}, 32 | {"double dot", "my..bucket", true}, 33 | {"path traversal", "my../bucket", true}, 34 | {"IP address format", "192.168.1.1", true}, 35 | {"contains underscore", "my_bucket", true}, 36 | {"contains space", "my bucket", true}, 37 | {"contains slash", "my/bucket", true}, 38 | {"null byte", "bucket\x00name", true}, 39 | {"control character", "bucket\nname", true}, 40 | {"tab character", "bucket\tname", true}, 41 | } 42 | 43 | for _, tt := range tests { 44 | t.Run(tt.name, func(t *testing.T) { 45 | err := ValidateBucketName(tt.bucket) 46 | if (err != nil) != tt.wantError { 47 | t.Errorf("ValidateBucketName(%q) error = %v, wantError %v", tt.bucket, err, tt.wantError) 48 | } 49 | }) 50 | } 51 | } 52 | 53 | func TestValidateObjectKey(t *testing.T) { 54 | tests := []struct { 55 | name string 56 | key string 57 | wantError bool 58 | }{ 59 | // Valid object keys 60 | {"valid simple", "myfile.txt", false}, 61 | {"valid with path", "path/to/file.txt", false}, 62 | {"valid with dash", "my-file.txt", false}, 63 | {"valid with underscore", "my_file.txt", false}, 64 | {"valid with space", "my file.txt", false}, 65 | {"valid with dots", "my.file.name.txt", false}, 66 | {"valid with tab", "file\tname.txt", false}, 67 | {"valid unicode", "文件.txt", false}, 68 | {"valid maximum length", strings.Repeat("a", 1024), false}, 69 | {"valid deep path", "a/b/c/d/e/f/g/h/i/j/k/file.txt", false}, 70 | 71 | // Invalid object keys 72 | {"empty", "", true}, 73 | 
{"too long", strings.Repeat("a", 1025), true}, 74 | {"path traversal", "../etc/passwd", true}, 75 | {"path traversal middle", "path/../file.txt", true}, 76 | {"path traversal end", "path/..", true}, 77 | {"double dot alone", "..", true}, 78 | {"null byte", "file\x00name.txt", true}, 79 | {"control character newline", "file\nname.txt", true}, 80 | {"control character carriage return", "file\rname.txt", true}, 81 | {"bell character", "file\x07name.txt", true}, 82 | {"delete character", "file\x7fname.txt", true}, 83 | } 84 | 85 | for _, tt := range tests { 86 | t.Run(tt.name, func(t *testing.T) { 87 | err := ValidateObjectKey(tt.key) 88 | if (err != nil) != tt.wantError { 89 | t.Errorf("ValidateObjectKey(%q) error = %v, wantError %v", tt.key, err, tt.wantError) 90 | } 91 | }) 92 | } 93 | } 94 | 95 | func BenchmarkValidateBucketName(b *testing.B) { 96 | bucket := "my-valid-bucket-name-123" 97 | b.ResetTimer() 98 | for i := 0; i < b.N; i++ { 99 | _ = ValidateBucketName(bucket) 100 | } 101 | } 102 | 103 | func BenchmarkValidateObjectKey(b *testing.B) { 104 | key := "path/to/my/object/file.txt" 105 | b.ResetTimer() 106 | for i := 0; i < b.N; i++ { 107 | _ = ValidateObjectKey(key) 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /internal/client/nats_object_client_test.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "testing" 7 | "time" 8 | 9 | "github.com/wpnpeiris/nats-s3/internal/logging" 10 | "github.com/wpnpeiris/nats-s3/internal/testutil" 11 | 12 | "github.com/nats-io/nats.go" 13 | ) 14 | 15 | func TestNatsObjectClient_BasicCRUD(t *testing.T) { 16 | s := testutil.StartJSServer(t) 17 | defer s.Shutdown() 18 | 19 | url := s.ClientURL() 20 | 21 | c := NewClient("object-test") 22 | if err := c.SetupConnectionToNATS(url); err != nil { 23 | t.Fatalf("connect failed: %v", err) 24 | } 25 | nc := c.NATS() 26 | // Avoid 
panic-on-close during tests. 27 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 28 | defer nc.Close() 29 | 30 | js, err := nc.JetStream() 31 | if err != nil { 32 | t.Fatalf("JetStream failed: %v", err) 33 | } 34 | 35 | bucket := "testbucket" 36 | key := "path/to/object.txt" 37 | data := []byte("hello world") 38 | 39 | // Create the bucket for object store operations. 40 | if _, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: bucket}); err != nil { 41 | t.Fatalf("create object store failed: %v", err) 42 | } 43 | 44 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 45 | oc, err := NewNatsObjectClient(logger, c, NatsObjectClientOptions{}) 46 | if err != nil { 47 | t.Fatalf("NewNatsObjectClient failed: %v", err) 48 | } 49 | 50 | // Put 51 | info, err := oc.PutObjectStream(context.Background(), bucket, key, "text/plain", map[string]string{"k": "v"}, bytes.NewReader(data)) 52 | if err != nil { 53 | t.Fatalf("PutObject failed: %v", err) 54 | } 55 | if info == nil || 56 | info.Name != key || 57 | info.Headers.Get("Content-Type") != "text/plain" || 58 | info.Metadata["k"] != "v" { 59 | t.Fatalf("unexpected PutObject info: %+v", info) 60 | } 61 | 62 | // GetInfo 63 | gi, err := oc.GetObjectInfo(context.Background(), bucket, key) 64 | if err != nil { 65 | t.Fatalf("GetObjectInfo failed: %v", err) 66 | } 67 | if gi == nil || gi.Size != uint64(len(data)) { 68 | t.Fatalf("unexpected GetObjectInfo: %+v", gi) 69 | } 70 | 71 | // Get 72 | gotInfo, gotData, err := oc.GetObject(context.Background(), bucket, key) 73 | if err != nil { 74 | t.Fatalf("GetObject failed: %v", err) 75 | } 76 | if gotInfo == nil || !bytes.Equal(gotData, data) { 77 | t.Fatalf("unexpected GetObject: info=%+v data=%q", gotInfo, string(gotData)) 78 | } 79 | 80 | // ListObjects should include our key 81 | list, err := oc.ListObjects(context.Background(), bucket) 82 | if err != nil { 83 | t.Fatalf("ListObjects failed: %v", err) 84 | } 85 | found := false 86 | for _, o := range list { 87 | if 
o.Name == key { 88 | found = true 89 | break 90 | } 91 | } 92 | if !found { 93 | t.Fatalf("ListObjects did not contain key %q", key) 94 | } 95 | 96 | // ListBuckets channel should yield our bucket 97 | ch, err := oc.ListBuckets(context.Background()) 98 | if err != nil { 99 | t.Fatalf("ListBuckets failed: %v", err) 100 | } 101 | found = false 102 | timeout := time.After(2 * time.Second) 103 | for { 104 | select { 105 | case st, ok := <-ch: 106 | if !ok { 107 | if !found { 108 | t.Fatalf("bucket %q not found in ListBuckets", bucket) 109 | } 110 | goto delete 111 | } 112 | if st.Bucket() == bucket { 113 | found = true 114 | } 115 | case <-timeout: 116 | t.Fatalf("timeout waiting for ListBuckets") 117 | } 118 | } 119 | 120 | delete: 121 | // Delete 122 | if err := oc.DeleteObject(context.Background(), bucket, key); err != nil { 123 | t.Fatalf("DeleteObject failed: %v", err) 124 | } 125 | 126 | // Verify deletion by attempting GetObjectInfo and expecting error 127 | if _, err := oc.GetObjectInfo(context.Background(), bucket, key); err == nil { 128 | t.Fatalf("expected error getting deleted object info") 129 | } 130 | } 131 | -------------------------------------------------------------------------------- /scripts/benchmark/README.md: -------------------------------------------------------------------------------- 1 | # NATS-S3 Benchmark Script 2 | 3 | This directory contains a benchmark script to test the performance of the NATS-S3 gateway using MinIO's `warp` benchmarking tool. 4 | 5 | ## Prerequisites 6 | 7 | Before running the benchmark script, ensure you have the following installed: 8 | 9 | 1. **Docker** - Required to run the NATS-S3 gateway container 10 | 2. **NATS Server** - The `nats-server` binary must be available in your system PATH 11 | 3. **MinIO Warp** - Install from https://github.com/minio/warp 12 | 13 | ## Configuration 14 | 15 | The script requires a `credentials.json` file in the same directory. 
A sample file is already provided with the following structure: 16 | 17 | ```json 18 | { 19 | "credentials": [ 20 | { 21 | "accessKey": "testtest", 22 | "secretKey": "testtest" 23 | } 24 | ] 25 | } 26 | ``` 27 | 28 | **Important**: The access key and secret key must both be set to `testtest` for the benchmark to work correctly. 29 | 30 | ## Running the Benchmark 31 | 32 | 1. Navigate to the benchmark directory: 33 | ```bash 34 | cd scripts/benchmark 35 | ``` 36 | 37 | 2. Make the script executable (if not already): 38 | ```bash 39 | chmod +x benchmark.sh 40 | ``` 41 | 42 | 3. Run the benchmark: 43 | ```bash 44 | ./benchmark.sh 45 | ``` 46 | 47 | ## What the Script Does 48 | 49 | The benchmark script performs the following steps: 50 | 51 | 1. **Starts NATS Server** - Launches a local NATS server with JetStream enabled 52 | 2. **Starts NATS-S3 Gateway** - Pulls & runs the latest NATS-S3 gateway in a Docker container, connected to the NATS server 53 | 3. **Runs Benchmark** - Executes MinIO's `warp` tool with a mixed workload: 54 | - Duration: 20 seconds 55 | - Object size: 1MB 56 | - Concurrent operations: 1 57 | - Number of objects: 10 58 | - Operation distribution: 59 | - GET: 1x 60 | - STAT: 1x 61 | - PUT: 2x 62 | - DELETE: 1x 63 | 64 | 4. 
**Cleanup** - Automatically stops and removes all containers and cleans up JetStream data, even if the script is interrupted 65 | 66 | ## Customizing the Benchmark 67 | 68 | You can modify the benchmark parameters by editing the `warp` command in the script: 69 | 70 | ```bash 71 | warp mixed --host=localhost:5222 \ 72 | --access-key=testtest \ 73 | --secret-key=testtest \ 74 | --duration 20s \ # Test duration 75 | --obj.size 1M \ # Size of test objects 76 | --concurrent=1 \ # Number of concurrent operations 77 | --objects=10 \ # Number of objects to use 78 | --get-distrib 1 \ # GET operation weight 79 | --stat-distrib 1 \ # STAT operation weight 80 | --put-distrib 2 \ # PUT operation weight 81 | --delete-distrib 1 # DELETE operation weight 82 | ``` 83 | 84 | ## Troubleshooting 85 | 86 | ### Port Already in Use 87 | 88 | If you get an error about ports already being in use, ensure no other NATS server or S3-compatible service is running on: 89 | - Port 4222 (NATS) 90 | - Port 5222 (NATS-S3 Gateway) 91 | 92 | ### Docker Permission Issues 93 | 94 | If you encounter Docker permission issues, ensure your user is in the `docker` group: 95 | ```bash 96 | sudo usermod -aG docker $USER 97 | # Log out and back in for changes to take effect 98 | ``` 99 | 100 | ### Cleanup Not Working 101 | 102 | If the cleanup doesn't work properly, you can manually clean up: 103 | ```bash 104 | # Stop all containers 105 | docker stop $(docker ps -a -q --filter ancestor=wpnpeiris/nats-s3:v0.3.8) 106 | docker rm $(docker ps -a -q --filter ancestor=wpnpeiris/nats-s3:v0.3.8) 107 | 108 | # Kill any NATS server processes 109 | pkill nats-server 110 | 111 | # Remove JetStream data 112 | rm -rf /tmp/nats/jetstream 113 | ``` 114 | 115 | ## Output 116 | 117 | The benchmark will display real-time statistics including: 118 | - Operations per second 119 | - Throughput (MB/s) 120 | - Latency percentiles (min, avg, max, p50, p90, p99) 121 | - Error rates 122 | 123 | A summary report will be displayed at 
the end of the benchmark run. 124 | -------------------------------------------------------------------------------- /CONFORMANCE.md: -------------------------------------------------------------------------------- 1 | # S3 API Conformance 2 | 3 | This project runs comprehensive AWS CLI-based S3 API compatibility tests in CI to ensure nats-s3 maintains high compatibility with the S3 standard. 4 | 5 | ## Test Coverage 6 | 7 | The CI pipeline runs two test suites: 8 | 9 | ### 1. Basic Conformance Tests (`ci-conformance.sh`) 10 | Quick smoke test covering essential S3 operations: 11 | - ✅ Bucket create (`aws s3 mb`) 12 | - ✅ Service-level bucket listing (`aws s3 ls`) 13 | - ✅ Object upload (`aws s3 cp`) 14 | - ✅ Bucket content listing 15 | - ✅ Object metadata (`aws s3api head-object`) 16 | - ✅ Object download with content verification 17 | - ✅ Object deletion 18 | - ✅ Bucket deletion 19 | 20 | ### 2. Full Conformance Tests (`ci-conformance-full.sh`) 21 | Comprehensive test suite covering advanced S3 features: 22 | 23 | **Core Operations:** 24 | - ✅ Bucket lifecycle (create, duplicate detection, delete) 25 | - ✅ Object CRUD (put, get, head, delete) 26 | - ✅ Content integrity verification 27 | - ✅ Service-level operations 28 | 29 | **Advanced Features:** 30 | - ✅ **Metadata & Headers**: Content-Type, custom metadata (`x-amz-meta-*`) 31 | - ✅ **Object Copy**: Server-side copy operations (`CopyObject`) 32 | - ✅ **Hierarchical Listing**: Delimiter and prefix support for directory-like navigation 33 | - ✅ **Range Requests**: HTTP Range GET for partial content (`Range: bytes=0-9`) 34 | - ✅ **Multipart Uploads**: Large file uploads via multipart API 35 | - Initiate multipart upload 36 | - Upload parts 37 | - Complete multipart upload 38 | - ✅ **Batch Operations**: Delete multiple objects (`DeleteObjects`) 39 | - ✅ **Presigned URLs**: Time-limited pre-authenticated URLs 40 | - ✅ **Bucket Validation**: Non-empty bucket deletion prevention 41 | 42 | **Current Status:** 25/25 
tests passing (100% pass rate) 43 | 44 | ## S3 API Feature Matrix 45 | 46 | | Feature Category | Support Status | Notes | 47 | |-----------------|----------------|-------| 48 | | **Bucket Operations** | ✅ Full | Create, Delete, List with validation | 49 | | **Object Operations** | ✅ Full | PUT, GET, HEAD, DELETE, Copy | 50 | | **Multipart Uploads** | ✅ Full | Complete multipart workflow | 51 | | **Authentication** | ✅ Full | AWS SigV4 (headers + presigned URLs) | 52 | | **Metadata** | ✅ Full | Content-Type, custom metadata | 53 | | **Range Requests** | ✅ Full | HTTP Range GET (RFC 7233) | 54 | | **Batch Operations** | ✅ Full | DeleteObjects | 55 | | **Hierarchical Listing** | ✅ Full | Delimiter/prefix support | 56 | | **Presigned URLs** | ✅ Full | Query-string authentication | 57 | | **ACLs** | ⚠️ Planned | Returns 501 Not Implemented | 58 | | **Versioning** | ⚠️ Planned | Returns 501 Not Implemented | 59 | | **Lifecycle Policies** | ⚠️ Planned | Returns 501 Not Implemented | 60 | 61 | ## Running Tests Locally 62 | 63 | ```bash 64 | # Start NATS with JetStream 65 | docker run -d --name nats-js -p 4222:4222 nats:latest -js 66 | 67 | # Build and start gateway 68 | make build 69 | ./bin/nats-s3 --listen 0.0.0.0:5222 \ 70 | --natsServers nats://127.0.0.1:4222 \ 71 | --s3.credentials credentials.json 72 | 73 | # Run basic conformance tests 74 | AWS_ACCESS_KEY_ID=my-access-key \ 75 | AWS_SECRET_ACCESS_KEY=my-secret-key \ 76 | AWS_DEFAULT_REGION=us-east-1 \ 77 | bash scripts/ci-conformance.sh 78 | 79 | # Run full conformance tests 80 | AWS_ACCESS_KEY_ID=my-access-key \ 81 | AWS_SECRET_ACCESS_KEY=my-secret-key \ 82 | AWS_DEFAULT_REGION=us-east-1 \ 83 | bash scripts/ci-conformance-full.sh 84 | ``` 85 | 86 | ## CI Workflow 87 | 88 | The GitHub Actions workflow: 89 | 1. Starts NATS server with JetStream enabled 90 | 2. Builds the nats-s3 gateway 91 | 3. Runs both basic and full conformance test suites 92 | 4. 
Reports results and uploads logs on failure 93 | 94 | [![Conformance](https://github.com/wpnpeiris/nats-s3/actions/workflows/conformance.yml/badge.svg)](https://github.com/wpnpeiris/nats-s3/actions/workflows/conformance.yml) 95 | 96 | ## Artifacts & Debugging 97 | 98 | - Test results are saved to `conformance-full.txt` 99 | - On failures, the workflow dumps `gateway.log` for debugging 100 | - Coverage reports available as workflow artifacts 101 | -------------------------------------------------------------------------------- /internal/streams/sigv4_streams.go: -------------------------------------------------------------------------------- 1 | package streams 2 | 3 | import ( 4 | "bufio" 5 | "errors" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "strconv" 10 | "strings" 11 | ) 12 | 13 | // Constants used by SigV4 streaming payloads 14 | const ( 15 | SigV4StreamingPayload = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" 16 | SigV4StreamingPayloadTrailer = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER" 17 | ) 18 | 19 | // IsSigV4StreamingPayload reports whether the request body uses SigV4 streaming chunks. 20 | func IsSigV4StreamingPayload(r *http.Request) bool { 21 | v := r.Header.Get("x-amz-content-sha256") 22 | return v == SigV4StreamingPayload || v == SigV4StreamingPayloadTrailer 23 | } 24 | 25 | // SigV4StreamReader decodes a SigV4 streaming-chunked body, exposing only the decoded payload. 26 | type SigV4StreamReader struct { 27 | br *bufio.Reader 28 | remain int64 // bytes remaining in current chunk; -1 means need to read a new chunk header 29 | done bool 30 | closer io.Closer 31 | } 32 | 33 | // NewSigV4StreamReader wraps an encoded body and returns a reader that yields decoded payload bytes. 
34 | func NewSigV4StreamReader(rc io.ReadCloser) io.ReadCloser { 35 | return &SigV4StreamReader{br: bufio.NewReader(rc), remain: -1, closer: rc} 36 | } 37 | 38 | func (r *SigV4StreamReader) Read(p []byte) (int, error) { 39 | if r.done { 40 | return 0, io.EOF 41 | } 42 | 43 | if r.remain <= 0 { 44 | line, err := r.br.ReadString('\n') 45 | if err != nil { 46 | return 0, err 47 | } 48 | line = strings.TrimRight(line, "\r\n") 49 | sizeStr := line 50 | if i := strings.IndexByte(line, ';'); i >= 0 { 51 | sizeStr = line[:i] 52 | } 53 | sizeStr = strings.TrimSpace(sizeStr) 54 | if sizeStr == "" { 55 | return 0, fmt.Errorf("invalid sigv4-chunked size line: empty") 56 | } 57 | sz, err := strconv.ParseInt(sizeStr, 16, 64) 58 | if err != nil { 59 | return 0, fmt.Errorf("invalid sigv4-chunked size: %w", err) 60 | } 61 | if sz == 0 { 62 | for { 63 | l, err := r.br.ReadString('\n') 64 | if err != nil { 65 | return 0, err 66 | } 67 | if l == "\r\n" || l == "\n" { 68 | break 69 | } 70 | } 71 | r.done = true 72 | return 0, io.EOF 73 | } 74 | r.remain = sz 75 | } 76 | 77 | toRead := int64(len(p)) 78 | if toRead > r.remain { 79 | toRead = r.remain 80 | } 81 | n, err := io.ReadFull(r.br, p[:toRead]) 82 | r.remain -= int64(n) 83 | if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { 84 | return n, err 85 | } 86 | if r.remain == 0 { 87 | if _, err := r.br.ReadByte(); err != nil { // '\r' 88 | return n, err 89 | } 90 | if _, err := r.br.ReadByte(); err != nil { // '\n' 91 | return n, err 92 | } 93 | r.remain = -1 94 | } 95 | if n == 0 && !r.done { 96 | return 0, io.EOF 97 | } 98 | return n, nil 99 | } 100 | 101 | func (r *SigV4StreamReader) Close() error { 102 | if r.closer != nil { 103 | return r.closer.Close() 104 | } 105 | return nil 106 | } 107 | 108 | // DecodedContentLength returns the parsed value of x-amz-decoded-content-length, if present and valid. 
109 | func DecodedContentLength(r *http.Request) (int64, bool) { 110 | v := r.Header.Get("x-amz-decoded-content-length") 111 | if v == "" { 112 | return 0, false 113 | } 114 | n, err := strconv.ParseInt(v, 10, 64) 115 | if err != nil { 116 | return 0, false 117 | } 118 | return n, true 119 | } 120 | 121 | var ( 122 | ErrDecodedLengthExceedsLimit = errors.New("decoded length exceeds limit") 123 | ErrDecodedLengthMismatch = errors.New("decoded length mismatch") 124 | ) 125 | 126 | // CheckDecodedLengthLimit returns ErrDecodedLengthExceedsLimit if the decoded length header 127 | // is present and exceeds the provided limit. 128 | func CheckDecodedLengthLimit(r *http.Request, limit int64) error { 129 | if dl, ok := DecodedContentLength(r); ok && dl > limit { 130 | return ErrDecodedLengthExceedsLimit 131 | } 132 | return nil 133 | } 134 | 135 | // CheckDecodedLengthMatches returns ErrDecodedLengthMismatch if the decoded length header 136 | // is present and does not match the provided actual length. 137 | func CheckDecodedLengthMatches(r *http.Request, actual int64) error { 138 | if dl, ok := DecodedContentLength(r); ok && dl != actual { 139 | return ErrDecodedLengthMismatch 140 | } 141 | return nil 142 | } 143 | 144 | // NewLimitedSigV4StreamReader returns an io.ReadCloser that decodes the SigV4 stream and enforces a byte limit. 
145 | func NewLimitedSigV4StreamReader(rc io.ReadCloser, limit int64) io.ReadCloser { 146 | dec := NewSigV4StreamReader(rc) 147 | return &limitedRC{Reader: io.LimitReader(dec, limit), Closer: dec} 148 | } 149 | 150 | type limitedRC struct { 151 | Reader io.Reader 152 | Closer io.Closer 153 | } 154 | 155 | func (l *limitedRC) Read(p []byte) (int, error) { return l.Reader.Read(p) } 156 | func (l *limitedRC) Close() error { 157 | if l.Closer != nil { 158 | return l.Closer.Close() 159 | } 160 | return nil 161 | } 162 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # NATS-S3 Examples 2 | 3 | This directory contains example configurations and integration guides for NATS-S3. 4 | 5 | ## Quick Start with Docker Compose 6 | 7 | The fastest way to try NATS-S3: 8 | 9 | ```bash 10 | cd examples 11 | docker-compose up -d 12 | ``` 13 | 14 | Then test with AWS CLI: 15 | 16 | ```bash 17 | export AWS_ACCESS_KEY_ID=demo-access-key 18 | export AWS_SECRET_ACCESS_KEY=demo-secret-key 19 | export AWS_DEFAULT_REGION=us-east-1 20 | 21 | # Create a bucket 22 | aws s3 mb s3://my-bucket --endpoint-url=http://localhost:5222 23 | 24 | # Upload a file 25 | echo "Hello NATS-S3!" 
> test.txt 26 | aws s3 cp test.txt s3://my-bucket/ --endpoint-url=http://localhost:5222 27 | 28 | # List objects 29 | aws s3 ls s3://my-bucket --endpoint-url=http://localhost:5222 30 | 31 | # Download the file 32 | aws s3 cp s3://my-bucket/test.txt downloaded.txt --endpoint-url=http://localhost:5222 33 | ``` 34 | 35 | ## Directory Structure 36 | 37 | ``` 38 | examples/ 39 | ├── docker-compose.yml # Complete Docker Compose setup 40 | ├── credentials.json # Example S3 credentials 41 | ├── kubernetes/ # Kubernetes deployment examples (coming soon) 42 | ├── rclone/ # Rclone integration (coming soon) 43 | ├── restic/ # Restic backup integration (coming soon) 44 | └── terraform/ # Terraform examples (coming soon) 45 | ``` 46 | 47 | ## Integration Examples 48 | 49 | ### AWS SDK (Python/Boto3) 50 | 51 | ```python 52 | import boto3 53 | 54 | s3 = boto3.client( 55 | 's3', 56 | endpoint_url='http://localhost:5222', 57 | aws_access_key_id='demo-access-key', 58 | aws_secret_access_key='demo-secret-key', 59 | region_name='us-east-1' 60 | ) 61 | 62 | # Create bucket 63 | s3.create_bucket(Bucket='my-bucket') 64 | 65 | # Upload file 66 | s3.upload_file('local-file.txt', 'my-bucket', 'remote-file.txt') 67 | 68 | # List objects 69 | response = s3.list_objects_v2(Bucket='my-bucket') 70 | for obj in response.get('Contents', []): 71 | print(obj['Key']) 72 | ``` 73 | 74 | ### AWS SDK (Node.js) 75 | 76 | ```javascript 77 | const AWS = require('aws-sdk'); 78 | 79 | const s3 = new AWS.S3({ 80 | endpoint: 'http://localhost:5222', 81 | accessKeyId: 'demo-access-key', 82 | secretAccessKey: 'demo-secret-key', 83 | region: 'us-east-1', 84 | s3ForcePathStyle: true 85 | }); 86 | 87 | // Create bucket 88 | s3.createBucket({ Bucket: 'my-bucket' }, (err, data) => { 89 | if (err) console.error(err); 90 | else console.log('Bucket created:', data); 91 | }); 92 | 93 | // Upload file 94 | s3.putObject({ 95 | Bucket: 'my-bucket', 96 | Key: 'file.txt', 97 | Body: 'Hello from Node.js!' 
}, (err, data) => {
  if (err) console.error(err);
  else console.log('File uploaded:', data);
});
```

### AWS SDK (Go)

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-east-1"),
		Endpoint: aws.String("http://localhost:5222"),
		Credentials: credentials.NewStaticCredentials(
			"demo-access-key",
			"demo-secret-key",
			"",
		),
		S3ForcePathStyle: aws.Bool(true),
	}))

	svc := s3.New(sess)

	// Create bucket
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("my-bucket"),
	})
	if err != nil {
		panic(err)
	}

	// List buckets
	result, err := svc.ListBuckets(nil)
	if err != nil {
		panic(err)
	}

	for _, b := range result.Buckets {
		fmt.Printf("* %s\n", aws.StringValue(b.Name))
	}
}
```

## Production Deployment

For production use:

1. **Use strong credentials**: Generate secure access/secret key pairs
2. **Enable TLS**: Put NATS-S3 behind a reverse proxy (nginx, traefik) with TLS
3. **Configure NATS clustering**: Use NATS cluster for high availability
4. **Monitor with Prometheus**: NATS-S3 exposes metrics at `/metrics`
5. **Set resource limits**: Configure appropriate memory/CPU limits
6. **Use persistent storage**: Configure NATS JetStream with file storage

See the main [README.md](../README.md) for detailed configuration options.

## Troubleshooting

### Connection Refused
```bash
# Check if services are running
docker-compose ps

# Check logs
docker-compose logs nats-s3
```

### Authentication Errors
```bash
# Verify credentials match
cat examples/credentials.json
echo $AWS_ACCESS_KEY_ID
echo $AWS_SECRET_ACCESS_KEY
```

### Bucket/Object Not Found
```bash
# Check NATS JetStream status
docker exec nats-server nats stream ls
docker exec nats-server nats object store ls
```

## Contributing

Have an integration example to share? Please open a pull request!
--------------------------------------------------------------------------------
/internal/server/gateway_server.go:
--------------------------------------------------------------------------------
package server

import (
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/go-kit/log"
	"github.com/gorilla/mux"
	"github.com/nats-io/nats.go"
	"github.com/wpnpeiris/nats-s3/internal/credential"
	"github.com/wpnpeiris/nats-s3/internal/logging"
	"github.com/wpnpeiris/nats-s3/internal/metrics"
	"github.com/wpnpeiris/nats-s3/internal/s3api"
)

// GatewayServerOptions carries gateway-specific tuning knobs.
type GatewayServerOptions struct {
	// NATSReplicas is a JetStream replica count — presumably the replica
	// count for created object stores; TODO confirm against where this
	// struct is consumed.
	NATSReplicas int
}

// Config holds HTTP server configuration options.
type Config struct {
	Endpoint          string        // listen address for the HTTP server
	ReadTimeout       time.Duration // see http.Server.ReadTimeout
	WriteTimeout      time.Duration // see http.Server.WriteTimeout
	IdleTimeout       time.Duration // see http.Server.IdleTimeout
	ReadHeaderTimeout time.Duration // see http.Server.ReadHeaderTimeout
}

// GatewayServer ties together the logger, the HTTP configuration, and the
// S3 API implementation backed by NATS.
type GatewayServer struct {
	logger    log.Logger
	config    Config
	s3Gateway *s3api.S3Gateway
}

// LogAndExit logs an error message to stderr and exits with status code 1.
func LogAndExit(msg string) {
	fmt.Fprintln(os.Stderr, msg)
	os.Exit(1)
}

// NewGatewayServer constructs a server that exposes the S3 API backed by NATS.
// Returns an error if initialization fails (e.g., cannot connect to NATS).
//
// Note: the helpers called here (initializeCredentialStore, and loadNatsOptions
// on NKey failure) call os.Exit directly on configuration errors, so this
// constructor may terminate the process instead of returning an error.
func NewGatewayServer(opts *Options) (*GatewayServer, error) {
	logger := logging.NewLogger(logging.Config{
		Format: opts.LogFormat,
		Level:  opts.LogLevel,
	})

	natsOptions := loadNatsOptions(logger, opts)
	credStore := initializeCredentialStore(logger, opts)
	s3Gateway, err := s3api.NewS3Gateway(logger,
		opts.NatsServers,
		opts.Replicas,
		natsOptions,
		credStore)
	if err != nil {
		return nil, err
	}

	// Copy the HTTP timeouts from the parsed options into the server config.
	config := Config{
		Endpoint:          opts.ServerListen,
		ReadTimeout:       opts.ReadTimeout,
		WriteTimeout:      opts.WriteTimeout,
		IdleTimeout:       opts.IdleTimeout,
		ReadHeaderTimeout: opts.ReadHeaderTimeout,
	}
	return &GatewayServer{logger, config, s3Gateway}, nil
}

// initializeCredentialStore loads the S3 credentials from the configured file path.
// It terminates the process (os.Exit(1)) when the path is unset or the file
// cannot be loaded.
func initializeCredentialStore(logger log.Logger, opts *Options) *credential.StaticFileStore {
	credentialsFile := opts.CredentialsFile
	if credentialsFile == "" {
		logging.Error(logger, "msg", "Credentials file is required", "flag", "-s3.credentials")
		os.Exit(1)
	}

	credStore, err := credential.NewStaticFileStore(credentialsFile)
	if err != nil {
		logging.Error(logger, "msg", "Failed to load credentials file", "file", credentialsFile, "err", err)
		os.Exit(1)
	}
	logging.Info(logger, "msg", "Loaded S3 credentials", "count", credStore.Count(), "store", credStore.GetName(), "file", credentialsFile)
	return credStore
}

// loadNatsOptions builds NATS connection options based on the configured authentication type.
90 | func loadNatsOptions(logger log.Logger, opts *Options) []nats.Option { 91 | var natsOptions []nats.Option 92 | 93 | if opts.User != "" && opts.Password != "" { 94 | logging.Info(logger, "msg", "Using NATS username/password authentication") 95 | natsOptions = append(natsOptions, nats.UserInfo(opts.User, opts.Password)) 96 | } else if opts.Token != "" { 97 | logging.Info(logger, "msg", "Using NATS token authentication") 98 | natsOptions = append(natsOptions, nats.Token(opts.Token)) 99 | } else if opts.NkeyFile != "" { 100 | logging.Info(logger, "msg", "Using NATS NKey authentication", "file", opts.NkeyFile) 101 | opt, err := nats.NkeyOptionFromSeed(opts.NkeyFile) 102 | if err != nil { 103 | logging.Error(logger, "msg", "Failed to load NKey file", "file", opts.NkeyFile, "err", err) 104 | os.Exit(1) 105 | } 106 | natsOptions = append(natsOptions, opt) 107 | } else if opts.CredsFile != "" { 108 | logging.Info(logger, "msg", "Using NATS JWT/Credentials authentication", "file", opts.CredsFile) 109 | natsOptions = append(natsOptions, nats.UserCredentials(opts.CredsFile)) 110 | } else { 111 | logging.Info(logger, "msg", "Using NATS anonymous connection (no authentication)") 112 | } 113 | return natsOptions 114 | } 115 | 116 | // Start starts the HTTP server with the provided configuration and blocks until it exits. 117 | func (s *GatewayServer) Start() error { 118 | logging.Info(s.logger, "msg", fmt.Sprintf("Starting NATS S3 server...")) 119 | router := mux.NewRouter().SkipClean(true) 120 | 121 | metrics.RegisterMetricEndpoint(router) 122 | s.s3Gateway.RegisterRoutes(router) 123 | 124 | srv := &http.Server{ 125 | Addr: s.config.Endpoint, 126 | Handler: router, 127 | // ReadTimeout covers the time from connection accept to request body read completion. 128 | // For S3-compatible operations, we need to support large uploads (up to 5GB single PUT). 
129 | ReadTimeout: s.config.ReadTimeout, 130 | // WriteTimeout covers the time from request header read to response write completion. 131 | // For S3 downloads of large objects (multi-GB), we need generous timeout. 132 | WriteTimeout: s.config.WriteTimeout, 133 | // IdleTimeout is the maximum time to wait for the next request when keep-alives are enabled. 134 | IdleTimeout: s.config.IdleTimeout, 135 | // ReadHeaderTimeout prevents slowloris attacks by limiting time to read request headers. 136 | ReadHeaderTimeout: s.config.ReadHeaderTimeout, 137 | // MaxHeaderBytes limits the size of request headers to prevent memory exhaustion. 138 | MaxHeaderBytes: 1 << 20, // 1 MB 139 | } 140 | 141 | logging.Info(s.logger, "msg", fmt.Sprintf("Listening for HTTP requests on %s", s.config.Endpoint)) 142 | return srv.ListenAndServe() 143 | } 144 | -------------------------------------------------------------------------------- /internal/s3api/s3_bucket_handlers.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "context" 5 | "encoding/xml" 6 | "errors" 7 | "net/http" 8 | "time" 9 | 10 | "github.com/aws/aws-sdk-go/aws" 11 | "github.com/aws/aws-sdk-go/service/s3" 12 | "github.com/gorilla/mux" 13 | "github.com/wpnpeiris/nats-s3/internal/client" 14 | "github.com/wpnpeiris/nats-s3/internal/model" 15 | ) 16 | 17 | // BucketsResult is the XML envelope for ListBuckets responses. 18 | type BucketsResult struct { 19 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult"` 20 | Owner *s3.Owner 21 | Buckets []*s3.Bucket `xml:"Buckets>Bucket"` 22 | } 23 | 24 | // LocationConstraintResponse is the XML response for GetBucketLocation. 
25 | type LocationConstraintResponse struct { 26 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint"` 27 | Location string `xml:",chardata"` 28 | } 29 | 30 | // CreateBucket handles S3 CreateBucket by creating a JetStream Object Store 31 | // bucket and returning a minimal S3-compatible XML response. 32 | func (s *S3Gateway) CreateBucket(w http.ResponseWriter, r *http.Request) { 33 | bucket := mux.Vars(r)["bucket"] 34 | os, err := s.client.CreateBucket(r.Context(), bucket) 35 | if err != nil { 36 | if errors.Is(err, client.ErrBucketAlreadyExists) { 37 | model.WriteErrorResponse(w, r, model.ErrBucketAlreadyOwnedByYou) 38 | return 39 | } 40 | model.WriteErrorResponse(w, r, model.ErrInternalError) 41 | return 42 | } 43 | 44 | buckets := []*s3.Bucket{{ 45 | Name: aws.String(os.Bucket()), 46 | CreationDate: aws.Time(time.Now()), 47 | }} 48 | 49 | response := BucketsResult{ 50 | Buckets: buckets, 51 | } 52 | 53 | model.WriteXMLResponse(w, r, http.StatusOK, response) 54 | } 55 | 56 | // DeleteBucket deletes the specified bucket and responds with 204 No Content. 57 | // Returns NoSuchBucket if the bucket does not exist. 58 | // Returns BucketNotEmpty if the bucket contains objects. 
func (s *S3Gateway) DeleteBucket(w http.ResponseWriter, r *http.Request) {
	bucket := mux.Vars(r)["bucket"]

	// Check if bucket is empty before attempting deletion.
	// NOTE(review): this list-then-delete sequence is not atomic; an object
	// written between the check and the delete below would be removed along
	// with the bucket — confirm this is acceptable.
	objects, err := s.client.ListObjects(r.Context(), bucket)
	if err != nil {
		if errors.Is(err, client.ErrBucketNotFound) {
			model.WriteErrorResponse(w, r, model.ErrNoSuchBucket)
			return
		}
		// If no objects found, that's actually fine - bucket is empty
		if !errors.Is(err, client.ErrObjectNotFound) {
			model.WriteErrorResponse(w, r, model.ErrInternalError)
			return
		}
	} else if len(objects) > 0 {
		// Bucket has objects, cannot delete
		model.WriteErrorResponse(w, r, model.ErrBucketNotEmpty)
		return
	}

	// Bucket is empty, proceed with deletion
	err = s.client.DeleteBucket(r.Context(), bucket)
	if err != nil {
		if errors.Is(err, client.ErrBucketNotFound) {
			model.WriteErrorResponse(w, r, model.ErrNoSuchBucket)
			return
		}
		model.WriteErrorResponse(w, r, model.ErrInternalError)
		return
	}

	model.WriteEmptyResponse(w, r, http.StatusNoContent)
}

// ListBuckets enumerates existing JetStream Object Store buckets and returns
// a simple S3-compatible XML response.
func (s *S3Gateway) ListBuckets(w http.ResponseWriter, r *http.Request) {
	entries, err := s.client.ListBuckets(r.Context())
	if err != nil {
		model.WriteErrorResponse(w, r, model.ErrInternalError)
		return
	}

	var buckets []*s3.Bucket

	// Drain every entry returned by the client. time.Now() is used as a
	// placeholder creation date rather than the bucket's real creation time.
	for entry := range entries {
		buckets = append(buckets, &s3.Bucket{
			Name:         aws.String(entry.Bucket()),
			CreationDate: aws.Time(time.Now())},
		)
	}

	response := BucketsResult{
		Buckets: buckets,
	}

	model.WriteXMLResponse(w, r, http.StatusOK, response)
}

// HeadBucket checks if a bucket exists and the user has permission to access it.
// Returns 200 OK if bucket exists, 404 if not found.
func (s *S3Gateway) HeadBucket(w http.ResponseWriter, r *http.Request) {
	bucket := mux.Vars(r)["bucket"]

	err := s.bucketExists(r.Context(), bucket)
	if err != nil {
		if errors.Is(err, client.ErrBucketNotFound) {
			// HEAD responses carry no body, so only a status code is written.
			w.WriteHeader(http.StatusNotFound)
			return
		}
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	w.WriteHeader(http.StatusOK)
}

// GetBucketLocation returns the region where the bucket resides.
func (s *S3Gateway) GetBucketLocation(w http.ResponseWriter, r *http.Request) {
	bucket := mux.Vars(r)["bucket"]

	err := s.bucketExists(r.Context(), bucket)
	if err != nil {
		if errors.Is(err, client.ErrBucketNotFound) {
			model.WriteErrorResponse(w, r, model.ErrNoSuchBucket)
			return
		}
		model.WriteErrorResponse(w, r, model.ErrInternalError)
		return
	}

	// Return empty location constraint for default location.
	// NOTE(review): an empty LocationConstraint conventionally denotes the
	// default region (us-east-1) — confirm this is the intended value.
	response := LocationConstraintResponse{
		Location: "",
	}

	model.WriteXMLResponse(w, r, http.StatusOK, response)
}

// bucketExists checks if a bucket exists by attempting to list objects.
160 | func (s *S3Gateway) bucketExists(ctx context.Context, bucket string) error { 161 | _, err := s.client.ListObjects(ctx, bucket) 162 | if err != nil { 163 | if errors.Is(err, client.ErrBucketNotFound) { 164 | return client.ErrBucketNotFound 165 | } 166 | if errors.Is(err, client.ErrObjectNotFound) { 167 | return nil 168 | } 169 | return err 170 | } 171 | return nil 172 | } 173 | -------------------------------------------------------------------------------- /internal/s3api/s3_bucket_handlers_test.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "encoding/xml" 5 | "net/http/httptest" 6 | "testing" 7 | 8 | "github.com/wpnpeiris/nats-s3/internal/logging" 9 | "github.com/wpnpeiris/nats-s3/internal/testutil" 10 | 11 | "github.com/gorilla/mux" 12 | "github.com/nats-io/nats.go" 13 | ) 14 | 15 | func TestListBuckets(t *testing.T) { 16 | s := testutil.StartJSServer(t) 17 | defer s.Shutdown() 18 | 19 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 20 | gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil) 21 | if err != nil { 22 | t.Fatalf("failed to create S3 gateway: %v", err) 23 | } 24 | 25 | // Create a couple of buckets so ListBuckets has content. 26 | natsEndpoint := s.Addr().String() 27 | nc, err := nats.Connect(natsEndpoint) 28 | // Avoid production panic handler during test shutdown. 
29 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 30 | defer nc.Close() 31 | 32 | js, err := nc.JetStream() 33 | if err != nil { 34 | t.Fatalf("JetStream failed: %v", err) 35 | } 36 | for _, b := range []string{"bucket1", "bucket2"} { 37 | if _, err := js.CreateObjectStore(&nats.ObjectStoreConfig{Bucket: b}); err != nil { 38 | t.Fatalf("create object store %s failed: %v", b, err) 39 | } 40 | } 41 | 42 | r := mux.NewRouter() 43 | gw.RegisterRoutes(r) 44 | 45 | req := httptest.NewRequest("GET", "/", nil) 46 | rr := httptest.NewRecorder() 47 | r.ServeHTTP(rr, req) 48 | 49 | if rr.Code != 200 { 50 | t.Fatalf("unexpected status: got %d body=%s", rr.Code, rr.Body.String()) 51 | } 52 | 53 | // Minimal struct to pull out bucket names from the XML 54 | var parsed struct { 55 | Names []string `xml:"Buckets>Bucket>Name"` 56 | } 57 | if err := xml.Unmarshal(rr.Body.Bytes(), &parsed); err != nil { 58 | t.Fatalf("unmarshal xml failed: %v\nxml=%s", err, rr.Body.String()) 59 | } 60 | 61 | want := map[string]bool{"bucket1": false, "bucket2": false} 62 | for _, n := range parsed.Names { 63 | if _, ok := want[n]; ok { 64 | want[n] = true 65 | } 66 | } 67 | for name, found := range want { 68 | if !found { 69 | t.Fatalf("expected bucket %q in response", name) 70 | } 71 | } 72 | } 73 | 74 | func TestCreateBucket(t *testing.T) { 75 | s := testutil.StartJSServer(t) 76 | defer s.Shutdown() 77 | 78 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 79 | gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil) 80 | if err != nil { 81 | t.Fatalf("failed to create S3 gateway: %v", err) 82 | } 83 | 84 | r := mux.NewRouter() 85 | gw.RegisterRoutes(r) 86 | 87 | bucket := "created-bucket" 88 | req := httptest.NewRequest("PUT", "/"+bucket, nil) 89 | rr := httptest.NewRecorder() 90 | r.ServeHTTP(rr, req) 91 | 92 | if rr.Code != 200 { 93 | t.Fatalf("unexpected status: got %d body=%s", rr.Code, rr.Body.String()) 94 | } 95 | 96 | // Verify bucket exists in NATS by opening ObjectStore 97 | 
natsEndpoint := s.Addr().String() 98 | nc, err := nats.Connect(natsEndpoint) 99 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 100 | defer nc.Close() 101 | js, err := nc.JetStream() 102 | if err != nil { 103 | t.Fatalf("JetStream failed: %v", err) 104 | } 105 | if _, err := js.ObjectStore(bucket); err != nil { 106 | t.Fatalf("expected created object store %q, got error: %v", bucket, err) 107 | } 108 | } 109 | 110 | func TestCreateBucketDuplicateFails(t *testing.T) { 111 | s := testutil.StartJSServer(t) 112 | defer s.Shutdown() 113 | 114 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 115 | gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil) 116 | if err != nil { 117 | t.Fatalf("failed to create S3 gateway: %v", err) 118 | } 119 | 120 | r := mux.NewRouter() 121 | gw.RegisterRoutes(r) 122 | 123 | bucket := "dup-bucket" 124 | 125 | // First create should succeed 126 | req1 := httptest.NewRequest("PUT", "/"+bucket, nil) 127 | rr1 := httptest.NewRecorder() 128 | r.ServeHTTP(rr1, req1) 129 | if rr1.Code != 200 { 130 | t.Fatalf("unexpected status on first create: got %d body=%s", rr1.Code, rr1.Body.String()) 131 | } 132 | 133 | // Second create should fail with conflict 134 | req2 := httptest.NewRequest("PUT", "/"+bucket, nil) 135 | rr2 := httptest.NewRecorder() 136 | r.ServeHTTP(rr2, req2) 137 | if rr2.Code != 409 { 138 | t.Fatalf("expected 409 on duplicate create, got %d body=%s", rr2.Code, rr2.Body.String()) 139 | } 140 | } 141 | 142 | func TestCreateBucket_Replicated(t *testing.T) { 143 | servers := testutil.StartJSServerCluster(t) 144 | for _, s := range servers { 145 | defer s.Shutdown() 146 | } 147 | s := servers[0] 148 | 149 | logger := logging.NewLogger(logging.Config{Level: "debug"}) 150 | gw, err := NewS3Gateway(logger, s.ClientURL(), 3, nil, nil) 151 | if err != nil { 152 | t.Fatalf("failed to create S3 gateway: %v", err) 153 | } 154 | 155 | r := mux.NewRouter() 156 | gw.RegisterRoutes(r) 157 | 158 | bucket := "created-bucket" 159 | req := 
httptest.NewRequest("PUT", "/"+bucket, nil) 160 | rr := httptest.NewRecorder() 161 | r.ServeHTTP(rr, req) 162 | 163 | if rr.Code != 200 { 164 | t.Fatalf("unexpected status: got %d body=%s", rr.Code, rr.Body.String()) 165 | } 166 | 167 | // Verify bucket exists in NATS by opening ObjectStore 168 | natsEndpoint := s.Addr().String() 169 | nc, err := nats.Connect(natsEndpoint) 170 | if err != nil { 171 | t.Fatalf("failed to connect to NATS: %v", err) 172 | } 173 | nc.SetClosedHandler(func(_ *nats.Conn) {}) 174 | defer nc.Close() 175 | js, err := nc.JetStream() 176 | if err != nil { 177 | t.Fatalf("JetStream failed: %v", err) 178 | } 179 | 180 | objStore, err := js.ObjectStore(bucket) 181 | if err != nil { 182 | t.Fatalf("expected created object store %q, got error: %v", bucket, err) 183 | } 184 | 185 | objStoreStatus, err := objStore.Status() 186 | if err != nil { 187 | t.Fatalf("failed to get object store status: %v", err) 188 | } 189 | 190 | if objStoreStatus.Replicas() != 3 { 191 | t.Errorf("Expected 3 replicas, got %d", objStoreStatus.Replicas()) 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /examples/python/nats_s3_example.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | NATS-S3 Python Example using Boto3 4 | 5 | This example demonstrates how to use the NATS-S3 gateway with Python's boto3 library. 

Prerequisites:
    pip install boto3

Usage:
    python nats_s3_example.py
"""

import boto3
from botocore.exceptions import ClientError
import sys

# NATS-S3 Configuration
# These must match the gateway's listen address and its configured credentials.
ENDPOINT_URL = 'http://localhost:5222'
AWS_ACCESS_KEY_ID = 'demo-access-key'
AWS_SECRET_ACCESS_KEY = 'demo-secret-key'
AWS_REGION = 'us-east-1'

def create_s3_client():
    """Create and return an S3 client configured for NATS-S3"""
    return boto3.client(
        's3',
        endpoint_url=ENDPOINT_URL,
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION,
        # Disable SSL verification for local testing
        use_ssl=False,
        verify=False
    )

def main():
    """Run example S3 operations"""
    s3 = create_s3_client()
    bucket_name = 'python-example-bucket'

    print("=" * 60)
    print("NATS-S3 Python Example with Boto3")
    print("=" * 60)

    # Each numbered step exercises one S3 operation against the gateway;
    # the script cleans up after itself (deletes objects, then the bucket).
    try:
        # 1. Create bucket
        print(f"\n1. Creating bucket '{bucket_name}'...")
        s3.create_bucket(Bucket=bucket_name)
        print("   ✓ Bucket created successfully")

        # 2. List buckets
        print("\n2. Listing all buckets...")
        response = s3.list_buckets()
        for bucket in response['Buckets']:
            print(f"   - {bucket['Name']}")

        # 3. Create a test file and upload it
        print(f"\n3. Uploading object to '{bucket_name}'...")
        test_content = "Hello from NATS-S3!\nThis is a test file created by Python."
        test_key = 'test-file.txt'

        s3.put_object(
            Bucket=bucket_name,
            Key=test_key,
            Body=test_content.encode('utf-8'),
            ContentType='text/plain',
            Metadata={'created-by': 'python-example', 'version': '1.0'}
        )
        print(f"   ✓ Uploaded '{test_key}'")

        # 4. Upload another file with different content
        print(f"\n4. Uploading additional objects...")
        s3.put_object(
            Bucket=bucket_name,
            Key='data/numbers.txt',
            Body='1\n2\n3\n4\n5\n'.encode('utf-8')
        )
        s3.put_object(
            Bucket=bucket_name,
            Key='data/letters.txt',
            Body='a\nb\nc\nd\ne\n'.encode('utf-8')
        )
        print("   ✓ Uploaded 'data/numbers.txt'")
        print("   ✓ Uploaded 'data/letters.txt'")

        # 5. List objects in bucket
        print(f"\n5. Listing objects in '{bucket_name}'...")
        response = s3.list_objects_v2(Bucket=bucket_name)
        if 'Contents' in response:
            for obj in response['Contents']:
                print(f"   - {obj['Key']} ({obj['Size']} bytes)")
        else:
            print("   (bucket is empty)")

        # 6. Get object metadata
        print(f"\n6. Getting metadata for '{test_key}'...")
        response = s3.head_object(Bucket=bucket_name, Key=test_key)
        print(f"   - Content-Type: {response['ContentType']}")
        print(f"   - Content-Length: {response['ContentLength']} bytes")
        print(f"   - Last-Modified: {response['LastModified']}")
        if 'Metadata' in response:
            print(f"   - Custom Metadata: {response['Metadata']}")

        # 7. Download and read object
        print(f"\n7. Downloading '{test_key}'...")
        response = s3.get_object(Bucket=bucket_name, Key=test_key)
        content = response['Body'].read().decode('utf-8')
        # chr(10) is '\n'; the replace re-indents each line of the content.
        print(f"   Content:\n   {content.replace(chr(10), chr(10) + '   ')}")

        # 8. List objects with prefix (hierarchical listing)
        print(f"\n8. Listing objects with prefix 'data/'...")
        response = s3.list_objects_v2(Bucket=bucket_name, Prefix='data/')
        if 'Contents' in response:
            for obj in response['Contents']:
                print(f"   - {obj['Key']}")

        # 9. Copy object
        print(f"\n9. Copying object...")
        copy_source = {'Bucket': bucket_name, 'Key': test_key}
        s3.copy_object(
            CopySource=copy_source,
            Bucket=bucket_name,
            Key='test-file-copy.txt'
        )
        print("   ✓ Copied 'test-file.txt' to 'test-file-copy.txt'")

        # 10. Generate presigned URL
        print(f"\n10. Generating presigned URL for '{test_key}'...")
        url = s3.generate_presigned_url(
            'get_object',
            Params={'Bucket': bucket_name, 'Key': test_key},
            ExpiresIn=3600  # 1 hour
        )
        print(f"   URL: {url[:80]}...")

        # 11. Delete specific objects
        print(f"\n11. Deleting objects...")
        s3.delete_object(Bucket=bucket_name, Key='test-file-copy.txt')
        print("   ✓ Deleted 'test-file-copy.txt'")

        # 12. Batch delete
        print(f"\n12. Batch deleting remaining objects...")
        response = s3.list_objects_v2(Bucket=bucket_name)
        if 'Contents' in response:
            objects_to_delete = [{'Key': obj['Key']} for obj in response['Contents']]
            s3.delete_objects(
                Bucket=bucket_name,
                Delete={'Objects': objects_to_delete}
            )
            print(f"   ✓ Deleted {len(objects_to_delete)} objects")

        # 13. Delete bucket
        print(f"\n13. Deleting bucket '{bucket_name}'...")
        s3.delete_bucket(Bucket=bucket_name)
        print("   ✓ Bucket deleted successfully")

        print("\n" + "=" * 60)
        print("✓ All operations completed successfully!")
        print("=" * 60 + "\n")

    except ClientError as e:
        # boto3 wraps S3 error responses in ClientError; surface code + message.
        error_code = e.response['Error']['Code']
        error_message = e.response['Error']['Message']
        print(f"\n✗ Error ({error_code}): {error_message}", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"\n✗ Unexpected error: {e}", file=sys.stderr)
        sys.exit(1)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/internal/s3api/s3_object_tag_handlers.go:
--------------------------------------------------------------------------------
package s3api

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/url"
	"sort"
	"strings"

	"github.com/gorilla/mux"
	"github.com/wpnpeiris/nats-s3/internal/logging"
	"github.com/wpnpeiris/nats-s3/internal/model"
)

// Object tagging constants
const (
	// tagMetadataPrefix namespaces tag entries inside object metadata.
	tagMetadataPrefix = "x-amz-tag-"
	// The limits below mirror the AWS S3 object-tagging constraints
	// (10 tags per object, 128-char keys, 256-char values).
	maxTagsPerObject  = 10
	maxTagKeyLength   = 128
	maxTagValueLength = 256
)

// GetObjectTagging retrieves tags associated with an object.
25 | func (s *S3Gateway) GetObjectTagging(w http.ResponseWriter, r *http.Request) { 26 | bucket := mux.Vars(r)["bucket"] 27 | key := mux.Vars(r)["key"] 28 | 29 | logging.Info(s.logger, "msg", fmt.Sprintf("GetObjectTagging: bucket=%s key=%s", bucket, key)) 30 | 31 | // Get object info to retrieve metadata 32 | info, err := s.client.GetObjectInfo(r.Context(), bucket, key) 33 | if s.handleObjectError(w, r, err) { 34 | return 35 | } 36 | 37 | // Extract tags from metadata (prefix: x-amz-tag-) 38 | tags := extractTagsFromMetadata(info.Metadata) 39 | 40 | // Build response 41 | response := model.Tagging{ 42 | TagSet: model.TagSet{ 43 | Tags: tags, 44 | }, 45 | } 46 | 47 | model.WriteXMLResponse(w, r, http.StatusOK, response) 48 | } 49 | 50 | // PutObjectTagging sets or replaces tags on an existing object. 51 | func (s *S3Gateway) PutObjectTagging(w http.ResponseWriter, r *http.Request) { 52 | bucket := mux.Vars(r)["bucket"] 53 | key := mux.Vars(r)["key"] 54 | 55 | logging.Info(s.logger, "msg", fmt.Sprintf("PutObjectTagging: bucket=%s key=%s", bucket, key)) 56 | 57 | // Parse tagging XML from request body 58 | var tagging model.Tagging 59 | if err := xml.NewDecoder(r.Body).Decode(&tagging); err != nil { 60 | logging.Error(s.logger, "msg", "Error decoding tagging XML", "err", err) 61 | model.WriteErrorResponse(w, r, model.ErrMalformedXML) 62 | return 63 | } 64 | 65 | // Validate tags 66 | if err := validateTags(tagging.TagSet.Tags); err != nil { 67 | logging.Error(s.logger, "msg", "Tag validation failed", "err", err) 68 | model.WriteErrorResponse(w, r, model.ErrInvalidTag) 69 | return 70 | } 71 | 72 | // Convert tags to metadata format 73 | tagMetadata := tagsToMetadata(tagging.TagSet.Tags) 74 | 75 | // Update object tags in NATS 76 | err := s.client.PutObjectTags(r.Context(), bucket, key, tagMetadata) 77 | if s.handleObjectError(w, r, err) { 78 | return 79 | } 80 | 81 | model.WriteEmptyResponse(w, r, http.StatusOK) 82 | } 83 | 84 | // DeleteObjectTagging removes all tags 
from an object. 85 | func (s *S3Gateway) DeleteObjectTagging(w http.ResponseWriter, r *http.Request) { 86 | bucket := mux.Vars(r)["bucket"] 87 | key := mux.Vars(r)["key"] 88 | 89 | logging.Info(s.logger, "msg", fmt.Sprintf("DeleteObjectTagging: bucket=%s key=%s", bucket, key)) 90 | 91 | // Delete all tags from object 92 | err := s.client.DeleteObjectTags(r.Context(), bucket, key) 93 | if s.handleObjectError(w, r, err) { 94 | return 95 | } 96 | 97 | model.WriteEmptyResponse(w, r, http.StatusNoContent) 98 | } 99 | 100 | // extractTagsFromMetadata converts NATS metadata to S3 Tag array. 101 | // Filters metadata keys with "x-amz-tag-" prefix. 102 | func extractTagsFromMetadata(metadata map[string]string) []model.Tag { 103 | var tags []model.Tag 104 | 105 | if metadata == nil { 106 | return tags 107 | } 108 | 109 | for key, value := range metadata { 110 | if strings.HasPrefix(key, tagMetadataPrefix) { 111 | tagKey := strings.TrimPrefix(key, tagMetadataPrefix) 112 | tags = append(tags, model.Tag{ 113 | Key: tagKey, 114 | Value: value, 115 | }) 116 | } 117 | } 118 | 119 | // Sort tags by key for consistent output 120 | sort.Slice(tags, func(i, j int) bool { 121 | return tags[i].Key < tags[j].Key 122 | }) 123 | 124 | return tags 125 | } 126 | 127 | // tagsToMetadata converts S3 Tag array to NATS metadata format. 128 | func tagsToMetadata(tags []model.Tag) map[string]string { 129 | metadata := make(map[string]string) 130 | 131 | for _, tag := range tags { 132 | metaKey := tagMetadataPrefix + tag.Key 133 | metadata[metaKey] = tag.Value 134 | } 135 | 136 | return metadata 137 | } 138 | 139 | // validateTags validates tag constraints per AWS S3 specification. 
140 | func validateTags(tags []model.Tag) error { 141 | // Check max number of tags 142 | if len(tags) > maxTagsPerObject { 143 | return fmt.Errorf("tag count exceeds maximum of %d", maxTagsPerObject) 144 | } 145 | 146 | // Check for duplicate keys (case-sensitive) 147 | seen := make(map[string]bool) 148 | 149 | for _, tag := range tags { 150 | // Validate key length 151 | if len(tag.Key) == 0 { 152 | return fmt.Errorf("tag key cannot be empty") 153 | } 154 | if len(tag.Key) > maxTagKeyLength { 155 | return fmt.Errorf("tag key exceeds maximum length of %d: %s", maxTagKeyLength, tag.Key) 156 | } 157 | 158 | // Validate value length 159 | if len(tag.Value) > maxTagValueLength { 160 | return fmt.Errorf("tag value exceeds maximum length of %d for key: %s", maxTagValueLength, tag.Key) 161 | } 162 | 163 | // Check for duplicates 164 | if seen[tag.Key] { 165 | return fmt.Errorf("duplicate tag key: %s", tag.Key) 166 | } 167 | seen[tag.Key] = true 168 | } 169 | 170 | return nil 171 | } 172 | 173 | // extractTagMetadataFromRequest extracts and validates tags from x-amz-tagging header. 174 | // Returns tag metadata map or error if parsing/validation fails. 175 | func extractTagMetadataFromRequest(r *http.Request) (map[string]string, error) { 176 | taggingHeader := r.Header.Get("x-amz-tagging") 177 | if taggingHeader == "" { 178 | return nil, nil 179 | } 180 | 181 | tags, err := parseTaggingHeader(taggingHeader) 182 | if err != nil { 183 | return nil, fmt.Errorf("failed to parse tagging header: %w", err) 184 | } 185 | 186 | if err := validateTags(tags); err != nil { 187 | return nil, fmt.Errorf("tag validation failed: %w", err) 188 | } 189 | 190 | return tagsToMetadata(tags), nil 191 | } 192 | 193 | // parseTaggingHeader parses the x-amz-tagging header format. 
194 | // Format: key1=value1&key2=value2 (URL-encoded query string style) 195 | func parseTaggingHeader(header string) ([]model.Tag, error) { 196 | if header == "" { 197 | return nil, nil 198 | } 199 | 200 | // URL decode the header value 201 | decoded, err := url.QueryUnescape(header) 202 | if err != nil { 203 | return nil, fmt.Errorf("invalid URL encoding in x-amz-tagging header: %w", err) 204 | } 205 | 206 | // Parse as query string 207 | values, err := url.ParseQuery(decoded) 208 | if err != nil { 209 | return nil, fmt.Errorf("invalid format in x-amz-tagging header: %w", err) 210 | } 211 | 212 | var tags []model.Tag 213 | for key, vals := range values { 214 | if len(vals) > 0 { 215 | tags = append(tags, model.Tag{ 216 | Key: key, 217 | Value: vals[0], // Use first value if multiple 218 | }) 219 | } 220 | } 221 | 222 | return tags, nil 223 | } 224 | -------------------------------------------------------------------------------- /internal/credential/static_file_store_test.go: -------------------------------------------------------------------------------- 1 | package credential 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | ) 8 | 9 | func TestStaticFileStore_Load(t *testing.T) { 10 | tests := []struct { 11 | name string 12 | fileContent string 13 | wantError bool 14 | wantCount int 15 | checkAccessKey string 16 | expectFound bool 17 | }{ 18 | { 19 | name: "valid single credential", 20 | fileContent: `{ 21 | "credentials": [ 22 | { 23 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 24 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 25 | } 26 | ] 27 | }`, 28 | wantError: false, 29 | wantCount: 1, 30 | checkAccessKey: "AKIAIOSFODNN7EXAMPLE", 31 | expectFound: true, 32 | }, 33 | { 34 | name: "valid multiple credentials", 35 | fileContent: `{ 36 | "credentials": [ 37 | { 38 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 39 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 40 | }, 41 | { 42 | "accessKey": "AKIAI44QH8DHBEXAMPLE", 43 | "secretKey": 
"je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY" 44 | } 45 | ] 46 | }`, 47 | wantError: false, 48 | wantCount: 2, 49 | checkAccessKey: "AKIAI44QH8DHBEXAMPLE", 50 | expectFound: true, 51 | }, 52 | { 53 | name: "empty credentials array", 54 | fileContent: `{ 55 | "credentials": [] 56 | }`, 57 | wantError: true, 58 | }, 59 | { 60 | name: "invalid JSON", 61 | fileContent: `{invalid json}`, 62 | wantError: true, 63 | }, 64 | { 65 | name: "duplicate access key", 66 | fileContent: `{ 67 | "credentials": [ 68 | { 69 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 70 | "secretKey": "secret1" 71 | }, 72 | { 73 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 74 | "secretKey": "secret2" 75 | } 76 | ] 77 | }`, 78 | wantError: true, 79 | }, 80 | { 81 | name: "invalid access key too short", 82 | fileContent: `{ 83 | "credentials": [ 84 | { 85 | "accessKey": "AB", 86 | "secretKey": "validSecretKey123" 87 | } 88 | ] 89 | }`, 90 | wantError: true, 91 | }, 92 | { 93 | name: "invalid secret key too short", 94 | fileContent: `{ 95 | "credentials": [ 96 | { 97 | "accessKey": "validAccessKey", 98 | "secretKey": "short" 99 | } 100 | ] 101 | }`, 102 | wantError: true, 103 | }, 104 | { 105 | name: "access key with reserved characters", 106 | fileContent: `{ 107 | "credentials": [ 108 | { 109 | "accessKey": "ACCESS=KEY", 110 | "secretKey": "validSecretKey123" 111 | } 112 | ] 113 | }`, 114 | wantError: true, 115 | }, 116 | } 117 | 118 | for _, tt := range tests { 119 | t.Run(tt.name, func(t *testing.T) { 120 | // Create temporary file 121 | tmpFile := filepath.Join(t.TempDir(), "credentials.json") 122 | if err := os.WriteFile(tmpFile, []byte(tt.fileContent), 0600); err != nil { 123 | t.Fatalf("Failed to create temp file: %v", err) 124 | } 125 | 126 | // Load credentials 127 | store, err := NewStaticFileStore(tmpFile) 128 | if (err != nil) != tt.wantError { 129 | t.Errorf("NewStaticFileStore() error = %v, wantError %v", err, tt.wantError) 130 | return 131 | } 132 | 133 | if err != nil { 134 | return // Expected 
error, test passed 135 | } 136 | 137 | // Verify count 138 | if store.Count() != tt.wantCount { 139 | t.Errorf("Count() = %d, want %d", store.Count(), tt.wantCount) 140 | } 141 | 142 | // Verify access key lookup if specified 143 | if tt.checkAccessKey != "" { 144 | _, found := store.Get(tt.checkAccessKey) 145 | if found != tt.expectFound { 146 | t.Errorf("Get(%s) found = %v, want %v", tt.checkAccessKey, found, tt.expectFound) 147 | } 148 | } 149 | }) 150 | } 151 | } 152 | 153 | func TestStaticFileStore_Get(t *testing.T) { 154 | // Create a temporary credentials file 155 | tmpFile := filepath.Join(t.TempDir(), "credentials.json") 156 | fileContent := `{ 157 | "credentials": [ 158 | { 159 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 160 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 161 | }, 162 | { 163 | "accessKey": "AKIAI44QH8DHBEXAMPLE", 164 | "secretKey": "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY" 165 | } 166 | ] 167 | }` 168 | if err := os.WriteFile(tmpFile, []byte(fileContent), 0600); err != nil { 169 | t.Fatalf("Failed to create temp file: %v", err) 170 | } 171 | 172 | store, err := NewStaticFileStore(tmpFile) 173 | if err != nil { 174 | t.Fatalf("NewStaticFileStore() failed: %v", err) 175 | } 176 | 177 | tests := []struct { 178 | name string 179 | accessKey string 180 | wantSecret string 181 | wantFound bool 182 | }{ 183 | { 184 | name: "existing access key 1", 185 | accessKey: "AKIAIOSFODNN7EXAMPLE", 186 | wantSecret: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", 187 | wantFound: true, 188 | }, 189 | { 190 | name: "existing access key 2", 191 | accessKey: "AKIAI44QH8DHBEXAMPLE", 192 | wantSecret: "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY", 193 | wantFound: true, 194 | }, 195 | { 196 | name: "non-existing access key", 197 | accessKey: "NONEXISTENT", 198 | wantSecret: "", 199 | wantFound: false, 200 | }, 201 | { 202 | name: "empty access key", 203 | accessKey: "", 204 | wantSecret: "", 205 | wantFound: false, 206 | }, 207 | } 208 | 209 | for _, tt := 
range tests { 210 | t.Run(tt.name, func(t *testing.T) { 211 | secret, found := store.Get(tt.accessKey) 212 | if found != tt.wantFound { 213 | t.Errorf("Get() found = %v, want %v", found, tt.wantFound) 214 | } 215 | if secret != tt.wantSecret { 216 | t.Errorf("Get() secret = %v, want %v", secret, tt.wantSecret) 217 | } 218 | }) 219 | } 220 | } 221 | 222 | func TestStaticFileStore_GetName(t *testing.T) { 223 | tmpFile := filepath.Join(t.TempDir(), "credentials.json") 224 | fileContent := `{ 225 | "credentials": [ 226 | { 227 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 228 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 229 | } 230 | ] 231 | }` 232 | if err := os.WriteFile(tmpFile, []byte(fileContent), 0600); err != nil { 233 | t.Fatalf("Failed to create temp file: %v", err) 234 | } 235 | 236 | store, err := NewStaticFileStore(tmpFile) 237 | if err != nil { 238 | t.Fatalf("NewStaticFileStore() failed: %v", err) 239 | } 240 | 241 | if name := store.GetName(); name != "static_file" { 242 | t.Errorf("GetName() = %v, want %v", name, "static_file") 243 | } 244 | } 245 | 246 | func TestStaticFileStore_EmptyPath(t *testing.T) { 247 | _, err := NewStaticFileStore("") 248 | if err == nil { 249 | t.Error("NewStaticFileStore with empty path should return error") 250 | } 251 | } 252 | 253 | func TestStaticFileStore_NonExistentFile(t *testing.T) { 254 | _, err := NewStaticFileStore("/nonexistent/path/credentials.json") 255 | if err == nil { 256 | t.Error("NewStaticFileStore with non-existent file should return error") 257 | } 258 | } 259 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/antithesishq/antithesis-sdk-go v0.5.0 h1:cudCFF83pDDANcXFzkQPUHHedfnnIbUO3JMr9fqwFJs= 2 | github.com/antithesishq/antithesis-sdk-go v0.5.0/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= 3 | github.com/aws/aws-sdk-go v1.55.8 
h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= 4 | github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= 5 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 6 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 7 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 8 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 9 | github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= 10 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 11 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 12 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 13 | github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= 14 | github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= 15 | github.com/go-logfmt/logfmt v0.6.1 h1:4hvbpePJKnIzH1B+8OR/JPbTx37NktoI9LE2QZBBkvE= 16 | github.com/go-logfmt/logfmt v0.6.1/go.mod h1:EV2pOAQoZaT1ZXZbqDl5hrymndi4SY9ED9/z6CO0XAk= 17 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 18 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 19 | github.com/google/go-tpm v0.9.7 h1:u89J4tUUeDTlH8xxC3CTW7OHZjbjKoHdQ9W7gCUhtxA= 20 | github.com/google/go-tpm v0.9.7/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= 21 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 22 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 23 | github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= 24 | github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= 25 | 
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= 26 | github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= 27 | github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= 28 | github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= 29 | github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= 30 | github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4= 31 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 32 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 33 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 34 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 35 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 36 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 37 | github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76 h1:KGuD/pM2JpL9FAYvBrnBBeENKZNh6eNtjqytV6TYjnk= 38 | github.com/minio/highwayhash v1.0.4-0.20251030100505-070ab1a87a76/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= 39 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 40 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 41 | github.com/nats-io/jwt/v2 v2.8.0 h1:K7uzyz50+yGZDO5o772eRE7atlcSEENpL7P+b74JV1g= 42 | github.com/nats-io/jwt/v2 v2.8.0/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= 43 | github.com/nats-io/nats-server/v2 v2.12.3 h1:KRv+1n7lddMVgkJPQer+pt36TcO0ENxjilBmeWdjcHs= 44 | github.com/nats-io/nats-server/v2 v2.12.3/go.mod h1:MQXjG9WjyXKz9koWzUc3jYUMKD8x3CLmTNy91IQQz3Y= 45 | 
github.com/nats-io/nats.go v1.48.0 h1:pSFyXApG+yWU/TgbKCjmm5K4wrHu86231/w84qRVR+U= 46 | github.com/nats-io/nats.go v1.48.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= 47 | github.com/nats-io/nkeys v0.4.12 h1:nssm7JKOG9/x4J8II47VWCL1Ds29avyiQDRn0ckMvDc= 48 | github.com/nats-io/nkeys v0.4.12/go.mod h1:MT59A1HYcjIcyQDJStTfaOY6vhy9XTUjOFo+SVsvpBg= 49 | github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= 50 | github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= 51 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 52 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 53 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 54 | github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= 55 | github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= 56 | github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= 57 | github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= 58 | github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc= 59 | github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI= 60 | github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= 61 | github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= 62 | github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 63 | github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= 64 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 65 | github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= 66 | github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= 67 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 68 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 69 | go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= 70 | go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= 71 | golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= 72 | golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= 73 | golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 74 | golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= 75 | golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= 76 | golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= 77 | golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= 78 | google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= 79 | google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= 80 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 81 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 82 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 83 | gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= 84 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 85 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 86 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 87 | -------------------------------------------------------------------------------- 
/scripts/ci-conformance-full.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | ENDPOINT="http://localhost:5222" 5 | BUCKET="fullconf-$(date +%s)-$RANDOM" 6 | TMPDIR="$(mktemp -d)" 7 | RESULTS="$PWD/conformance-full.txt" 8 | PASS_COUNT=0 9 | FAIL_COUNT=0 10 | 11 | log() { echo "[$(date +%H:%M:%S)] $*"; } 12 | pass() { echo "PASS: $1" | tee -a "$RESULTS"; PASS_COUNT=$((PASS_COUNT+1)); } 13 | fail() { echo "FAIL: $1" | tee -a "$RESULTS"; FAIL_COUNT=$((FAIL_COUNT+1)); } 14 | 15 | cleanup() { 16 | rm -rf "$TMPDIR" || true 17 | } 18 | trap cleanup EXIT 19 | 20 | run() { 21 | # run cmd; on success pass name; else fail name (do not exit) 22 | local name="$1"; shift 23 | if "$@"; then pass "$name"; else fail "$name"; fi 24 | } 25 | 26 | assert_grep() { 27 | # grep for pattern in file/stdin; usage: assert_grep name pattern file 28 | local name="$1"; local pattern="$2"; local target="$3" 29 | if grep -qE "$pattern" "$target"; then pass "$name"; else fail "$name"; fi 30 | } 31 | 32 | echo "AWS CLI full conformance against $ENDPOINT" | tee "$RESULTS" 33 | echo "Bucket: $BUCKET" | tee -a "$RESULTS" 34 | 35 | # ---------- Bucket lifecycle ---------- 36 | run "bucket.create" aws s3 mb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null 37 | 38 | # Duplicate create should fail 39 | if aws s3 mb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null 2>"$TMPDIR/mb2.err"; then 40 | fail "bucket.create.duplicate_should_fail" 41 | else 42 | pass "bucket.create.duplicate_should_fail" 43 | fi 44 | 45 | # Service list includes bucket 46 | aws s3 ls --endpoint-url="$ENDPOINT" >"$TMPDIR/svc.txt" || true 47 | assert_grep "service.list.contains_bucket" "$BUCKET" "$TMPDIR/svc.txt" 48 | 49 | # Non-existent bucket list should fail 50 | if aws s3 ls "s3://no-such-$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null 2>"$TMPDIR/ls-no.err"; then 51 | fail "bucket.list_nonexistent_should_fail" 52 | else 53 | pass 
"bucket.list_nonexistent_should_fail" 54 | fi 55 | 56 | # ---------- Object basic ops ---------- 57 | echo "hello world" > "$TMPDIR/hello.txt" 58 | run "object.put" aws s3 cp "$TMPDIR/hello.txt" "s3://$BUCKET/hello.txt" --endpoint-url="$ENDPOINT" >/dev/null 59 | 60 | aws s3 ls "s3://$BUCKET" --endpoint-url="$ENDPOINT" >"$TMPDIR/blist.txt" || true 61 | assert_grep "bucket.list_contains_object" "hello.txt" "$TMPDIR/blist.txt" 62 | 63 | run "object.head" aws s3api head-object --bucket "$BUCKET" --key hello.txt --endpoint-url="$ENDPOINT" >/dev/null 64 | 65 | run "object.get" aws s3 cp "s3://$BUCKET/hello.txt" "$TMPDIR/out.txt" --endpoint-url="$ENDPOINT" >/dev/null 66 | if diff -q "$TMPDIR/hello.txt" "$TMPDIR/out.txt" >/dev/null; then pass "object.get_content_match"; else fail "object.get_content_match"; fi 67 | 68 | # Head of non-existent should fail 69 | if aws s3api head-object --bucket "$BUCKET" --key missing.txt --endpoint-url="$ENDPOINT" >/dev/null 2>"$TMPDIR/head-miss.err"; then 70 | fail "object.head_missing_should_fail" 71 | else 72 | pass "object.head_missing_should_fail" 73 | fi 74 | 75 | # ---------- Metadata and content-type ---------- 76 | echo "meta file" > "$TMPDIR/meta.txt" 77 | run "object.put_with_metadata" aws s3api put-object \ 78 | --bucket "$BUCKET" --key meta.txt \ 79 | --body "$TMPDIR/meta.txt" \ 80 | --content-type text/plain \ 81 | --metadata foo=bar,baz=qux \ 82 | --endpoint-url="$ENDPOINT" >/dev/null 83 | 84 | aws s3api head-object --bucket "$BUCKET" --key meta.txt --endpoint-url="$ENDPOINT" >"$TMPDIR/head-meta.json" || true 85 | assert_grep "object.head_has_content_type" '"ContentType":\s*"text/plain"' "$TMPDIR/head-meta.json" 86 | assert_grep "object.head_has_metadata_foo" '"Foo":\s*"bar"' "$TMPDIR/head-meta.json" 87 | 88 | # ---------- Copy object ---------- 89 | run "object.copy" aws s3api copy-object --bucket "$BUCKET" --copy-source "$BUCKET/hello.txt" --key copy.txt --endpoint-url="$ENDPOINT" >/dev/null 90 | run "object.get_copy" aws 
s3 cp "s3://$BUCKET/copy.txt" "$TMPDIR/copy.txt" --endpoint-url="$ENDPOINT" >/dev/null 91 | if diff -q "$TMPDIR/hello.txt" "$TMPDIR/copy.txt" >/dev/null; then pass "object.copy_content_match"; else fail "object.copy_content_match"; fi 92 | 93 | # ---------- Prefixes and delimiter ---------- 94 | mkdir -p "$TMPDIR/dirs" 95 | echo a > "$TMPDIR/dirs/dir1-file1" 96 | echo b > "$TMPDIR/dirs/dir1-file2" 97 | echo c > "$TMPDIR/dirs/dir2-file1" 98 | aws s3 cp "$TMPDIR/dirs/dir1-file1" "s3://$BUCKET/dir1/file1" --endpoint-url="$ENDPOINT" >/dev/null || true 99 | aws s3 cp "$TMPDIR/dirs/dir1-file2" "s3://$BUCKET/dir1/file2" --endpoint-url="$ENDPOINT" >/dev/null || true 100 | aws s3 cp "$TMPDIR/dirs/dir2-file1" "s3://$BUCKET/dir2/file1" --endpoint-url="$ENDPOINT" >/dev/null || true 101 | 102 | # list-objects-v2 with delimiter 103 | aws s3api list-objects-v2 --bucket "$BUCKET" --delimiter '/' --endpoint-url="$ENDPOINT" --output json >"$TMPDIR/list-delim.json" || true 104 | assert_grep "list.delimiter_commonprefixes_dir1" '"Prefix":\s*"dir1/"' "$TMPDIR/list-delim.json" 105 | assert_grep "list.delimiter_commonprefixes_dir2" '"Prefix":\s*"dir2/"' "$TMPDIR/list-delim.json" 106 | 107 | # ---------- Range GET ---------- 108 | dd if=/dev/zero bs=1 count=64 of="$TMPDIR/range.src" >/dev/null 2>&1 || true 109 | aws s3 cp "$TMPDIR/range.src" "s3://$BUCKET/range.bin" --endpoint-url="$ENDPOINT" >/dev/null || true 110 | aws s3api get-object --bucket "$BUCKET" --key range.bin --range bytes=0-9 --endpoint-url="$ENDPOINT" "$TMPDIR/range.out" >/dev/null 2>&1 || true 111 | if [ -f "$TMPDIR/range.out" ] && [ "$(wc -c < "$TMPDIR/range.out")" -eq 10 ]; then pass "object.get_range_0_9"; else fail "object.get_range_0_9"; fi 112 | 113 | # ---------- Multipart upload ---------- 114 | dd if=/dev/zero of="$TMPDIR/large.bin" bs=1M count=6 >/dev/null 2>&1 || true 115 | UPLOAD_ID=$(aws s3api create-multipart-upload --bucket "$BUCKET" --key mpu.bin --endpoint-url="$ENDPOINT" --query UploadId --output text 
2>/dev/null || true) 116 | if [ -n "${UPLOAD_ID:-}" ] && [ "$UPLOAD_ID" != "None" ]; then 117 | head -c 3145728 "$TMPDIR/large.bin" > "$TMPDIR/part1.bin" 2>/dev/null || true 118 | tail -c +3145729 "$TMPDIR/large.bin" > "$TMPDIR/part2.bin" 2>/dev/null || true 119 | ETAG1=$(aws s3api upload-part --bucket "$BUCKET" --key mpu.bin --part-number 1 --body "$TMPDIR/part1.bin" --upload-id "$UPLOAD_ID" --endpoint-url="$ENDPOINT" --query ETag --output text 2>/dev/null | tr -d '"' || true) 120 | ETAG2=$(aws s3api upload-part --bucket "$BUCKET" --key mpu.bin --part-number 2 --body "$TMPDIR/part2.bin" --upload-id "$UPLOAD_ID" --endpoint-url="$ENDPOINT" --query ETag --output text 2>/dev/null | tr -d '"' || true) 121 | if [ -n "${ETAG1:-}" ] && [ -n "${ETAG2:-}" ]; then 122 | cat >"$TMPDIR/parts.json" </dev/null 2>&1; then 126 | pass "mpu.complete" 127 | # head object to confirm presence 128 | if aws s3api head-object --bucket "$BUCKET" --key mpu.bin --endpoint-url="$ENDPOINT" >/dev/null 2>&1; then pass "mpu.head_after_complete"; else fail "mpu.head_after_complete"; fi 129 | else 130 | fail "mpu.complete" 131 | fi 132 | else 133 | fail "mpu.upload_parts" 134 | fi 135 | else 136 | fail "mpu.create" 137 | fi 138 | 139 | # ---------- Delete multiple objects ---------- 140 | echo 1 > "$TMPDIR/d1"; echo 2 > "$TMPDIR/d2" 141 | aws s3 cp "$TMPDIR/d1" "s3://$BUCKET/del1" --endpoint-url="$ENDPOINT" >/dev/null || true 142 | aws s3 cp "$TMPDIR/d2" "s3://$BUCKET/del2" --endpoint-url="$ENDPOINT" >/dev/null || true 143 | cat >"$TMPDIR/delete.json" </dev/null 147 | 148 | # ---------- Presigned URL ---------- 149 | echo "signed" > "$TMPDIR/signed.txt" 150 | aws s3 cp "$TMPDIR/signed.txt" "s3://$BUCKET/signed.txt" --endpoint-url="$ENDPOINT" >/dev/null || true 151 | URL=$(aws s3 presign "s3://$BUCKET/signed.txt" --endpoint-url="$ENDPOINT" --expires-in 60 2>/dev/null || true) 152 | if [ -n "${URL:-}" ]; then 153 | BODY=$(curl -fsSL "$URL" 2>/dev/null || true) 154 | if [ "$BODY" = "signed" ]; then 
pass "presign.get"; else fail "presign.get"; fi 155 | else 156 | fail "presign.generate" 157 | fi 158 | 159 | # ---------- Bucket deletion semantics ---------- 160 | # Non-empty bucket removal should fail 161 | aws s3 cp "$TMPDIR/hello.txt" "s3://$BUCKET/tmp.txt" --endpoint-url="$ENDPOINT" >/dev/null || true 162 | if aws s3 rb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null 2>"$TMPDIR/rb-nonempty.err"; then 163 | fail "bucket.remove_nonempty_should_fail" 164 | else 165 | pass "bucket.remove_nonempty_should_fail" 166 | fi 167 | # cleanup and delete 168 | aws s3 rm "s3://$BUCKET" --recursive --endpoint-url="$ENDPOINT" >/dev/null 2>&1 || true 169 | run "bucket.remove" aws s3 rb "s3://$BUCKET" --endpoint-url="$ENDPOINT" >/dev/null 170 | 171 | echo "Summary: PASS=$PASS_COUNT FAIL=$FAIL_COUNT" | tee -a "$RESULTS" 172 | echo "Detailed results saved to: $RESULTS" 173 | 174 | # Exit non-zero if any test failed 175 | if [ "$FAIL_COUNT" -ne 0 ]; then exit 1; fi 176 | -------------------------------------------------------------------------------- /internal/model/s3_responses.go: -------------------------------------------------------------------------------- 1 | /* 2 | * The following code is mostly copied from seaweedfs implementation. 
3 | */ 4 | 5 | package model 6 | 7 | import ( 8 | "bytes" 9 | "encoding/xml" 10 | "fmt" 11 | "github.com/aws/aws-sdk-go/service/s3" 12 | "github.com/gorilla/mux" 13 | "log" 14 | "net/http" 15 | "strconv" 16 | "strings" 17 | "time" 18 | ) 19 | 20 | type mimeType string 21 | 22 | const ( 23 | mimeNone mimeType = "" 24 | MimeXML mimeType = "application/xml" 25 | ) 26 | 27 | // RESTErrorResponse - error response format 28 | type RESTErrorResponse struct { 29 | XMLName xml.Name `xml:"Error" json:"-"` 30 | Code string `xml:"Code" json:"Code"` 31 | Message string `xml:"Message" json:"Message"` 32 | Resource string `xml:"Resource" json:"Resource"` 33 | RequestID string `xml:"RequestId" json:"RequestId"` 34 | Key string `xml:"Key,omitempty" json:"Key,omitempty"` 35 | BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` 36 | 37 | // Underlying HTTP status code for the returned error 38 | StatusCode int `xml:"-" json:"-"` 39 | } 40 | 41 | // NewRESTErrorResponse constructs an S3-style XML error body for the given 42 | // API error and request context (resource path, bucket, and object). 43 | func NewRESTErrorResponse(err APIError, resource string, bucket, object string) RESTErrorResponse { 44 | return RESTErrorResponse{ 45 | Code: err.Code, 46 | BucketName: bucket, 47 | Key: object, 48 | Message: err.Description, 49 | Resource: resource,
// NOTE(review): derived from the wall clock, so unique-ish but not a real request ID.
50 | RequestID: fmt.Sprintf("%d", time.Now().UnixNano()), 51 | } 52 | } 53 | 54 | // SetEtag sets the ETag response header. If the provided value is unquoted, 55 | // quotes are added to match S3 behavior. 56 | func SetEtag(w http.ResponseWriter, etag string) { 57 | if etag != "" { 58 | if strings.HasPrefix(etag, "\"") { 59 | w.Header()["ETag"] = []string{etag} 60 | } else { 61 | w.Header()["ETag"] = []string{"\"" + etag + "\""} 62 | } 63 | } 64 | } 65 | 66 | // InitiateMultipartUploadResult wraps the minimal fields returned by S3 67 | // when initiating a multipart upload.
It embeds the AWS SDK's response type 68 | // to preserve field names and XML shape. 69 | type InitiateMultipartUploadResult struct { 70 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"` 71 | s3.CreateMultipartUploadOutput 72 | } 73 | 
// CompleteMultipartUpload models the XML request body the client sends when completing a multipart upload.
74 | type CompleteMultipartUpload struct { 75 | Parts []CompletedPart `xml:"Part"` 76 | } 77 | 
// CompletedPart identifies one uploaded part by its ETag and part number.
78 | type CompletedPart struct { 79 | ETag string 80 | PartNumber int 81 | } 82 | 
// ListPartsResult is the XML response for ListParts.
83 | type ListPartsResult struct { 84 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult"` 85 | 86 | // copied from s3.ListPartsOutput; the SDK's Parts field is not converting to the flattened <Part> XML shape S3 expects, so the fields are redeclared here with explicit tags 87 | Bucket *string `type:"string"` 88 | IsTruncated *bool `type:"boolean"` 89 | Key *string `min:"1" type:"string"` 90 | MaxParts *int64 `type:"integer"` 91 | NextPartNumberMarker *int64 `type:"integer"` 92 | PartNumberMarker *int64 `type:"integer"` 93 | Part []*s3.Part `locationName:"Part" type:"list" flattened:"true"` 94 | StorageClass *string `type:"string" enum:"StorageClass"` 95 | UploadId *string `type:"string"` 96 | } 97 | 
// CompleteMultipartUploadResult is the XML response for CompleteMultipartUpload.
98 | type CompleteMultipartUploadResult struct { 99 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult"` 100 | Location *string `xml:"Location,omitempty"` 101 | Bucket *string `xml:"Bucket,omitempty"` 102 | Key *string `xml:"Key,omitempty"` 103 | ETag *string `xml:"ETag,omitempty"` 104 | // VersionId is NOT included in XML body - it should only be in x-amz-version-id HTTP header 105 | 106 | // Store the VersionId internally for setting HTTP header, but don't marshal to XML 107 | VersionId *string `xml:"-"` 108 | } 109 | 110 | // GetObjectAttributesResult is the response for GetObjectAttributes API 111 | type GetObjectAttributesResult struct { 112 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ GetObjectAttributesOutput"` 113 | ETag *string `xml:"ETag,omitempty"` 114 | Checksum *Checksum `xml:"Checksum,omitempty"` 115 | ObjectParts
*GetObjectAttributesParts `xml:"ObjectParts,omitempty"` 116 | StorageClass *string `xml:"StorageClass,omitempty"` 117 | ObjectSize *int64 `xml:"ObjectSize,omitempty"` 118 | LastModified *time.Time `xml:"LastModified,omitempty"` 119 | VersionId *string `xml:"VersionId,omitempty"` 120 | } 121 | 122 | // Checksum contains checksum information for GetObjectAttributes 123 | type Checksum struct { 124 | ChecksumCRC32 *string `xml:"ChecksumCRC32,omitempty"` 125 | ChecksumCRC32C *string `xml:"ChecksumCRC32C,omitempty"` 126 | ChecksumSHA1 *string `xml:"ChecksumSHA1,omitempty"` 127 | ChecksumSHA256 *string `xml:"ChecksumSHA256,omitempty"` 128 | } 129 | 130 | // GetObjectAttributesParts contains multipart information 131 | type GetObjectAttributesParts struct { 132 | IsTruncated *bool `xml:"IsTruncated,omitempty"` 133 | MaxParts *int64 `xml:"MaxParts,omitempty"` 134 | NextPartNumberMarker *int64 `xml:"NextPartNumberMarker,omitempty"` 135 | PartNumberMarker *int64 `xml:"PartNumberMarker,omitempty"` 136 | PartsCount *int64 `xml:"PartsCount,omitempty"` 137 | TotalPartsCount *int64 `xml:"TotalPartsCount,omitempty"` 138 | } 139 | 140 | // WriteXMLResponse encodes the response as XML and writes it with the given 141 | // HTTP status code and appropriate Content-Type. 142 | func WriteXMLResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { 143 | WriteResponse(w, r, statusCode, EncodeXMLResponse(response), MimeXML) 144 | } 145 | 146 | // WriteEmptyResponse writes only headers and the given status code. 147 | func WriteEmptyResponse(w http.ResponseWriter, r *http.Request, statusCode int) { 148 | WriteResponse(w, r, statusCode, []byte{}, mimeNone) 149 | } 150 | 151 | // EncodeXMLResponse serializes a value into an XML byte slice with xml.Header.
func EncodeXMLResponse(response interface{}) []byte {
	var bytesBuffer bytes.Buffer
	// Always emit the standard XML declaration first so even a failed
	// encode returns a minimally valid document prefix.
	bytesBuffer.WriteString(xml.Header)
	encoder := xml.NewEncoder(&bytesBuffer)
	err := encoder.Encode(response)
	if err != nil {
		// Bug fix: message previously misspelled "enconding".
		log.Printf("Error encoding the response, %s", err)
	}
	return bytesBuffer.Bytes()
}

// setCommonHeaders sets shared S3-style headers, including a generated
// x-amz-request-id and Accept-Ranges. Also configures permissive CORS if
// the request includes an Origin header.
func setCommonHeaders(w http.ResponseWriter, r *http.Request) {
	// NOTE(review): an ID derived from the wall clock is not guaranteed
	// unique under concurrent requests; a random ID would be safer.
	w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
	w.Header().Set("Accept-Ranges", "bytes")
	if r.Header.Get("Origin") != "" {
		// NOTE(review): browsers reject Allow-Credentials: true combined
		// with a wildcard origin; echoing the request's Origin would be
		// required for credentialed CORS. Behavior left unchanged here.
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Credentials", "true")
	}
}

// WriteErrorResponse looks up the API error for the given code and writes a
// serialized S3 XML error with the appropriate HTTP status.
177 | func WriteErrorResponse(w http.ResponseWriter, r *http.Request, errorCode ErrorCode) { 178 | vars := mux.Vars(r) 179 | bucket := vars["bucket"] 180 | object := vars["object"] 181 | if strings.HasPrefix(object, "/") { 182 | object = object[1:] 183 | } 184 | 185 | apiError := GetAPIError(errorCode) 186 | errorResponse := NewRESTErrorResponse(apiError, r.URL.Path, bucket, object) 187 | WriteXMLResponse(w, r, apiError.HTTPStatusCode, errorResponse) 188 | } 189 | 190 | // ObjectRetention represents object retention configuration 191 | type ObjectRetention struct { 192 | XMLName xml.Name `xml:"Retention"` 193 | Mode *string `xml:"Mode,omitempty"` 194 | RetainUntilDate *string `xml:"RetainUntilDate,omitempty"` 195 | } 196 | 197 | // ObjectRetentionResponse is the response for GetObjectRetention 198 | type ObjectRetentionResponse struct { 199 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Retention"` 200 | Mode string `xml:"Mode"` 201 | RetainUntilDate string `xml:"RetainUntilDate"` 202 | } 203 | 204 | // Tagging represents the root XML element for tagging operations 205 | type Tagging struct { 206 | XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Tagging"` 207 | TagSet TagSet `xml:"TagSet"` 208 | } 209 | 210 | // TagSet contains the collection of tags 211 | type TagSet struct { 212 | Tags []Tag `xml:"Tag"` 213 | } 214 | 215 | // Tag represents a single key-value tag pair 216 | type Tag struct { 217 | Key string `xml:"Key"` 218 | Value string `xml:"Value"` 219 | } 220 | 221 | // WriteResponse writes headers, status code, and optional body, flushing the 222 | // response when done. Body logging is minimized to avoid leaking data. 
func WriteResponse(w http.ResponseWriter, r *http.Request, statusCode int, response []byte, mType mimeType) {
	setCommonHeaders(w, r)
	// Headers must be fully populated before WriteHeader is called below.
	if response != nil {
		w.Header().Set("Content-Length", strconv.Itoa(len(response)))
	}
	if mType != mimeNone {
		w.Header().Set("Content-Type", string(mType))
	}
	w.WriteHeader(statusCode)
	if response != nil {
		// Only status/type/length are logged, never the body itself.
		log.Printf("status %d %s len=%d", statusCode, mType, len(response))
		_, err := w.Write(response)
		if err != nil {
			log.Printf("Error writing the response, %s", err)
			return
		}
		// Flush when the underlying writer supports it so the client sees
		// the response promptly.
		if flusher, ok := w.(http.Flusher); ok {
			flusher.Flush()
		}
	}
}
--------------------------------------------------------------------------------
/internal/auth/s3_auth.go:
--------------------------------------------------------------------------------
package auth

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"github.com/wpnpeiris/nats-s3/internal/credential"
	"github.com/wpnpeiris/nats-s3/internal/model"
	"net/http"
	"net/url"
	"sort"
	"strings"
	"time"
)

// AuthError wraps the S3 API error code produced while authenticating a
// request; callers translate it into an XML error response.
type AuthError struct {
	code model.ErrorCode
}

// Error implements the error interface.
// NOTE(review): the %d verb assumes model.ErrorCode is an integer kind —
// confirm against its declaration.
func (e *AuthError) Error() string {
	return fmt.Sprintf("Auth error code: %d", e.code)
}

// AuthHeaderParameters holds the components of a SigV4 signature extracted
// from either the Authorization header or presigned-URL query parameters.
type AuthHeaderParameters struct {
	algo          string // signing algorithm; only "AWS4-HMAC-SHA256" is accepted
	accessKey     string // access key ID from the credential scope
	scopeDate     string // YYYYMMDD date component of the credential scope
	scopeRegion   string // region component of the credential scope
	scopeService  string // service component; must be "s3"
	signedHeaders string // semicolon-separated signed header names
	signature     string // hex-encoded client-supplied signature (lowercased)
	requestTime   string // timestamp in 20060102T150405Z format
	hashedPayload string // SHA-256 of the payload, or "UNSIGNED-PAYLOAD"
}

// IdentityAccessManagement verifies AWS SigV4 and authorizes requests using
// a credential store that supports multiple users.
type IdentityAccessManagement struct {
	credentialStore credential.Store
}

// NewIdentityAccessManagement returns an IAM verifier that uses a credential store
// for multi-user authentication support.
func NewIdentityAccessManagement(store credential.Store) *IdentityAccessManagement {
	return &IdentityAccessManagement{
		credentialStore: store,
	}
}

// isDisabled reports whether authentication is turned off entirely (no
// credential store configured); in that case all requests pass through.
func (iam *IdentityAccessManagement) isDisabled() bool {
	return iam.credentialStore == nil
}

// Auth verifies AWS SigV4 (header-based and presigned URL) against the
// configured credential. On success, calls the wrapped handler; otherwise
// returns an S3-style XML error response.
func (iam *IdentityAccessManagement) Auth(f http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if iam.isDisabled() {
			f(w, r)
			return
		}

		// Support both header-based SigV4 and presigned URL SigV4.
		hp, authErr := extractAuthHeaderParameters(r)
		if authErr != nil {
			model.WriteErrorResponse(w, r, authErr.code)
			return
		}

		// Look up the secret key for the access key from the credential store
		secretKey, found := iam.credentialStore.Get(hp.accessKey)
		if !found {
			model.WriteErrorResponse(w, r, model.ErrInvalidAccessKeyID)
			return
		}

		authErr = validateAuthHeaderParameters(hp)
		if authErr != nil {
			model.WriteErrorResponse(w, r, authErr.code)
			return
		}

		// Rebuild the canonical request exactly as the client must have.
		canURI := buildCanonicalURI(r)
		canQuery := buildCanonicalQueryString(r)
		canHeaders := buildCanonicalHeaders(hp.signedHeaders, r)

		if hp.hashedPayload == "" {
			hp.hashedPayload = "UNSIGNED-PAYLOAD"
		}

		canReq := strings.Join([]string{
			r.Method,
			canURI,
			canQuery,
			canHeaders,
			strings.ToLower(hp.signedHeaders),
			hp.hashedPayload,
		}, "\n")
		hash := sha256.Sum256([]byte(canReq))
		canReqHashHex := hex.EncodeToString(hash[:])

		// Build StringToSign
		scope := strings.Join([]string{hp.scopeDate, hp.scopeRegion, hp.scopeService, "aws4_request"}, "/")
		sts := strings.Join([]string{
			"AWS4-HMAC-SHA256",
			hp.requestTime,
			scope,
			canReqHashHex,
		}, "\n")

		// Derive signing key and compute signature
		kDate := hmacSHA256([]byte("AWS4"+secretKey), hp.scopeDate)
		kRegion := hmacSHA256(kDate, hp.scopeRegion)
		kService := hmacSHA256(kRegion, hp.scopeService)
		kSigning := hmacSHA256(kService, "aws4_request")
		sigBytes := hmacSHA256(kSigning, sts)
		calcSig := hex.EncodeToString(sigBytes)

		// hmac.Equal gives a constant-time comparison of the signatures.
		if !hmac.Equal([]byte(hp.signature), []byte(calcSig)) {
			model.WriteErrorResponse(w, r, model.ErrSignatureDoesNotMatch)
			return
		}

		f(w, r)
		return
	}
}

// extractAuthHeaderParameters extracts parameters from either the Authorization header or X-Amz-* query params.
func extractAuthHeaderParameters(r *http.Request) (*AuthHeaderParameters, *AuthError) {
	headerParams := &AuthHeaderParameters{}
	headerParams.hashedPayload = r.Header.Get("x-amz-content-sha256")
	qs := r.URL.Query()

	if auth := r.Header.Get("Authorization"); strings.HasPrefix(auth, "AWS4-HMAC-SHA256 ") {
		headerParams.algo = "AWS4-HMAC-SHA256"
		// Parse comma-separated k=v parts
		parts := strings.Split(auth[len("AWS4-HMAC-SHA256 "):], ",")
		for _, part := range parts {
			kv := strings.SplitN(strings.TrimSpace(part), "=", 2)
			if len(kv) != 2 {
				continue
			}
			key := kv[0]
			val := strings.Trim(kv[1], " ")
			if key == "Credential" {
				// Credential = accessKey/date/region/service/aws4_request
				credParts := strings.Split(val, "/")
				if len(credParts) >= 5 {
					headerParams.accessKey = credParts[0]
					headerParams.scopeDate = credParts[1]
					headerParams.scopeRegion = credParts[2]
					headerParams.scopeService = credParts[3]
				}
			} else if key == "SignedHeaders" {
				headerParams.signedHeaders = val
			} else if key == "Signature" {
				headerParams.signature = strings.ToLower(val)
			}
		}
		headerParams.requestTime = r.Header.Get("x-amz-date")
		if headerParams.requestTime == "" {
			// fallback to Date header is not supported for SigV4 here
			return nil, &AuthError{model.ErrMissingDateHeader}
		}
		if headerParams.hashedPayload == "" {
			return nil, &AuthError{model.ErrInvalidDigest}
		}
	} else if qs.Get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256" {
		// Presigned-URL variant: everything lives in the query string.
		headerParams.algo = "AWS4-HMAC-SHA256"
		headerParams.requestTime = qs.Get("X-Amz-Date")
		headerParams.signedHeaders = qs.Get("X-Amz-SignedHeaders")
		headerParams.signature = strings.ToLower(qs.Get("X-Amz-Signature"))
		cred := qs.Get("X-Amz-Credential")
		credParts := strings.Split(cred, "/")
		if len(credParts) >= 5 {
			headerParams.accessKey = credParts[0]
			headerParams.scopeDate = credParts[1]
			headerParams.scopeRegion = credParts[2]
			headerParams.scopeService = credParts[3]
		}
		// For presigned GET, payload is usually UNSIGNED-PAYLOAD
		if headerParams.hashedPayload == "" {
			headerParams.hashedPayload = qs.Get("X-Amz-Content-Sha256")
		}
		if headerParams.hashedPayload == "" {
			headerParams.hashedPayload = "UNSIGNED-PAYLOAD"
		}
		// Expires validation
		if expStr := qs.Get("X-Amz-Expires"); expStr != "" {
			// Parse requestTime then compare now <= reqTime + exp seconds
			if tReq, err := time.Parse("20060102T150405Z", headerParams.requestTime); err == nil {
				// Accept small clock skew of 5 minutes
				maxT := tReq.Add(time.Duration(parseIntDefault(expStr, 0)) * time.Second).Add(5 * time.Minute)
				if time.Now().UTC().After(maxT) {
					return nil, &AuthError{model.ErrExpiredPresignRequest}
				}
			} else {
				return nil, &AuthError{model.ErrMalformedPresignedDate}
			}
		}
	} else {
		// Neither auth mechanism present: reject.
		return nil, &AuthError{model.ErrAccessDenied}
	}

	return headerParams, nil
}

// validateAuthHeaderParameters performs structural checks on the extracted
// SigV4 parameters (algorithm, scope shape, and request-time skew) before
// any cryptographic verification is attempted.
func validateAuthHeaderParameters(hp *AuthHeaderParameters) *AuthError {
	if hp.algo != "AWS4-HMAC-SHA256" || hp.accessKey == "" || hp.signedHeaders == "" || hp.signature == "" || hp.requestTime == "" {
		return &AuthError{model.ErrSignatureDoesNotMatch}
	}

	// Minimal validation of scope
	if hp.scopeDate == "" || hp.scopeRegion == "" || hp.scopeService != "s3" {
		return &AuthError{model.ErrCredMalformed}
	}

	// Validate time skew (+/- 5 minutes)
	tReq, err := time.Parse("20060102T150405Z", hp.requestTime)
	if err != nil {
		return &AuthError{model.ErrMalformedPresignedDate}
	}
	// NOTE(review): ErrRequestNotReadyYet is returned for skew in BOTH
	// directions (too old as well as too new); S3 itself distinguishes
	// RequestTimeTooSkewed — confirm whether that matters to clients.
	if d := time.Since(tReq.UTC()); d > 5*time.Minute || d < -5*time.Minute {
		return &AuthError{model.ErrRequestNotReadyYet}
	}

	return nil
}

// buildCanonicalURI returns the percent-encoded request path, defaulting to
// "/" for an empty path, as required by the SigV4 canonical request.
func buildCanonicalURI(r *http.Request) string {
	canURI := r.URL.EscapedPath()
	if canURI == "" {
		canURI = "/"
	}
	return canURI
}

// buildCanonicalQueryString build canonical query params, for presign include all, otherwise include existing sorted
func buildCanonicalQueryString(r *http.Request) string {
	var canQuery string
	{
		// Copy query parameters, excluding X-Amz-Signature for presigned URLs
		qp := url.Values{}
		for k, vs := range r.URL.Query() {
			// Exclude X-Amz-Signature as it's not part of the canonical request for presigned URLs
			if k == "X-Amz-Signature" {
				continue
			}
			for _, v := range vs {
				qp.Add(k, v)
			}
		}
		// AWS requires sorting by key and then by value
		keys := make([]string, 0, len(qp))
		for k := range qp {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		var parts []string
		for _, k := range keys {
			vals := qp[k]
			sort.Strings(vals)
			for _, v := range vals {
				parts = append(parts, awsURLEncode(k, true)+"="+awsURLEncode(v, true))
			}
		}
		canQuery = strings.Join(parts, "&")
	}

	return canQuery
}

// buildCanonicalHeaders renders the signed headers as lowercase
// "name:value\n" lines in sorted order, with runs of whitespace in values
// collapsed, per the SigV4 canonicalization rules.
func buildCanonicalHeaders(signedHeaders string, r *http.Request) string {
	sh := strings.Split(signedHeaders, ";")
	sort.Strings(sh)
	var canonicalHeaders strings.Builder
	for _, h := range sh {
		name := strings.ToLower(strings.TrimSpace(h))
		val := strings.TrimSpace(r.Header.Get(name))
		// Go stores Host on the Request, not in the header map.
		if name == "host" && val == "" {
			val = r.Host
		}
		val = collapseSpaces(val)
		canonicalHeaders.WriteString(name)
		canonicalHeaders.WriteString(":")
		canonicalHeaders.WriteString(val)
		canonicalHeaders.WriteString("\n")
	}

	return canonicalHeaders.String()
}

// hmacSHA256 computes HMAC-SHA256 of data under key; used for the SigV4
// signing-key derivation chain and the final signature.
func hmacSHA256(key []byte, data string) []byte {
	mac := hmac.New(sha256.New, key)
	mac.Write([]byte(data))
	return mac.Sum(nil)
}

// collapseSpaces reduces each run of whitespace to a single space and trims
// the ends, matching SigV4 header-value canonicalization.
func collapseSpaces(s string) string {
	// Replace consecutive spaces and tabs with a single space
	var b strings.Builder
	lastSpace := false
	for _, r := range s {
		if r == ' ' || r == '\t' || r == '\n' || r == '\r' {
			if !lastSpace {
				b.WriteByte(' ')
				lastSpace = true
			}
		} else {
			b.WriteRune(r)
			lastSpace = false
		}
	}
	return strings.TrimSpace(b.String())
}

// awsURLEncode encodes a string per AWS SigV4 rules (RFC3986),
// optionally encoding '/' when encodeSlash is true.
func awsURLEncode(s string, encodeSlash bool) string {
	var b strings.Builder
	// Byte-wise iteration is intentional: multi-byte UTF-8 sequences are
	// percent-encoded byte by byte, as SigV4 requires.
	for i := 0; i < len(s); i++ {
		c := s[i]
		isUnreserved := (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || c == '-' || c == '_' || c == '.' || c == '~'
		if isUnreserved || (!encodeSlash && c == '/') {
			b.WriteByte(c)
		} else {
			// percent-encode
			b.WriteString("%")
			b.WriteString(strings.ToUpper(hex.EncodeToString([]byte{c})))
		}
	}
	return b.String()
}

// parseIntDefault parses a non-negative decimal string, returning def on any
// non-digit character. NOTE(review): no overflow guard — a very long digit
// string wraps around; acceptable for X-Amz-Expires but worth confirming.
func parseIntDefault(s string, def int) int {
	var n int
	for i := 0; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			return def
		}
		n = n*10 + int(s[i]-'0')
	}
	return n
}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /internal/s3api/s3_multipart_handlers.go: -------------------------------------------------------------------------------- 1 | package s3api 2 | 3 | import ( 4 | "encoding/xml" 5 | "errors" 6 | "github.com/wpnpeiris/nats-s3/internal/model" 7 | "io" 8 | "net/http" 9 | "sort" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/aws/aws-sdk-go/aws" 14 | "github.com/aws/aws-sdk-go/service/s3" 15 | "github.com/google/uuid" 16 | "github.com/gorilla/mux" 17 | "github.com/wpnpeiris/nats-s3/internal/client" 18 | "github.com/wpnpeiris/nats-s3/internal/streams" 19 | ) 20 | 21 | const ( 22 | maxUploadsList = 10000 // Max number of uploads in a listUploadsResponse. 23 | maxPartsList = 10000 // Max number of parts in a listPartsResponse. 
	// S3-compatible size limits
	maxPartSize    = 5 * 1024 * 1024 * 1024 // 5GB per part (S3 multipart limit)
	maxXMLBodySize = 1 * 1024 * 1024        // 1MB for XML request bodies
)

// InitiateMultipartUpload creates a new multipart upload session for the given
// bucket and object key, returning UploadId/Bucket/Key in S3-compatible XML.
func (s *S3Gateway) InitiateMultipartUpload(w http.ResponseWriter, r *http.Request) {
	// The gateway generates the upload id; clients echo it on every part.
	uploadID := uuid.New().String()
	bucket := mux.Vars(r)["bucket"]
	key := mux.Vars(r)["key"]

	err := s.multiPartStore.InitMultipartUpload(r.Context(), bucket, key, uploadID)
	if err != nil {
		// NOTE(review): every store failure collapses to InternalError here;
		// the store's sentinel errors are not inspected.
		model.WriteErrorResponse(w, r, model.ErrInternalError)
		return
	}

	response := model.InitiateMultipartUploadResult{
		CreateMultipartUploadOutput: s3.CreateMultipartUploadOutput{
			Bucket:   aws.String(bucket),
			Key:      objectKey(key),
			UploadId: aws.String(uploadID),
		},
	}
	model.WriteXMLResponse(w, r, http.StatusOK, response)
}

// objectKey normalizes a mux-extracted key for response payloads by trimming
// a possible leading slash. AWS S3 omits a leading '/' in the Key field.
func objectKey(key string) *string {
	if strings.HasPrefix(key, "/") {
		t := (key)[1:]
		return &t
	}
	return &key
}

// UploadPart receives an object part for an existing multipart upload and
// responds with the part's ETag in the response headers. The ETag value is
// quoted to match S3 behavior.
66 | func (s *S3Gateway) UploadPart(w http.ResponseWriter, r *http.Request) { 67 | bucket := mux.Vars(r)["bucket"] 68 | key := mux.Vars(r)["key"] 69 | uploadID := r.URL.Query().Get("uploadId") 70 | 71 | if uploadID == "" { 72 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 73 | return 74 | } 75 | 76 | pnStr := r.URL.Query().Get("partNumber") 77 | partNum, _ := strconv.Atoi(pnStr) 78 | if partNum < 1 || partNum > maxUploadsList { 79 | model.WriteErrorResponse(w, r, model.ErrInvalidPart) 80 | return 81 | } 82 | 83 | // Validate Content-Length 84 | if r.ContentLength < 0 { 85 | model.WriteErrorResponse(w, r, model.ErrMissingFields) 86 | return 87 | } 88 | if r.ContentLength > maxPartSize { 89 | model.WriteErrorResponse(w, r, model.ErrEntityTooLarge) 90 | return 91 | } 92 | 93 | // Use LimitReader as defense-in-depth to ensure we never read more than maxPartSize 94 | // Wrap it in a limitedReadCloser to satisfy io.ReadCloser interface 95 | limitedBody := &limitedReadCloser{ 96 | Reader: io.LimitReader(r.Body, maxPartSize+1), 97 | Closer: r.Body, 98 | } 99 | 100 | etag, err := s.multiPartStore.UploadPart(r.Context(), bucket, key, uploadID, partNum, limitedBody) 101 | if err != nil { 102 | if errors.Is(err, client.ErrUploadNotFound) || errors.Is(err, client.ErrUploadCompleted) { 103 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 104 | return 105 | } 106 | model.WriteErrorResponse(w, r, model.ErrInternalError) 107 | return 108 | } 109 | 110 | model.SetEtag(w, etag) 111 | model.WriteEmptyResponse(w, r, http.StatusOK) 112 | } 113 | 114 | // StreamUploadPart handles SigV4 streaming-chunked multipart uploads. 
115 | func (s *S3Gateway) StreamUploadPart(w http.ResponseWriter, r *http.Request) { 116 | bucket := mux.Vars(r)["bucket"] 117 | key := mux.Vars(r)["key"] 118 | uploadID := r.URL.Query().Get("uploadId") 119 | 120 | if uploadID == "" { 121 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 122 | return 123 | } 124 | 125 | pnStr := r.URL.Query().Get("partNumber") 126 | partNum, _ := strconv.Atoi(pnStr) 127 | if partNum < 1 || partNum > maxUploadsList { 128 | model.WriteErrorResponse(w, r, model.ErrInvalidPart) 129 | return 130 | } 131 | 132 | if !streams.IsSigV4StreamingPayload(r) { 133 | model.WriteErrorResponse(w, r, model.ErrInvalidRequest) 134 | return 135 | } 136 | 137 | if dl, err := strconv.ParseInt(r.Header.Get("x-amz-decoded-content-length"), 10, 64); err == nil && dl > maxPartSize { 138 | model.WriteErrorResponse(w, r, model.ErrEntityTooLarge) 139 | return 140 | } 141 | 142 | bodyReader := streams.NewLimitedSigV4StreamReader(r.Body, maxPartSize+1) 143 | 144 | etag, err := s.multiPartStore.UploadPart(r.Context(), bucket, key, uploadID, partNum, bodyReader) 145 | if err != nil { 146 | if errors.Is(err, client.ErrUploadNotFound) || errors.Is(err, client.ErrUploadCompleted) { 147 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 148 | return 149 | } 150 | model.WriteErrorResponse(w, r, model.ErrInternalError) 151 | return 152 | } 153 | 154 | model.SetEtag(w, etag) 155 | model.WriteEmptyResponse(w, r, http.StatusOK) 156 | } 157 | 158 | // CompleteMultipartUpload finalizes a multipart upload by parsing the client 159 | // provided part list, delegating composition to the storage client, and 160 | // returning an S3-compatible XML response with the final ETag. 
161 | func (s *S3Gateway) CompleteMultipartUpload(w http.ResponseWriter, r *http.Request) { 162 | bucket := mux.Vars(r)["bucket"] 163 | key := mux.Vars(r)["key"] 164 | uploadID := r.URL.Query().Get("uploadId") 165 | 166 | if uploadID == "" { 167 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 168 | return 169 | } 170 | 171 | if r.ContentLength < 0 { 172 | model.WriteErrorResponse(w, r, model.ErrMissingFields) 173 | return 174 | } 175 | if r.ContentLength > maxXMLBodySize { 176 | model.WriteErrorResponse(w, r, model.ErrEntityTooLarge) 177 | return 178 | } 179 | 180 | parts := &model.CompleteMultipartUpload{} 181 | xmlSize := r.ContentLength 182 | if xmlSize > maxXMLBodySize { 183 | xmlSize = maxXMLBodySize 184 | } 185 | if err := xmlDecoder(r.Body, parts, xmlSize); err != nil { 186 | model.WriteErrorResponse(w, r, model.ErrMalformedXML) 187 | return 188 | } 189 | 190 | sortedPartNumbers := parsePartNumbers(parts) 191 | etag, err := s.multiPartStore.CompleteMultipartUpload(r.Context(), bucket, key, uploadID, sortedPartNumbers) 192 | if err != nil { 193 | model.WriteErrorResponse(w, r, model.ErrInternalError) 194 | return 195 | } 196 | 197 | response := model.CompleteMultipartUploadResult{ 198 | Bucket: aws.String(bucket), 199 | ETag: aws.String(etag), 200 | Key: objectKey(key), 201 | } 202 | model.WriteXMLResponse(w, r, http.StatusOK, response) 203 | } 204 | 205 | // AbortMultipartUpload aborts a multipart upload, removing uploaded parts and 206 | // session metadata, and responds with 204 No Content on success. 
207 | func (s *S3Gateway) AbortMultipartUpload(w http.ResponseWriter, r *http.Request) { 208 | bucket := mux.Vars(r)["bucket"] 209 | key := mux.Vars(r)["key"] 210 | uploadID := r.URL.Query().Get("uploadId") 211 | 212 | if uploadID == "" { 213 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 214 | return 215 | } 216 | err := s.multiPartStore.AbortMultipartUpload(r.Context(), bucket, key, uploadID) 217 | if err != nil { 218 | model.WriteErrorResponse(w, r, model.ErrInternalError) 219 | return 220 | } 221 | 222 | model.WriteEmptyResponse(w, r, http.StatusNoContent) 223 | } 224 | 225 | // ListParts lists uploaded parts for a multipart upload. Supports pagination via 226 | // part-number-marker and max-parts query parameters. 227 | func (s *S3Gateway) ListParts(w http.ResponseWriter, r *http.Request) { 228 | bucket := mux.Vars(r)["bucket"] 229 | key := mux.Vars(r)["key"] 230 | uploadID := r.URL.Query().Get("uploadId") 231 | partNumberMarker, _ := strconv.Atoi(r.URL.Query().Get("part-number-marker")) 232 | 233 | if uploadID == "" { 234 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 235 | return 236 | } 237 | if partNumberMarker < 0 { 238 | model.WriteErrorResponse(w, r, model.ErrInvalidPartNumberMarker) 239 | return 240 | } 241 | 242 | var maxParts int 243 | if r.URL.Query().Get("max-parts") != "" { 244 | maxParts, _ = strconv.Atoi(r.URL.Query().Get("max-parts")) 245 | } else { 246 | maxParts = maxPartsList 247 | } 248 | if maxParts < 0 { 249 | model.WriteErrorResponse(w, r, model.ErrInvalidMaxParts) 250 | return 251 | } 252 | 253 | meta, err := s.multiPartStore.ListParts(r.Context(), bucket, key, uploadID) 254 | if err != nil { 255 | if errors.Is(err, client.ErrUploadNotFound) || errors.Is(err, client.ErrUploadCompleted) { 256 | model.WriteErrorResponse(w, r, model.ErrNoSuchUpload) 257 | return 258 | } 259 | model.WriteErrorResponse(w, r, model.ErrInternalError) 260 | return 261 | } 262 | response := model.ListPartsResult{ 263 | Bucket: 
aws.String(meta.Bucket), 264 | Key: aws.String(meta.Key), 265 | UploadId: aws.String(uploadID), 266 | MaxParts: aws.Int64(int64(maxParts)), 267 | PartNumberMarker: aws.Int64(int64(partNumberMarker)), 268 | StorageClass: aws.String("STANDARD"), 269 | } 270 | 271 | // Collect and sort part numbers to ensure deterministic ordering. 272 | partNumbers := make([]int, 0, len(meta.Parts)) 273 | for pn := range meta.Parts { 274 | partNumbers = append(partNumbers, pn) 275 | } 276 | sort.Ints(partNumbers) 277 | 278 | // Apply part-number-marker: start strictly after the marker value. 279 | start := 0 280 | if partNumberMarker > 0 { 281 | for i, pn := range partNumbers { 282 | if pn > partNumberMarker { 283 | start = i 284 | break 285 | } 286 | start = len(partNumbers) // if all <= marker, this will skip all 287 | } 288 | } 289 | 290 | // Limit by max-parts. 291 | end := start + maxParts 292 | if end > len(partNumbers) { 293 | end = len(partNumbers) 294 | } 295 | 296 | // Slice and build response parts. 297 | if start < end { 298 | for _, pn := range partNumbers[start:end] { 299 | p := meta.Parts[pn] 300 | response.Part = append(response.Part, &s3.Part{ 301 | PartNumber: aws.Int64(int64(p.Number)), 302 | Size: aws.Int64(int64(p.Size)), 303 | ETag: aws.String(p.ETag), 304 | }) 305 | response.NextPartNumberMarker = aws.Int64(int64(p.Number)) 306 | } 307 | } else { 308 | // No parts in this page; keep marker as provided. 309 | response.NextPartNumberMarker = aws.Int64(int64(partNumberMarker)) 310 | } 311 | 312 | // Indicate if there are more parts beyond this page. 
313 | isTruncated := end < len(partNumbers) 314 | response.IsTruncated = aws.Bool(isTruncated) 315 | 316 | model.WriteXMLResponse(w, r, http.StatusOK, response) 317 | } 318 | 319 | func parsePartNumbers(parts *model.CompleteMultipartUpload) []int { 320 | if parts == nil || len(parts.Parts) == 0 { 321 | return nil 322 | } 323 | 324 | nums := make([]int, 0, len(parts.Parts)) 325 | for _, p := range parts.Parts { 326 | nums = append(nums, p.PartNumber) 327 | } 328 | sort.Ints(nums) 329 | return nums 330 | } 331 | 332 | func xmlDecoder(body io.Reader, v interface{}, size int64) error { 333 | var reader io.Reader 334 | if size > 0 { 335 | reader = io.LimitReader(body, size) 336 | } else { 337 | reader = body 338 | } 339 | d := xml.NewDecoder(reader) 340 | d.CharsetReader = func(label string, input io.Reader) (io.Reader, error) { 341 | return input, nil 342 | } 343 | return d.Decode(v) 344 | } 345 | 346 | // limitedReadCloser wraps an io.Reader with io.Closer to satisfy io.ReadCloser interface 347 | type limitedReadCloser struct { 348 | Reader io.Reader 349 | Closer io.Closer 350 | } 351 | 352 | func (lrc *limitedReadCloser) Read(p []byte) (n int, err error) { 353 | return lrc.Reader.Read(p) 354 | } 355 | 356 | func (lrc *limitedReadCloser) Close() error { 357 | if lrc.Closer != nil { 358 | return lrc.Closer.Close() 359 | } 360 | return nil 361 | } 362 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NATS-S3 Gateway 2 | 3 | [![Build Status](https://github.com/wpnpeiris/nats-s3/actions/workflows/release.yml/badge.svg)](https://github.com/wpnpeiris/nats-s3/actions/workflows/release.yml) 4 | [![Conformance](https://github.com/wpnpeiris/nats-s3/actions/workflows/conformance.yml/badge.svg)](https://github.com/wpnpeiris/nats-s3/actions/workflows/conformance.yml) 5 | 
[![Coverage](https://github.com/wpnpeiris/nats-s3/actions/workflows/coverage.yml/badge.svg)](https://github.com/wpnpeiris/nats-s3/actions/workflows/coverage.yml) 6 | [![Go Report Card](https://goreportcard.com/badge/github.com/wpnpeiris/nats-s3)](https://goreportcard.com/report/github.com/wpnpeiris/nats-s3) 7 | [![License](https://img.shields.io/github/license/wpnpeiris/nats-s3)](https://github.com/wpnpeiris/nats-s3/blob/main/LICENSE) 8 | [![GitHub Stars](https://img.shields.io/github/stars/wpnpeiris/nats-s3?style=social)](https://github.com/wpnpeiris/nats-s3/stargazers) 9 | [![Docker Pulls](https://img.shields.io/docker/pulls/wpnpeiris/nats-s3)](https://hub.docker.com/r/wpnpeiris/nats-s3) 10 | 11 | > **S3-compatible object storage powered by NATS JetStream** - Lightweight, fast, and cloud-native. 12 | 13 | ## About NATS 14 | 15 | NATS is a high‑performance distributed messaging system with pub/sub at its core and a 16 | built‑in persistence layer (JetStream) enabling Streaming, Key‑Value, and Object Store. 17 | It includes authentication/authorization, multi‑tenancy, and rich deployment topologies. 18 | 19 | ``` 20 | +-----------------------+ +-----------------------+ 21 | | Clients | | Clients | 22 | | (Publish / Subscribe)| | (Apps/Services) | 23 | +-----------+-----------+ +-----------+-----------+ 24 | | | 25 | v v 26 | +-----+-----------------------------------+-----+ 27 | | NATS Cluster | 28 | | +-----------+ +-----------+ +-----------+ | 29 | | | Server | | Server | | Server | | 30 | | +-----------+ +-----------+ +-----------+ | 31 | +------------------------------------------------+ 32 | ``` 33 | 34 | ## NATS‑S3 35 | Modern object stores like [MinIO](https://github.com/minio/minio), 36 | [SeaweedFS](https://github.com/seaweedfs/seaweedfs), [JuiceFS](https://github.com/juicedata/juicefs), 37 | and [AIStore](https://github.com/NVIDIA/aistore) expose S3‑compatible HTTP APIs for simple integration. 
38 | NATS‑S3 follows this approach to provide S3 access to NATS JetStream Object Store. 39 | 40 | ``` 41 | NATS-S3 Gateway 42 | 43 | +-------------------+ HTTP (S3 API) +--------------------+ 44 | | S3 Clients +------------------------->+ nats-s3 | 45 | | (AWS CLI/SDKs) | | HTTP Gateway | 46 | +-------------------+ +----------+---------+ 47 | | 48 | | 49 | +--------------------+ 50 | | NATS Cluster | 51 | | JetStream Object | 52 | | Store | 53 | +--------------------+ 54 | ``` 55 | 56 | 57 | ## Quick Start 58 | Follow these steps to spin up NATS-S3, integrated with NATS server, and use AWS CLI to work with a bucket and objects. 59 | 60 | 1) Prerequisites 61 | - Docker (for NATS) 62 | - AWS CLI (v2 recommended) 63 | 64 | 2) Start NATS (with JetStream) via Docker 65 | ```bash 66 | docker run -p 4222:4222 -ti nats:latest -js 67 | ``` 68 | 69 | 3) Create a credentials file 70 | ```bash 71 | cat > credentials.json < file.txt 113 | aws s3 cp file.txt s3://bucket1/hello.txt --endpoint-url=http://localhost:5222 114 | ``` 115 | 116 | 9) List bucket contents 117 | ```bash 118 | aws s3 ls s3://bucket1 --endpoint-url=http://localhost:5222 119 | ``` 120 | 121 | 10) Download the object 122 | ```bash 123 | aws s3 cp s3://bucket1/hello.txt ./hello_copy.txt --endpoint-url=http://localhost:5222 124 | ``` 125 | 126 | 11) Delete the object 127 | ```bash 128 | aws s3 rm s3://bucket1/hello.txt --endpoint-url=http://localhost:5222 129 | ``` 130 | 131 | Optional: delete the bucket 132 | ```bash 133 | aws s3 rb s3://bucket1 --endpoint-url=http://localhost:5222 134 | ``` 135 | 136 | ## Build & Run 137 | - Prereqs: Go 1.22+, a running NATS server (with JetStream enabled for Object Store). 
138 | 139 | Build 140 | ```shell 141 | make build 142 | ``` 143 | 144 | Run 145 | ```shell 146 | ./nats-s3 \ 147 | --listen 0.0.0.0:5222 \ 148 | --natsServers nats://127.0.0.1:4222 \ 149 | --s3.credentials credentials.json 150 | ``` 151 | 152 | Flags 153 | - `--listen`: HTTP bind address for the S3 gateway (default `0.0.0.0:5222`). 154 | - `--natsServers`: Comma‑separated NATS server URLs (default from `nats.DefaultURL`). 155 | - `--natsUser`, `--natsPassword`: Optional NATS credentials for connecting to NATS server. 156 | - `--natsToken`: NATS server token for token-based authentication. 157 | - `--natsNKeyFile`: NATS server NKey seed file path for NKey authentication. 158 | - `--natsCredsFile`: NATS server credentials file path for JWT authentication. 159 | - `--replicas`: Number of NATS replicas for each jetstream element (default 1). 160 | - `--s3.credentials`: Path to S3 credentials file (JSON format, required). 161 | - `--log.format`: Log output format: logfmt or json (default logfmt). 162 | - `--log.level`: Log level: debug, info, warn, error (default info). 163 | - `--http.read-timeout`: HTTP server read timeout (default 15m). 164 | - `--http.write-timeout`: HTTP server write timeout (default 15m). 165 | - `--http.idle-timeout`: HTTP server idle timeout (default 120s). 166 | - `--http.read-header-timeout`: HTTP server read header timeout (default 30s). 167 | 168 | ### Coverage 169 | Generate coverage profile and HTML report locally: 170 | ```bash 171 | make coverage # writes coverage.out 172 | make coverage-report # prints total coverage summary 173 | make coverage-html # writes coverage.html 174 | ``` 175 | 176 | In CI, coverage is generated and uploaded as an artifact. 177 | 178 | ### Releasing (GoReleaser) 179 | Tagged releases are built and published via GoReleaser. 

- Create and push a tag like `v0.2.0`:
```bash
git tag -a v0.2.0 -m "v0.2.0"
git push origin v0.2.0
```

CI will build multi‑platform archives and attach them to the GitHub Release.
Local dry run:
```bash
goreleaser release --snapshot --clean
```

## Docker
Build the image
```bash
docker build -t nats-s3:dev .
```

Run with a locally running NATS via the host network
```bash

# Start NATS (JetStream) on the host network
docker run --network host -p 4222:4222 \
  nats:latest -js

# Start nats-s3 and expose port 5222
# Note: Mount credentials.json file into the container
docker run --network host -p 5222:5222 \
  -v $(pwd)/credentials.json:/credentials.json \
  wpnpeiris/nats-s3:latest \
  --listen 0.0.0.0:5222 \
  --natsServers nats://127.0.0.1:4222 \
  --s3.credentials /credentials.json
```

Test with AWS CLI
```bash
export AWS_ACCESS_KEY_ID=my-access-key
export AWS_SECRET_ACCESS_KEY=my-secret-key
export AWS_DEFAULT_REGION=us-east-1

aws s3 ls --endpoint-url=http://localhost:5222
```

### Use prebuilt image from GHCR
Pull and run the published container image from GitHub Container Registry.
227 | 228 | Set a version tag (example: v0.0.2) 229 | ```bash 230 | IMAGE_TAG=v0.0.2 231 | docker pull ghcr.io/wpnpeiris/nats-s3:${IMAGE_TAG} 232 | ``` 233 | 234 | Run against a host‑running NATS (portable across OSes) 235 | ```bash 236 | # Start NATS locally (JetStream enabled) 237 | docker run --network host -p 4222:4222 \ 238 | nats:latest -js 239 | 240 | # Start nats-s3 and point to the host via host-gateway 241 | docker run --network host -p 5222:5222 \ 242 | -v $(pwd)/credentials.json:/credentials.json \ 243 | ghcr.io/wpnpeiris/nats-s3:${IMAGE_TAG} \ 244 | --listen 0.0.0.0:5222 \ 245 | --natsServers nats://127.0.0.1:4222 \ 246 | --s3.credentials /credentials.json 247 | ``` 248 | 249 | Test with AWS CLI 250 | ```bash 251 | export AWS_ACCESS_KEY_ID=my-access-key 252 | export AWS_SECRET_ACCESS_KEY=my-secret-key 253 | export AWS_DEFAULT_REGION=us-east-1 254 | 255 | aws s3 ls --endpoint-url=http://localhost:5222 256 | ``` 257 | 258 | ## Authentication 259 | nats-s3 uses AWS Signature Version 4 (SigV4) for S3 API authentication. The gateway 260 | loads credentials from a JSON file and supports multiple users. 261 | 262 | ### Credential Store 263 | 264 | The credential store is a JSON file containing one or more AWS-style access/secret key pairs: 265 | 266 | ```json 267 | { 268 | "credentials": [ 269 | { 270 | "accessKey": "AKIAIOSFODNN7EXAMPLE", 271 | "secretKey": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" 272 | }, 273 | { 274 | "accessKey": "AKIAI44QH8DHBEXAMPLE", 275 | "secretKey": "je7MtGbClwBF/2Zp9Utk/h3yCo8nvbEXAMPLEKEY" 276 | } 277 | ] 278 | } 279 | ``` 280 | 281 | Requirements: 282 | - Access keys must be at least 3 characters 283 | - Secret keys must be at least 8 characters 284 | - Access keys cannot contain reserved characters (`=` or `,`) 285 | - Each access key must be unique 286 | 287 | See `credentials.example.json` for a complete example. 288 | 289 | ### How it works 290 | 1. 
Start the gateway with a credentials file: 291 | ```shell 292 | ./nats-s3 \ 293 | --listen 0.0.0.0:5222 \ 294 | --natsServers nats://127.0.0.1:4222 \ 295 | --s3.credentials credentials.json 296 | ``` 297 | 298 | 2. Clients sign S3 requests using SigV4 with any valid credential from the file: 299 | ```shell 300 | export AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE 301 | export AWS_SECRET_ACCESS_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY 302 | export AWS_DEFAULT_REGION=us-east-1 303 | 304 | aws s3 ls --endpoint-url=http://localhost:5222 305 | ``` 306 | 307 | 3. The gateway verifies the SigV4 signature and, on success, processes the request. 308 | 309 | ### Notes 310 | - Both header-based SigV4 and presigned URLs (query-string SigV4) are supported. 311 | - Time skew of ±5 minutes is allowed; presigned URLs honor X-Amz-Expires. 312 | - Multiple users can share the same gateway, each with their own credentials. 313 | - NATS server authentication (`--natsUser`/`--natsPassword`) is independent from S3 credentials. 314 | 315 | ## Roadmap & Contributing 316 | - See [ROADMAP.md](ROADMAP.md) for planned milestones. 317 | - Contributions welcome! See CONTRIBUTING.md and CODE_OF_CONDUCT.md. 318 | 319 | 320 | ## Contributing 321 | 322 | - See [CONTRIBUTING.md](CONTRIBUTING.md) for how to get started. 323 | 324 | - Please follow our simple [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md). 325 | 326 | ## Conformance 327 | 328 | NATS-S3 achieves **100% pass rate** on core S3 API operations with comprehensive conformance testing. 
See [CONFORMANCE.md](CONFORMANCE.md) for:
- Full test coverage details (25+ tests)
- S3 API feature matrix
- Instructions for running tests locally

--------------------------------------------------------------------------------
/internal/s3api/s3_multipart_handlers_test.go:
--------------------------------------------------------------------------------
package s3api

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"net/http/httptest"
	"testing"

	"github.com/wpnpeiris/nats-s3/internal/logging"
	"github.com/wpnpeiris/nats-s3/internal/testutil"

	"github.com/google/uuid"
	"github.com/gorilla/mux"
)

// Minimal shape to parse InitiateMultipartUpload XML response
type initResp struct {
	UploadId string `xml:"UploadId"`
	Bucket   string `xml:"Bucket"`
	Key      string `xml:"Key"`
}

// TestInitiateMultipartUpload_SucceedsAndPersistsSession verifies that POST
// ?uploads= returns 200 with an XML body echoing bucket/key and a UUID UploadId.
func TestInitiateMultipartUpload_SucceedsAndPersistsSession(t *testing.T) {
	s := testutil.StartJSServer(t)
	defer s.Shutdown()

	logger := logging.NewLogger(logging.Config{Level: "debug"})
	gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil)
	if err != nil {
		t.Fatalf("failed to create S3 gateway: %v", err)
	}

	r := mux.NewRouter()
	gw.RegisterRoutes(r)

	bucket := "mpbucket"
	key := "dir/parted.txt"

	req := httptest.NewRequest("POST", "/"+bucket+"/"+key+"?uploads=", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)

	if rr.Code != 200 {
		t.Fatalf("unexpected status: %d body=%s", rr.Code, rr.Body.String())
	}

	var parsed initResp
	if err := xml.Unmarshal(rr.Body.Bytes(), &parsed); err != nil {
		t.Fatalf("unmarshal xml failed: %v\nxml=%s", err, rr.Body.String())
	}

	if parsed.Bucket != bucket || parsed.Key != key {
		t.Fatalf("unexpected response bucket/key: %q/%q", parsed.Bucket, parsed.Key)
	}
	if parsed.UploadId == "" {
		t.Fatalf("expected non-empty UploadId")
	}
	// UploadId is generated with uuid.New() in the handler; validate the format.
	if _, err := uuid.Parse(parsed.UploadId); err != nil {
		t.Fatalf("UploadId is not a valid UUID: %v", err)
	}
}

// TestListParts_PaginatesDeterministically uploads 7 parts and walks three
// pages of max-parts=3, checking page contents, IsTruncated and the marker.
func TestListParts_PaginatesDeterministically(t *testing.T) {
	s := testutil.StartJSServer(t)
	defer s.Shutdown()

	logger := logging.NewLogger(logging.Config{Level: "debug"})
	gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil)
	if err != nil {
		t.Fatalf("failed to create S3 gateway: %v", err)
	}
	r := mux.NewRouter()
	gw.RegisterRoutes(r)

	bucket := "lpbucket"
	key := "dir/large.txt"

	// Initiate multipart upload
	req := httptest.NewRequest("POST", "/"+bucket+"/"+key+"?uploads=", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)
	if rr.Code != 200 {
		t.Fatalf("init status=%d body=%s", rr.Code, rr.Body.String())
	}
	var ir initResp
	if err := xml.Unmarshal(rr.Body.Bytes(), &ir); err != nil {
		t.Fatalf("unmarshal init xml failed: %v\nxml=%s", err, rr.Body.String())
	}

	// Upload 7 parts with small payloads
	for i := 1; i <= 7; i++ {
		body := bytes.NewBufferString(fmt.Sprintf("part-%d", i))
		upr := httptest.NewRequest("PUT", fmt.Sprintf("/%s/%s?uploadId=%s&partNumber=%d", bucket, key, ir.UploadId, i), body)
		upr.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
		uprr := httptest.NewRecorder()
		r.ServeHTTP(uprr, upr)
		if uprr.Code != 200 {
			t.Fatalf("upload part %d failed: status=%d body=%s", i, uprr.Code, uprr.Body.String())
		}
	}

	// Helper to call ListParts and parse minimal fields
	type listResp struct {
		IsTruncated          bool  `xml:"IsTruncated"`
		NextPartNumberMarker int   `xml:"NextPartNumberMarker"`
		PartNumbers          []int `xml:"Part>PartNumber"`
	}

	call := func(marker, max int) listResp {
		url := fmt.Sprintf("/%s/%s?uploadId=%s&part-number-marker=%d&max-parts=%d", bucket, key, ir.UploadId, marker, max)
		lr := httptest.NewRequest("GET", url, nil)
		lrr := httptest.NewRecorder()
		r.ServeHTTP(lrr, lr)
		if lrr.Code != 200 {
			t.Fatalf("list parts failed: status=%d body=%s", lrr.Code, lrr.Body.String())
		}
		var out listResp
		if err := xml.Unmarshal(lrr.Body.Bytes(), &out); err != nil {
			t.Fatalf("unmarshal list xml failed: %v\nxml=%s", err, lrr.Body.String())
		}
		return out
	}

	// Page 1: expect 1,2,3
	p1 := call(0, 3)
	if len(p1.PartNumbers) != 3 || p1.PartNumbers[0] != 1 || p1.PartNumbers[1] != 2 || p1.PartNumbers[2] != 3 {
		t.Fatalf("page1 parts unexpected: %+v", p1.PartNumbers)
	}
	if !p1.IsTruncated || p1.NextPartNumberMarker != 3 {
		t.Fatalf("page1 truncation/marker unexpected: truncated=%v next=%d", p1.IsTruncated, p1.NextPartNumberMarker)
	}

	// Page 2: expect 4,5,6
	p2 := call(p1.NextPartNumberMarker, 3)
	if len(p2.PartNumbers) != 3 || p2.PartNumbers[0] != 4 || p2.PartNumbers[1] != 5 || p2.PartNumbers[2] != 6 {
		t.Fatalf("page2 parts unexpected: %+v", p2.PartNumbers)
	}
	if !p2.IsTruncated || p2.NextPartNumberMarker != 6 {
		t.Fatalf("page2 truncation/marker unexpected: truncated=%v next=%d", p2.IsTruncated, p2.NextPartNumberMarker)
	}

	// Page 3: expect 7
	p3 := call(p2.NextPartNumberMarker, 3)
	if len(p3.PartNumbers) != 1 || p3.PartNumbers[0] != 7 {
		t.Fatalf("page3 parts unexpected: %+v", p3.PartNumbers)
	}
	if p3.IsTruncated || p3.NextPartNumberMarker != 7 {
		t.Fatalf("page3 truncation/marker unexpected: truncated=%v next=%d", p3.IsTruncated, p3.NextPartNumberMarker)
	}
}

// TestListParts_NoParts checks the empty-session case: no parts listed,
// not truncated, and the marker stays at 0.
func TestListParts_NoParts(t *testing.T) {
	s := testutil.StartJSServer(t)
	defer s.Shutdown()

	logger := logging.NewLogger(logging.Config{Level: "debug"})
	gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil)
	if err != nil {
		t.Fatalf("failed to create S3 gateway: %v", err)
	}
	r := mux.NewRouter()
	gw.RegisterRoutes(r)

	bucket := "nopartsbucket"
	key := "empty/object"

	// Initiate but do not upload any parts
	req := httptest.NewRequest("POST", "/"+bucket+"/"+key+"?uploads=", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)
	if rr.Code != 200 {
		t.Fatalf("init status=%d body=%s", rr.Code, rr.Body.String())
	}
	var ir initResp
	if err := xml.Unmarshal(rr.Body.Bytes(), &ir); err != nil {
		t.Fatalf("unmarshal init xml failed: %v\nxml=%s", err, rr.Body.String())
	}

	// List parts should be empty, not truncated, marker 0
	url := fmt.Sprintf("/%s/%s?uploadId=%s", bucket, key, ir.UploadId)
	lr := httptest.NewRequest("GET", url, nil)
	lrr := httptest.NewRecorder()
	r.ServeHTTP(lrr, lr)
	if lrr.Code != 200 {
		t.Fatalf("list parts failed: status=%d body=%s", lrr.Code, lrr.Body.String())
	}
	var out struct {
		IsTruncated          bool  `xml:"IsTruncated"`
		NextPartNumberMarker int   `xml:"NextPartNumberMarker"`
		PartNumbers          []int `xml:"Part>PartNumber"`
	}
	if err := xml.Unmarshal(lrr.Body.Bytes(), &out); err != nil {
		t.Fatalf("unmarshal list xml failed: %v\nxml=%s", err, lrr.Body.String())
	}
	if out.IsTruncated || out.NextPartNumberMarker != 0 || len(out.PartNumbers) != 0 {
		t.Fatalf("unexpected empty list response: %+v", out)
	}
}

// TestListParts_MarkerBeyondLast verifies that a part-number-marker beyond
// the highest uploaded part yields an empty, non-truncated page that echoes
// the marker back.
func TestListParts_MarkerBeyondLast(t *testing.T) {
	s := testutil.StartJSServer(t)
	defer s.Shutdown()

	logger := logging.NewLogger(logging.Config{Level: "debug"})
	gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil)
	if err != nil {
		t.Fatalf("failed to create S3 gateway: %v", err)
	}
	r := mux.NewRouter()
	gw.RegisterRoutes(r)

	bucket := "markerbeyond"
	key := "obj/key"

	// Initiate multipart upload
	req := httptest.NewRequest("POST", "/"+bucket+"/"+key+"?uploads=", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)
	if rr.Code != 200 {
		t.Fatalf("init status=%d body=%s", rr.Code, rr.Body.String())
	}
	var ir initResp
	if err := xml.Unmarshal(rr.Body.Bytes(), &ir); err != nil {
		t.Fatalf("unmarshal init xml failed: %v\nxml=%s", err, rr.Body.String())
	}

	// Upload 3 parts
	for i := 1; i <= 3; i++ {
		body := bytes.NewBufferString(fmt.Sprintf("p-%d", i))
		upr := httptest.NewRequest("PUT", fmt.Sprintf("/%s/%s?uploadId=%s&partNumber=%d", bucket, key, ir.UploadId, i), body)
		upr.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
		uprr := httptest.NewRecorder()
		r.ServeHTTP(uprr, upr)
		if uprr.Code != 200 {
			t.Fatalf("upload part %d failed: status=%d body=%s", i, uprr.Code, uprr.Body.String())
		}
	}

	// Use a marker beyond the last part
	url := fmt.Sprintf("/%s/%s?uploadId=%s&part-number-marker=%d&max-parts=%d", bucket, key, ir.UploadId, 10, 2)
	lr := httptest.NewRequest("GET", url, nil)
	lrr := httptest.NewRecorder()
	r.ServeHTTP(lrr, lr)
	if lrr.Code != 200 {
		t.Fatalf("list parts failed: status=%d body=%s", lrr.Code, lrr.Body.String())
	}
	var out struct {
		IsTruncated          bool  `xml:"IsTruncated"`
		NextPartNumberMarker int   `xml:"NextPartNumberMarker"`
		PartNumbers          []int `xml:"Part>PartNumber"`
	}
	if err := xml.Unmarshal(lrr.Body.Bytes(), &out); err != nil {
		t.Fatalf("unmarshal list xml failed: %v\nxml=%s", err, lrr.Body.String())
	}
	if out.IsTruncated || out.NextPartNumberMarker != 10 || len(out.PartNumbers) != 0 {
		t.Fatalf("unexpected marker-beyond response: %+v", out)
	}
}

// TestListParts_NonContiguousParts uploads parts 1, 3 and 5 only, and checks
// that pagination skips the gaps correctly (page 1: 1,3; page 2: 5).
func TestListParts_NonContiguousParts(t *testing.T) {
	s := testutil.StartJSServer(t)
	defer s.Shutdown()

	logger := logging.NewLogger(logging.Config{Level: "debug"})
	gw, err := NewS3Gateway(logger, s.ClientURL(), 1, nil, nil)
	if err != nil {
		t.Fatalf("failed to create S3 gateway: %v", err)
	}
	r := mux.NewRouter()
	gw.RegisterRoutes(r)

	bucket := "noncontig"
	key := "obj/noncontig"

	// Initiate multipart upload
	req := httptest.NewRequest("POST", "/"+bucket+"/"+key+"?uploads=", nil)
	rr := httptest.NewRecorder()
	r.ServeHTTP(rr, req)
	if rr.Code != 200 {
		t.Fatalf("init status=%d body=%s", rr.Code, rr.Body.String())
	}
	var ir initResp
	if err := xml.Unmarshal(rr.Body.Bytes(), &ir); err != nil {
		t.Fatalf("unmarshal init xml failed: %v\nxml=%s", err, rr.Body.String())
	}

	// Upload parts 1,3,5 only
	for _, i := range []int{1, 3, 5} {
		body := bytes.NewBufferString(fmt.Sprintf("pc-%d", i))
		upr := httptest.NewRequest("PUT", fmt.Sprintf("/%s/%s?uploadId=%s&partNumber=%d", bucket, key, ir.UploadId, i), body)
		upr.Header.Set("Content-Length", fmt.Sprintf("%d", body.Len()))
		uprr := httptest.NewRecorder()
		r.ServeHTTP(uprr, upr)
		if uprr.Code != 200 {
			t.Fatalf("upload part %d failed: status=%d body=%s", i, uprr.Code, uprr.Body.String())
		}
	}

	// Page 1: expect 1,3 (max 2)
	url := fmt.Sprintf("/%s/%s?uploadId=%s&part-number-marker=0&max-parts=2", bucket, key, ir.UploadId)
	lr := httptest.NewRequest("GET", url, nil)
	lrr := httptest.NewRecorder()
	r.ServeHTTP(lrr, lr)
	if lrr.Code != 200 {
		t.Fatalf("list parts failed: status=%d body=%s", lrr.Code, lrr.Body.String())
	}
	var p1 struct {
		IsTruncated          bool  `xml:"IsTruncated"`
		NextPartNumberMarker int   `xml:"NextPartNumberMarker"`
		PartNumbers          []int `xml:"Part>PartNumber"`
	}
	if err := xml.Unmarshal(lrr.Body.Bytes(), &p1); err != nil {
		t.Fatalf("unmarshal list xml failed: %v\nxml=%s", err, lrr.Body.String())
	}
	if len(p1.PartNumbers) != 2 || p1.PartNumbers[0] != 1 || p1.PartNumbers[1] != 3 || !p1.IsTruncated || p1.NextPartNumberMarker != 3 {
		t.Fatalf("unexpected page1: %+v", p1)
	}

	// Page 2 from marker 3: expect 5
	url = fmt.Sprintf("/%s/%s?uploadId=%s&part-number-marker=%d&max-parts=2", bucket, key, ir.UploadId, p1.NextPartNumberMarker)
	lr = httptest.NewRequest("GET", url, nil)
	lrr = httptest.NewRecorder()
	r.ServeHTTP(lrr, lr)
	if lrr.Code != 200 {
		t.Fatalf("list parts failed: status=%d body=%s", lrr.Code, lrr.Body.String())
	}
	var p2 struct {
		IsTruncated          bool  `xml:"IsTruncated"`
		NextPartNumberMarker int   `xml:"NextPartNumberMarker"`
		PartNumbers          []int `xml:"Part>PartNumber"`
	}
	if err := xml.Unmarshal(lrr.Body.Bytes(), &p2); err != nil {
		t.Fatalf("unmarshal list xml failed: %v\nxml=%s", err, lrr.Body.String())
	}
	if len(p2.PartNumbers) != 1 || p2.PartNumbers[0] != 5 || p2.IsTruncated || p2.NextPartNumberMarker != 5 {
		t.Fatalf("unexpected page2: %+v", p2)
	}
}
--------------------------------------------------------------------------------