├── .plugin.yaml ├── .gitignore ├── go.mod ├── LICENSE ├── metrics.go ├── errors.go ├── .rr.yaml.example ├── config.go ├── plugin.go ├── bucket_manager.go ├── rpc.go ├── README.md ├── operations.go └── metrics.md /.plugin.yaml: -------------------------------------------------------------------------------- 1 | name: s3-storage 2 | description: 'S3-compatible object storage operations via RPC interface' 3 | category: network 4 | owner: butschster 5 | dependencies: 6 | - rpc 7 | - metrics 8 | - logger 9 | author: 10 | name: Pavel Buchnev 11 | email: butschster@gmail.com 12 | license: MIT -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, built with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | *.out 13 | 14 | # Dependency directories (remove the comment below to include it) 15 | **/vendor/ 16 | vendor/ 17 | .idea 18 | .DS_Store 19 | **/composer.lock -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/roadrunner-plugins/s3-storage 2 | 3 | go 1.24 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.39.5 7 | github.com/prometheus/client_golang v1.20.5 8 | github.com/roadrunner-server/api/v4 v4.0.0 9 | github.com/roadrunner-server/endure/v2 v2.4.0 10 | github.com/roadrunner-server/errors v1.4.1 11 | github.com/roadrunner-server/goridge/v3 v3.8.0 12 | go.uber.org/zap v1.27.0 13 | ) 14 | 15 | require ( 16 | github.com/aws/smithy-go v1.23.1 // indirect 17 | github.com/davecgh/go-spew v1.1.1 // indirect 18 | github.com/pmezard/go-difflib v1.0.0 // indirect 19 | github.com/stretchr/testify v1.11.1 // indirect 20 | go.uber.org/multierr 
v1.11.0 // indirect 21 | gopkg.in/yaml.v3 v3.0.1 // indirect 22 | ) 23 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 Spiral Scout 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 
--------------------------------------------------------------------------------
/metrics.go:
--------------------------------------------------------------------------------
 1 | package s3
 2 | 
 3 | import (
 4 | 	"github.com/prometheus/client_golang/prometheus"
 5 | )
 6 | 
 7 | // metricsExporter holds all Prometheus metrics for the S3 plugin
 8 | type metricsExporter struct {
 9 | 	// operationsTotal tracks total operations by operation, bucket, and status
10 | 	operationsTotal *prometheus.CounterVec
11 | 
12 | 	// errorsTotal tracks errors by bucket and error type
13 | 	errorsTotal *prometheus.CounterVec
14 | }
15 | 
16 | // newMetricsExporter creates a new metrics exporter for S3 operations
17 | // Returns error if metrics registration fails
18 | func newMetricsExporter() (*metricsExporter, error) {
19 | 	m := &metricsExporter{
20 | 		// Operation counter with labels: operation, bucket, status
21 | 		operationsTotal: prometheus.NewCounterVec(
22 | 			prometheus.CounterOpts{
23 | 				Name: "rr_s3_operations_total",
24 | 				Help: "Total number of S3 operations by type, bucket, and status",
25 | 			},
26 | 			[]string{"operation", "bucket", "status"},
27 | 		),
28 | 
29 | 		// Error counter with labels: bucket, error_type
30 | 		errorsTotal: prometheus.NewCounterVec(
31 | 			prometheus.CounterOpts{
32 | 				Name: "rr_s3_errors_total",
33 | 				Help: "Total number of S3 errors by bucket and error type",
34 | 			},
35 | 			[]string{"bucket", "error_type"},
36 | 		),
37 | 	}
38 | 
39 | 	// Register metrics with Prometheus default registry
40 | 	// This ensures metrics are available even if MetricsCollector() isn't called
41 | 	if err := prometheus.Register(m.operationsTotal); err != nil {
42 | 		// Already registered (happens on plugin reload): adopt the collector
43 | 		// that is live in the registry. Keeping the freshly created,
44 | 		// unregistered one would silently drop every sample recorded
45 | 		// through it, because only the registered collector is scraped.
46 | 		are, ok := err.(prometheus.AlreadyRegisteredError)
47 | 		if !ok {
48 | 			return nil, err
49 | 		}
50 | 		m.operationsTotal = are.ExistingCollector.(*prometheus.CounterVec)
51 | 	}
52 | 
53 | 	if err := prometheus.Register(m.errorsTotal); err != nil {
54 | 		// Same reload-safe handling as above: reuse the registered collector.
55 | 		are, ok := err.(prometheus.AlreadyRegisteredError)
56 | 		if !ok {
57 | 			return nil, err
58 | 		}
59 | 		m.errorsTotal = are.ExistingCollector.(*prometheus.CounterVec)
60 | 	}
61 | 
62 | 	return m, nil
63 | }
64 | 
65 | // RecordOperation increments the operation counter
66 | // operation: write, read, delete, copy, move, list, exists, get_metadata, set_visibility, get_url
67 | // bucket: bucket name
68 | // status: success, error
69 | func (m *metricsExporter) RecordOperation(bucket, operation, status string) {
70 | 	if m == nil {
71 | 		return
72 | 	}
73 | 	m.operationsTotal.WithLabelValues(operation, bucket, status).Inc()
74 | }
75 | 
76 | // RecordError increments the error counter
77 | // bucket: bucket name
78 | // errorType: error code from ErrorCode constants
79 | func (m *metricsExporter) RecordError(bucket string, errorType ErrorCode) {
80 | 	if m == nil {
81 | 		return
82 | 	}
83 | 	m.errorsTotal.WithLabelValues(bucket, string(errorType)).Inc()
84 | }
85 | 
86 | // getCollectors returns all Prometheus collectors for registration
87 | func (m *metricsExporter) getCollectors() []prometheus.Collector {
88 | 	if m == nil {
89 | 		return nil
90 | 	}
91 | 	return []prometheus.Collector{
92 | 		m.operationsTotal,
93 | 		m.errorsTotal,
94 | 	}
95 | }
96 | 
--------------------------------------------------------------------------------
/errors.go:
--------------------------------------------------------------------------------
 1 | package s3
 2 | 
 3 | // ErrorCode represents structured error codes for S3 operations
 4 | type ErrorCode string
 5 | 
 6 | const (
 7 | 	// ErrBucketNotFound indicates the requested bucket doesn't exist
 8 | 	ErrBucketNotFound ErrorCode = "BUCKET_NOT_FOUND"
 9 | 
10 | 	// ErrFileNotFound indicates the requested file doesn't exist
11 | 	ErrFileNotFound ErrorCode = "FILE_NOT_FOUND"
12 | 
13 | 	// ErrInvalidConfig indicates invalid configuration
14 | 	ErrInvalidConfig ErrorCode = "INVALID_CONFIG"
15 | 
16 | 	// ErrS3Operation indicates an S3 operation failed
17 | 	ErrS3Operation ErrorCode = "S3_OPERATION_FAILED"
18 | 
19 | 	// ErrPermissionDenied indicates insufficient
permissions 20 | ErrPermissionDenied ErrorCode = "PERMISSION_DENIED" 21 | 22 | // ErrInvalidPathname indicates invalid file path 23 | ErrInvalidPathname ErrorCode = "INVALID_PATHNAME" 24 | 25 | // ErrBucketAlreadyExists indicates bucket is already registered 26 | ErrBucketAlreadyExists ErrorCode = "BUCKET_ALREADY_EXISTS" 27 | 28 | // ErrInvalidVisibility indicates invalid visibility value 29 | ErrInvalidVisibility ErrorCode = "INVALID_VISIBILITY" 30 | 31 | // ErrOperationTimeout indicates operation exceeded timeout 32 | ErrOperationTimeout ErrorCode = "OPERATION_TIMEOUT" 33 | ) 34 | 35 | // S3Error represents a structured error returned to PHP 36 | type S3Error struct { 37 | // Code is the error code 38 | Code ErrorCode `json:"code"` 39 | 40 | // Message is the human-readable error message 41 | Message string `json:"message"` 42 | 43 | // Details contains additional error context (optional) 44 | Details string `json:"details,omitempty"` 45 | } 46 | 47 | // Error implements the error interface 48 | func (e *S3Error) Error() string { 49 | if e.Details != "" { 50 | return string(e.Code) + ": " + e.Message + " (" + e.Details + ")" 51 | } 52 | return string(e.Code) + ": " + e.Message 53 | } 54 | 55 | // NewS3Error creates a new S3Error 56 | func NewS3Error(code ErrorCode, message string, details string) *S3Error { 57 | return &S3Error{ 58 | Code: code, 59 | Message: message, 60 | Details: details, 61 | } 62 | } 63 | 64 | // NewBucketNotFoundError creates a bucket not found error 65 | func NewBucketNotFoundError(bucketName string) *S3Error { 66 | return NewS3Error( 67 | ErrBucketNotFound, 68 | "Bucket not found", 69 | "bucket: "+bucketName, 70 | ) 71 | } 72 | 73 | // NewFileNotFoundError creates a file not found error 74 | func NewFileNotFoundError(pathname string) *S3Error { 75 | return NewS3Error( 76 | ErrFileNotFound, 77 | "File not found", 78 | "pathname: "+pathname, 79 | ) 80 | } 81 | 82 | // NewInvalidConfigError creates an invalid config error 83 | func 
NewInvalidConfigError(reason string) *S3Error { 84 | return NewS3Error( 85 | ErrInvalidConfig, 86 | "Invalid configuration", 87 | reason, 88 | ) 89 | } 90 | 91 | // NewS3OperationError creates an S3 operation error 92 | func NewS3OperationError(operation string, err error) *S3Error { 93 | return NewS3Error( 94 | ErrS3Operation, 95 | "S3 operation failed: "+operation, 96 | err.Error(), 97 | ) 98 | } 99 | 100 | // NewPermissionDeniedError creates a permission denied error 101 | func NewPermissionDeniedError(operation string) *S3Error { 102 | return NewS3Error( 103 | ErrPermissionDenied, 104 | "Permission denied", 105 | "operation: "+operation, 106 | ) 107 | } 108 | 109 | // NewInvalidPathnameError creates an invalid pathname error 110 | func NewInvalidPathnameError(pathname string, reason string) *S3Error { 111 | return NewS3Error( 112 | ErrInvalidPathname, 113 | "Invalid pathname", 114 | "pathname: "+pathname+", reason: "+reason, 115 | ) 116 | } 117 | -------------------------------------------------------------------------------- /.rr.yaml.example: -------------------------------------------------------------------------------- 1 | # Example RoadRunner Configuration with S3 Plugin 2 | 3 | # Server configuration 4 | server: 5 | command: "php worker.php" 6 | relay: pipes 7 | 8 | # RPC configuration for plugin communication 9 | rpc: 10 | listen: tcp://127.0.0.1:6001 11 | 12 | # S3 Plugin Configuration 13 | s3: 14 | # Default bucket to use when bucket name is not specified 15 | default: uploads 16 | 17 | # Server definitions: credentials and endpoints that can be shared between buckets 18 | servers: 19 | # AWS S3 server configuration 20 | aws-us-east: 21 | region: us-east-1 22 | endpoint: "" # Leave empty for AWS S3 (uses default AWS endpoint) 23 | credentials: 24 | key: ${AWS_ACCESS_KEY_ID} 25 | secret: ${AWS_SECRET_ACCESS_KEY} 26 | token: ${AWS_SESSION_TOKEN} # Optional, for temporary credentials 27 | 28 | # AWS S3 different region 29 | aws-eu-west: 30 | region: 
eu-west-1 31 | endpoint: "" 32 | credentials: 33 | key: ${AWS_EU_ACCESS_KEY_ID} 34 | secret: ${AWS_EU_SECRET_ACCESS_KEY} 35 | 36 | # MinIO server 37 | minio-local: 38 | region: us-east-1 # MinIO requires a region value 39 | endpoint: http://localhost:9000 40 | credentials: 41 | key: minioadmin 42 | secret: minioadmin 43 | 44 | # DigitalOcean Spaces 45 | do-spaces-nyc: 46 | region: nyc3 47 | endpoint: https://nyc3.digitaloceanspaces.com 48 | credentials: 49 | key: ${DO_SPACES_KEY} 50 | secret: ${DO_SPACES_SECRET} 51 | 52 | # Backblaze B2 (S3-compatible) 53 | backblaze-us: 54 | region: us-west-002 55 | endpoint: https://s3.us-west-002.backblazeb2.com 56 | credentials: 57 | key: ${B2_APPLICATION_KEY_ID} 58 | secret: ${B2_APPLICATION_KEY} 59 | 60 | # Bucket definitions: reference servers and define bucket-specific settings 61 | buckets: 62 | # Public uploads bucket on AWS S3 63 | uploads: 64 | server: aws-us-east # References server from servers section 65 | bucket: my-app-uploads # Actual S3 bucket name 66 | prefix: "uploads/" # Optional: path prefix for all operations 67 | visibility: public # Default ACL: "public" or "private" 68 | max_concurrent_operations: 100 # Limit concurrent operations per bucket 69 | part_size: 5242880 # 5MB - multipart upload chunk size 70 | concurrency: 5 # Goroutines for multipart uploads 71 | 72 | # Private documents bucket (same AWS account, different bucket) 73 | documents: 74 | server: aws-us-east 75 | bucket: my-app-private-docs 76 | prefix: "documents/" 77 | visibility: private 78 | max_concurrent_operations: 50 79 | 80 | # User avatars in EU region 81 | avatars: 82 | server: aws-eu-west 83 | bucket: user-avatars-eu 84 | prefix: "avatars/" 85 | visibility: public 86 | max_concurrent_operations: 200 87 | 88 | # Development bucket on local MinIO 89 | dev-storage: 90 | server: minio-local 91 | bucket: dev-bucket 92 | prefix: "" # No prefix 93 | visibility: public 94 | max_concurrent_operations: 50 95 | 96 | # DigitalOcean Spaces bucket 
97 | cdn-assets: 98 | server: do-spaces-nyc 99 | bucket: cdn-assets-bucket 100 | prefix: "static/" 101 | visibility: public 102 | max_concurrent_operations: 100 103 | 104 | # Backblaze B2 for backups 105 | backups: 106 | server: backblaze-us 107 | bucket: application-backups 108 | prefix: "daily/" 109 | visibility: private 110 | max_concurrent_operations: 20 111 | part_size: 104857600 # 100MB - larger chunks for big backup files 112 | concurrency: 10 113 | 114 | # Logging 115 | logs: 116 | level: info 117 | encoding: console 118 | output: stdout 119 | err_output: stderr 120 | 121 | # Optional: HTTP server for health checks 122 | http: 123 | address: 127.0.0.1:8080 124 | max_request_size: 256 125 | middleware: [] 126 | pool: 127 | num_workers: 4 128 | max_jobs: 0 129 | allocate_timeout: 60s 130 | destroy_timeout: 60s 131 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // Config represents the plugin configuration from .rr.yaml 8 | type Config struct { 9 | // Default bucket name to use when none specified 10 | Default string `mapstructure:"default"` 11 | 12 | // Servers contains S3 server definitions (credentials and endpoints) 13 | Servers map[string]*ServerConfig `mapstructure:"servers"` 14 | 15 | // Buckets contains bucket definitions that reference servers 16 | Buckets map[string]*BucketConfig `mapstructure:"buckets"` 17 | } 18 | 19 | // ServerConfig represents S3 server configuration (credentials and endpoint) 20 | type ServerConfig struct { 21 | // Region is the AWS region (e.g., "us-east-1", "fra1" for DigitalOcean) 22 | Region string `mapstructure:"region"` 23 | 24 | // Endpoint is the S3 endpoint URL (required for S3-compatible services) 25 | // Example: "https://fra1.digitaloceanspaces.com" 26 | // Leave empty for AWS S3 (will use default AWS endpoint) 27 | Endpoint string 
`mapstructure:"endpoint"` 28 | 29 | // Credentials contains authentication credentials for this server 30 | Credentials ServerCredentials `mapstructure:"credentials"` 31 | } 32 | 33 | // ServerCredentials contains S3 authentication credentials 34 | type ServerCredentials struct { 35 | // Key is the Access Key ID 36 | Key string `mapstructure:"key"` 37 | 38 | // Secret is the Secret Access Key 39 | Secret string `mapstructure:"secret"` 40 | 41 | // Token is the Session Token (optional, for temporary credentials) 42 | Token string `mapstructure:"token"` 43 | } 44 | 45 | // BucketConfig represents a single bucket configuration 46 | type BucketConfig struct { 47 | // Server is the reference to a server defined in the servers section 48 | Server string `mapstructure:"server"` 49 | 50 | // Bucket is the actual S3 bucket name 51 | Bucket string `mapstructure:"bucket"` 52 | 53 | // Prefix is the path prefix for all operations (optional) 54 | // Example: "uploads/" - all files will be stored under this prefix 55 | Prefix string `mapstructure:"prefix"` 56 | 57 | // Visibility defines default ACL: "public" or "private" 58 | Visibility string `mapstructure:"visibility"` 59 | 60 | // MaxConcurrentOperations limits concurrent operations per bucket (default: 100) 61 | MaxConcurrentOperations int `mapstructure:"max_concurrent_operations"` 62 | 63 | // PartSize defines multipart upload part size in bytes (default: 5MB) 64 | PartSize int64 `mapstructure:"part_size"` 65 | 66 | // Concurrency defines number of goroutines for multipart uploads (default: 5) 67 | Concurrency int `mapstructure:"concurrency"` 68 | } 69 | 70 | // Validate validates the configuration 71 | func (c *Config) Validate() error { 72 | if len(c.Servers) == 0 { 73 | return fmt.Errorf("at least one server must be configured") 74 | } 75 | 76 | if len(c.Buckets) == 0 { 77 | return fmt.Errorf("at least one bucket must be configured") 78 | } 79 | 80 | // Validate each server configuration 81 | for name, server := range 
c.Servers { 82 | if err := server.Validate(); err != nil { 83 | return fmt.Errorf("invalid configuration for server '%s': %w", name, err) 84 | } 85 | } 86 | 87 | // Validate each bucket configuration 88 | for name, bucket := range c.Buckets { 89 | if err := bucket.Validate(c.Servers); err != nil { 90 | return fmt.Errorf("invalid configuration for bucket '%s': %w", name, err) 91 | } 92 | } 93 | 94 | // Validate default bucket exists if specified 95 | if c.Default != "" { 96 | if _, exists := c.Buckets[c.Default]; !exists { 97 | return fmt.Errorf("default bucket '%s' not found in configuration", c.Default) 98 | } 99 | } 100 | 101 | return nil 102 | } 103 | 104 | // Validate validates a server configuration 105 | func (sc *ServerConfig) Validate() error { 106 | if sc.Region == "" { 107 | return fmt.Errorf("region is required") 108 | } 109 | 110 | if sc.Credentials.Key == "" { 111 | return fmt.Errorf("credentials.key is required") 112 | } 113 | 114 | if sc.Credentials.Secret == "" { 115 | return fmt.Errorf("credentials.secret is required") 116 | } 117 | 118 | return nil 119 | } 120 | 121 | // Validate validates a bucket configuration 122 | func (bc *BucketConfig) Validate(servers map[string]*ServerConfig) error { 123 | if bc.Server == "" { 124 | return fmt.Errorf("server reference is required") 125 | } 126 | 127 | // Validate server reference exists 128 | if _, exists := servers[bc.Server]; !exists { 129 | return fmt.Errorf("referenced server '%s' not found in configuration", bc.Server) 130 | } 131 | 132 | if bc.Bucket == "" { 133 | return fmt.Errorf("bucket name is required") 134 | } 135 | 136 | if bc.Visibility != "" && bc.Visibility != "public" && bc.Visibility != "private" { 137 | return fmt.Errorf("visibility must be 'public' or 'private', got '%s'", bc.Visibility) 138 | } 139 | 140 | // Set defaults 141 | if bc.Visibility == "" { 142 | bc.Visibility = "private" 143 | } 144 | 145 | if bc.MaxConcurrentOperations <= 0 { 146 | bc.MaxConcurrentOperations = 100 147 | } 
148 | 149 | if bc.PartSize <= 0 { 150 | bc.PartSize = 5 * 1024 * 1024 // 5MB default 151 | } 152 | 153 | if bc.Concurrency <= 0 { 154 | bc.Concurrency = 5 155 | } 156 | 157 | return nil 158 | } 159 | 160 | // GetVisibility returns the ACL string for S3 operations 161 | func (bc *BucketConfig) GetVisibility() string { 162 | if bc.Visibility == "public" { 163 | return "public-read" 164 | } 165 | return "private" 166 | } 167 | 168 | // GetFullPath returns the full path including prefix 169 | func (bc *BucketConfig) GetFullPath(pathname string) string { 170 | if bc.Prefix == "" { 171 | return pathname 172 | } 173 | return bc.Prefix + pathname 174 | } 175 | 176 | // GetServerConfig returns the server configuration for this bucket 177 | func (bc *BucketConfig) GetServerConfig(servers map[string]*ServerConfig) (*ServerConfig, error) { 178 | server, exists := servers[bc.Server] 179 | if !exists { 180 | return nil, fmt.Errorf("server '%s' not found", bc.Server) 181 | } 182 | return server, nil 183 | } 184 | -------------------------------------------------------------------------------- /plugin.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | "github.com/roadrunner-server/endure/v2/dep" 10 | "github.com/roadrunner-server/errors" 11 | "go.uber.org/zap" 12 | ) 13 | 14 | const ( 15 | // PluginName is the name of the S3 plugin 16 | PluginName = "s3" 17 | ) 18 | 19 | // Plugin represents the main S3 storage plugin structure 20 | type Plugin struct { 21 | // Configuration provider 22 | cfg Configurer 23 | 24 | // Logger 25 | log *zap.Logger 26 | 27 | // Bucket manager holds all registered buckets 28 | buckets *BucketManager 29 | 30 | // Operations handler for S3 operations 31 | operations *Operations 32 | 33 | // Metrics exporter for Prometheus integration 34 | metrics *metricsExporter 35 | 36 | // Context for graceful 
shutdown 37 | ctx context.Context 38 | cancel context.CancelFunc 39 | 40 | // WaitGroup for tracking ongoing operations 41 | wg sync.WaitGroup 42 | 43 | // Mutex for thread-safe operations 44 | mu sync.RWMutex 45 | } 46 | 47 | // Configurer interface for configuration loading 48 | type Configurer interface { 49 | // UnmarshalKey takes a single key and unmarshal it into a Struct 50 | UnmarshalKey(name string, out interface{}) error 51 | // Has checks if config section exists 52 | Has(name string) bool 53 | } 54 | 55 | // Logger interface for logging 56 | type Logger interface { 57 | NamedLogger(name string) *zap.Logger 58 | } 59 | 60 | // Init initializes the plugin with dependencies 61 | func (p *Plugin) Init(cfg Configurer, log Logger) error { 62 | const op = errors.Op("s3_plugin_init") 63 | if !cfg.Has(PluginName) { 64 | return errors.E(op, errors.Disabled) 65 | } 66 | 67 | p.cfg = cfg 68 | p.log = log.NamedLogger(PluginName) 69 | p.ctx, p.cancel = context.WithCancel(context.Background()) 70 | 71 | // Initialize metrics exporter with explicit Prometheus registration 72 | metrics, err := newMetricsExporter() 73 | if err != nil { 74 | return fmt.Errorf("failed to initialize metrics: %w", err) 75 | } 76 | p.metrics = metrics 77 | 78 | // Initialize bucket manager 79 | p.buckets = NewBucketManager(p.log) 80 | 81 | // Initialize operations handler 82 | p.operations = NewOperations(p, p.log) 83 | 84 | // Load static configuration from .rr.yaml 85 | var config Config 86 | if err := cfg.UnmarshalKey(PluginName, &config); err != nil { 87 | return fmt.Errorf("failed to unmarshal config: %w", err) 88 | } 89 | 90 | // Validate configuration 91 | if err := config.Validate(); err != nil { 92 | return fmt.Errorf("invalid configuration: %w", err) 93 | } 94 | 95 | // Set server configurations in bucket manager 96 | p.buckets.SetServers(config.Servers) 97 | 98 | // Register buckets from static configuration 99 | for name, bucketCfg := range config.Buckets { 100 | 
p.log.Debug("registering bucket from config", 101 | zap.String("name", name), 102 | zap.String("bucket", bucketCfg.Bucket), 103 | zap.String("server", bucketCfg.Server), 104 | ) 105 | 106 | if err := p.buckets.RegisterBucket(p.ctx, name, bucketCfg); err != nil { 107 | // Log error but don't fail initialization - allow other buckets to work 108 | p.log.Error("failed to register bucket", 109 | zap.String("name", name), 110 | zap.Error(err), 111 | ) 112 | continue 113 | } 114 | } 115 | 116 | // Set default bucket if specified 117 | if config.Default != "" { 118 | if err := p.buckets.SetDefault(config.Default); err != nil { 119 | p.log.Warn("failed to set default bucket", 120 | zap.String("default", config.Default), 121 | zap.Error(err), 122 | ) 123 | } 124 | } 125 | 126 | p.log.Info("S3 plugin initialized", 127 | zap.Int("servers", len(config.Servers)), 128 | zap.Int("buckets", len(config.Buckets)), 129 | zap.String("default", config.Default), 130 | ) 131 | 132 | return nil 133 | } 134 | 135 | // Serve starts the plugin (long-running service) 136 | func (p *Plugin) Serve() chan error { 137 | errCh := make(chan error, 1) 138 | 139 | // This plugin doesn't have background workers, but implements Service interface 140 | // for proper lifecycle management 141 | p.log.Debug("S3 plugin serving") 142 | 143 | return errCh 144 | } 145 | 146 | // Stop gracefully stops the plugin 147 | func (p *Plugin) Stop(ctx context.Context) error { 148 | p.log.Debug("stopping S3 plugin") 149 | 150 | // Cancel all ongoing operations 151 | p.cancel() 152 | 153 | // Wait for all operations to complete or timeout 154 | done := make(chan struct{}) 155 | go func() { 156 | p.wg.Wait() 157 | close(done) 158 | }() 159 | 160 | select { 161 | case <-done: 162 | p.log.Debug("all S3 operations completed") 163 | case <-ctx.Done(): 164 | p.log.Warn("shutdown timeout reached, forcing stop") 165 | } 166 | 167 | // Close all S3 clients 168 | if err := p.buckets.CloseAll(); err != nil { 169 | 
p.log.Error("error closing bucket clients", zap.Error(err)) 170 | return err 171 | } 172 | 173 | p.log.Debug("S3 plugin stopped") 174 | return nil 175 | } 176 | 177 | // Name returns the plugin name 178 | func (p *Plugin) Name() string { 179 | return PluginName 180 | } 181 | 182 | // Weight returns plugin weight for dependency resolution 183 | // Higher weight = initialized later 184 | func (p *Plugin) Weight() uint { 185 | return 10 186 | } 187 | 188 | // RPC returns the RPC interface exposed to PHP 189 | func (p *Plugin) RPC() interface{} { 190 | return &rpc{ 191 | plugin: p, 192 | log: p.log, 193 | } 194 | } 195 | 196 | // Collects declares the plugin's dependencies 197 | func (p *Plugin) Collects() []*dep.In { 198 | return []*dep.In{ 199 | dep.Fits(func(pp any) { 200 | p.cfg = pp.(Configurer) 201 | }, (*Configurer)(nil)), 202 | dep.Fits(func(pp any) { 203 | p.log = pp.(Logger).NamedLogger(PluginName) 204 | }, (*Logger)(nil)), 205 | } 206 | } 207 | 208 | // GetBucketManager returns the bucket manager (for internal use) 209 | func (p *Plugin) GetBucketManager() *BucketManager { 210 | p.mu.RLock() 211 | defer p.mu.RUnlock() 212 | return p.buckets 213 | } 214 | 215 | // GetContext returns the plugin context 216 | func (p *Plugin) GetContext() context.Context { 217 | return p.ctx 218 | } 219 | 220 | // TrackOperation adds an operation to the wait group 221 | func (p *Plugin) TrackOperation() { 222 | p.wg.Add(1) 223 | } 224 | 225 | // CompleteOperation marks an operation as complete 226 | func (p *Plugin) CompleteOperation() { 227 | p.wg.Done() 228 | } 229 | 230 | // MetricsCollector implements the StatProvider interface for Prometheus metrics integration 231 | // This method is called by the metrics plugin during its Serve phase to register all collectors 232 | // Metrics are also registered directly in Init() to ensure availability even if this method isn't called 233 | func (p *Plugin) MetricsCollector() []prometheus.Collector { 234 | if p.metrics == nil { 235 | 
return nil 236 | } 237 | return p.metrics.getCollectors() 238 | } 239 | -------------------------------------------------------------------------------- /bucket_manager.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | 8 | "github.com/aws/aws-sdk-go-v2/aws" 9 | "github.com/aws/aws-sdk-go-v2/config" 10 | "github.com/aws/aws-sdk-go-v2/credentials" 11 | "github.com/aws/aws-sdk-go-v2/service/s3" 12 | "go.uber.org/zap" 13 | ) 14 | 15 | // BucketManager manages all S3 bucket clients 16 | type BucketManager struct { 17 | // Map of bucket name to bucket instance 18 | buckets map[string]*Bucket 19 | 20 | // Map of server configurations 21 | servers map[string]*ServerConfig 22 | 23 | // Default bucket name 24 | defaultBucket string 25 | 26 | // Logger 27 | log *zap.Logger 28 | 29 | // Mutex for thread-safe access 30 | mu sync.RWMutex 31 | } 32 | 33 | // Bucket represents a single S3 bucket with its client and configuration 34 | type Bucket struct { 35 | // Name is the bucket identifier in the plugin 36 | Name string 37 | 38 | // Config is the bucket configuration 39 | Config *BucketConfig 40 | 41 | // ServerConfig is the server configuration this bucket uses 42 | ServerConfig *ServerConfig 43 | 44 | // Client is the AWS S3 client 45 | Client *s3.Client 46 | 47 | // Semaphore for limiting concurrent operations 48 | sem chan struct{} 49 | } 50 | 51 | // NewBucketManager creates a new bucket manager 52 | func NewBucketManager(log *zap.Logger) *BucketManager { 53 | return &BucketManager{ 54 | buckets: make(map[string]*Bucket), 55 | servers: make(map[string]*ServerConfig), 56 | log: log, 57 | } 58 | } 59 | 60 | // SetServers sets the server configurations 61 | func (bm *BucketManager) SetServers(servers map[string]*ServerConfig) { 62 | bm.mu.Lock() 63 | defer bm.mu.Unlock() 64 | bm.servers = servers 65 | } 66 | 67 | // RegisterBucket registers a new bucket with S3 client 
initialization
// RegisterBucket validates a bucket configuration, builds its S3 client and
// stores it under the given logical name. It fails if the name is already
// taken or the referenced server is not configured.
func (bm *BucketManager) RegisterBucket(ctx context.Context, name string, bucketCfg *BucketConfig) error {
	bm.mu.Lock()
	defer bm.mu.Unlock()

	// Reject duplicate registrations up front.
	if _, exists := bm.buckets[name]; exists {
		return fmt.Errorf("bucket '%s' already registered", name)
	}

	// The referenced server must already exist in the static configuration.
	serverCfg, exists := bm.servers[bucketCfg.Server]
	if !exists {
		return fmt.Errorf("server '%s' not found for bucket '%s'", bucketCfg.Server, name)
	}

	// Validate bucket configuration with server context.
	if err := bucketCfg.Validate(bm.servers); err != nil {
		return fmt.Errorf("invalid bucket configuration: %w", err)
	}

	// Build the AWS SDK configuration for this server.
	awsCfg, err := bm.createAWSConfig(ctx, serverCfg)
	if err != nil {
		return fmt.Errorf("failed to create AWS config: %w", err)
	}

	// Create the S3 client. Custom endpoints (MinIO, Spaces, ...) need
	// path-style addressing.
	s3Client := s3.NewFromConfig(awsCfg, func(o *s3.Options) {
		if serverCfg.Endpoint != "" {
			o.BaseEndpoint = aws.String(serverCfg.Endpoint)
			o.UsePathStyle = true // Required for MinIO and some S3-compatible services
		}
	})

	// FIX: a zero/negative limit would make the semaphore channel unbuffered,
	// so the first Acquire() (a send) would block forever — nothing receives
	// until Release(), which only runs after Acquire() returns. Buckets
	// registered via RPC never set this field, so default it here.
	// NOTE(review): confirm whether Validate() already guarantees a positive
	// value for statically configured buckets.
	slots := bucketCfg.MaxConcurrentOperations
	if slots <= 0 {
		slots = 100 // documented default for max_concurrent_operations
	}

	bucket := &Bucket{
		Name:         name,
		Config:       bucketCfg,
		ServerConfig: serverCfg,
		Client:       s3Client,
		sem:          make(chan struct{}, slots),
	}

	bm.buckets[name] = bucket

	bm.log.Debug("bucket registered",
		zap.String("name", name),
		zap.String("bucket", bucketCfg.Bucket),
		zap.String("server", bucketCfg.Server),
		zap.String("region", serverCfg.Region),
		zap.String("endpoint", serverCfg.Endpoint),
	)

	return nil
}

// GetBucket retrieves a bucket by its logical name.
func (bm *BucketManager) GetBucket(name string) (*Bucket, error) {
	bm.mu.RLock()
	defer bm.mu.RUnlock()

	bucket, exists := bm.buckets[name]
	if !exists {
		return nil, fmt.Errorf("bucket '%s' not found", name)
	}

	return bucket, nil
}

// GetDefaultBucket retrieves the configured default bucket.
func (bm *BucketManager) GetDefaultBucket() (*Bucket, error) {
	bm.mu.RLock()
	defer bm.mu.RUnlock()

	if bm.defaultBucket == "" {
		return nil, fmt.Errorf("no default bucket configured")
	}

	bucket, exists := bm.buckets[bm.defaultBucket]
	if !exists {
		return nil, fmt.Errorf("default bucket '%s' not found", bm.defaultBucket)
	}

	return bucket, nil
}

// SetDefault sets the default bucket; the bucket must already be registered.
func (bm *BucketManager) SetDefault(name string) error {
	bm.mu.Lock()
	defer bm.mu.Unlock()

	if _, exists := bm.buckets[name]; !exists {
		return fmt.Errorf("bucket '%s' not found", name)
	}

	bm.defaultBucket = name
	bm.log.Debug("default bucket set", zap.String("name", name))
	return nil
}

// ListBuckets returns all registered bucket names (order is unspecified).
func (bm *BucketManager) ListBuckets() []string {
	bm.mu.RLock()
	defer bm.mu.RUnlock()

	names := make([]string, 0, len(bm.buckets))
	for name := range bm.buckets {
		names = append(names, name)
	}

	return names
}

// GetDefaultBucketName returns the default bucket name ("" when unset).
func (bm *BucketManager) GetDefaultBucketName() string {
	bm.mu.RLock()
	defer bm.mu.RUnlock()
	return bm.defaultBucket
}

// RemoveBucket removes a bucket (used for dynamic buckets). The default
// bucket cannot be removed.
func (bm *BucketManager) RemoveBucket(name string) error {
	bm.mu.Lock()
	defer bm.mu.Unlock()

	if name == bm.defaultBucket {
		return fmt.Errorf("cannot remove default bucket '%s'", name)
	}

	if _, exists := bm.buckets[name]; !exists {
		return fmt.Errorf("bucket '%s' not found", name)
	}

	delete(bm.buckets, name)
	bm.log.Debug("bucket removed", zap.String("name", name))
	return nil
}

// CloseAll releases all registered buckets.
func (bm *BucketManager) CloseAll() error {
	bm.mu.Lock()
	defer bm.mu.Unlock()

	// AWS SDK v2 clients need no explicit Close.
	// FIX: the semaphore channels were previously close()d here, but an
	// in-flight Acquire() is a *send*, and sending on a closed channel
	// panics. Simply dropping the references is safe — pending Release()
	// receives still complete and the GC reclaims the channels.
	bm.buckets = make(map[string]*Bucket)
	bm.log.Debug("all bucket clients closed")
	return nil
}

// createAWSConfig creates an AWS SDK configuration from a server config,
// using the statically configured credentials.
func (bm *BucketManager) createAWSConfig(ctx context.Context, serverCfg *ServerConfig) (aws.Config, error) {
	credsProvider := credentials.NewStaticCredentialsProvider(
		serverCfg.Credentials.Key,
		serverCfg.Credentials.Secret,
		serverCfg.Credentials.Token,
	)

	awsCfg, err := config.LoadDefaultConfig(ctx,
		config.WithRegion(serverCfg.Region),
		config.WithCredentialsProvider(credsProvider),
	)
	if err != nil {
		return aws.Config{}, fmt.Errorf("failed to load AWS config: %w", err)
	}

	return awsCfg, nil
}

// Acquire takes a semaphore slot, blocking while the bucket is at its
// concurrent-operation limit.
func (b *Bucket) Acquire() {
	b.sem <- struct{}{}
}

// Release returns a semaphore slot acquired via Acquire.
func (b *Bucket) Release() {
	<-b.sem
}

// GetFullPath returns the full S3 key including the configured prefix.
func (b *Bucket) GetFullPath(pathname string) string {
	return b.Config.GetFullPath(pathname)
}

// GetVisibility returns the default ACL for the bucket.
func (b *Bucket) GetVisibility() string {
	return b.Config.GetVisibility()
}
-------------------------------------------------------------------------------- /rpc.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "go.uber.org/zap" 5 | ) 6 | 7 | // rpc implements the RPC interface exposed to PHP via goridge 8 | type rpc struct { 9 | plugin *Plugin 10 | log *zap.Logger 11 | } 12 | 13 | // RegisterBucketRequest represents the request to register a new bucket dynamically 14 | type RegisterBucketRequest struct { 15 | Name string `json:"name"` 16 | Server string `json:"server"` 17 | Bucket string `json:"bucket"` 18 | Prefix string `json:"prefix"` 19 | Visibility string `json:"visibility"` 20 | } 21 | 22 | // RegisterBucketResponse represents the response from bucket registration 23 | type RegisterBucketResponse struct { 24 | Success bool `json:"success"` 25 | Message string `json:"message"` 26 | } 27 | 28 | // ListBucketsRequest represents the request to list all buckets 29 | type ListBucketsRequest struct{} 30 | 31 | // ListBucketsResponse represents the response with all bucket names 32 | type ListBucketsResponse struct { 33 | Buckets []string `json:"buckets"` 34 | Default string `json:"default"` 35 | } 36 | 37 | // WriteRequest represents a file write/upload request 38 | type WriteRequest struct { 39 | Bucket string `json:"bucket"` 40 | Pathname string `json:"pathname"` 41 | Content []byte `json:"content"` 42 | Config map[string]string `json:"config,omitempty"` 43 | Visibility string `json:"visibility,omitempty"` 44 | } 45 | 46 | // WriteResponse represents the response from a write operation 47 | type WriteResponse struct { 48 | Success bool `json:"success"` 49 | Pathname string `json:"pathname"` 50 | Size int64 `json:"size"` 51 | LastModified int64 `json:"last_modified"` 52 | } 53 | 54 | // ReadRequest represents a file read/download request 55 | type ReadRequest struct { 56 | Bucket string `json:"bucket"` 57 | Pathname string `json:"pathname"` 58 | } 59 | 60 | // 
ReadResponse represents the response from a read operation 61 | type ReadResponse struct { 62 | Content []byte `json:"content"` 63 | Size int64 `json:"size"` 64 | MimeType string `json:"mime_type"` 65 | LastModified int64 `json:"last_modified"` 66 | } 67 | 68 | // ExistsRequest represents a file existence check request 69 | type ExistsRequest struct { 70 | Bucket string `json:"bucket"` 71 | Pathname string `json:"pathname"` 72 | } 73 | 74 | // ExistsResponse represents the response from an exists check 75 | type ExistsResponse struct { 76 | Exists bool `json:"exists"` 77 | } 78 | 79 | // DeleteRequest represents a file deletion request 80 | type DeleteRequest struct { 81 | Bucket string `json:"bucket"` 82 | Pathname string `json:"pathname"` 83 | } 84 | 85 | // DeleteResponse represents the response from a delete operation 86 | type DeleteResponse struct { 87 | Success bool `json:"success"` 88 | } 89 | 90 | // CopyRequest represents a file copy request 91 | type CopyRequest struct { 92 | SourceBucket string `json:"source_bucket"` 93 | SourcePathname string `json:"source_pathname"` 94 | DestBucket string `json:"dest_bucket"` 95 | DestPathname string `json:"dest_pathname"` 96 | Config map[string]string `json:"config,omitempty"` 97 | Visibility string `json:"visibility,omitempty"` 98 | } 99 | 100 | // CopyResponse represents the response from a copy operation 101 | type CopyResponse struct { 102 | Success bool `json:"success"` 103 | Pathname string `json:"pathname"` 104 | Size int64 `json:"size"` 105 | LastModified int64 `json:"last_modified"` 106 | } 107 | 108 | // MoveRequest represents a file move request (copy + delete) 109 | type MoveRequest struct { 110 | SourceBucket string `json:"source_bucket"` 111 | SourcePathname string `json:"source_pathname"` 112 | DestBucket string `json:"dest_bucket"` 113 | DestPathname string `json:"dest_pathname"` 114 | Config map[string]string `json:"config,omitempty"` 115 | Visibility string `json:"visibility,omitempty"` 116 | } 117 
| 118 | // MoveResponse represents the response from a move operation 119 | type MoveResponse struct { 120 | Success bool `json:"success"` 121 | Pathname string `json:"pathname"` 122 | Size int64 `json:"size"` 123 | LastModified int64 `json:"last_modified"` 124 | } 125 | 126 | // GetMetadataRequest represents a request to get file metadata 127 | type GetMetadataRequest struct { 128 | Bucket string `json:"bucket"` 129 | Pathname string `json:"pathname"` 130 | } 131 | 132 | // GetMetadataResponse represents file metadata 133 | type GetMetadataResponse struct { 134 | Size int64 `json:"size"` 135 | MimeType string `json:"mime_type"` 136 | LastModified int64 `json:"last_modified"` 137 | Visibility string `json:"visibility"` 138 | ETag string `json:"etag,omitempty"` 139 | } 140 | 141 | // SetVisibilityRequest represents a request to change file visibility 142 | type SetVisibilityRequest struct { 143 | Bucket string `json:"bucket"` 144 | Pathname string `json:"pathname"` 145 | Visibility string `json:"visibility"` 146 | } 147 | 148 | // SetVisibilityResponse represents the response from visibility change 149 | type SetVisibilityResponse struct { 150 | Success bool `json:"success"` 151 | } 152 | 153 | // GetPublicURLRequest represents a request to generate a public URL 154 | type GetPublicURLRequest struct { 155 | Bucket string `json:"bucket"` 156 | Pathname string `json:"pathname"` 157 | ExpiresIn int64 `json:"expires_in,omitempty"` // Seconds, 0 for permanent 158 | } 159 | 160 | // GetPublicURLResponse represents the response with a public URL 161 | type GetPublicURLResponse struct { 162 | URL string `json:"url"` 163 | ExpiresAt int64 `json:"expires_at,omitempty"` // Unix timestamp 164 | } 165 | 166 | // ListObjectsRequest represents a request to list objects in a bucket 167 | type ListObjectsRequest struct { 168 | Bucket string `json:"bucket"` 169 | Prefix string `json:"prefix,omitempty"` // Filter by prefix 170 | Delimiter string `json:"delimiter,omitempty"` // 
Delimiter for grouping (e.g., "/") 171 | MaxKeys int32 `json:"max_keys,omitempty"` // Maximum number of keys to return (default: 1000) 172 | ContinuationToken string `json:"continuation_token,omitempty"` // Token for pagination 173 | } 174 | 175 | // ObjectInfo represents information about a single S3 object 176 | type ObjectInfo struct { 177 | Key string `json:"key"` 178 | Size int64 `json:"size"` 179 | LastModified int64 `json:"last_modified"` // Unix timestamp 180 | ETag string `json:"etag"` 181 | StorageClass string `json:"storage_class,omitempty"` 182 | } 183 | 184 | // CommonPrefix represents a common prefix (directory-like structure) 185 | type CommonPrefix struct { 186 | Prefix string `json:"prefix"` 187 | } 188 | 189 | // ListObjectsResponse represents the response from list objects operation 190 | type ListObjectsResponse struct { 191 | Objects []ObjectInfo `json:"objects"` 192 | CommonPrefixes []CommonPrefix `json:"common_prefixes,omitempty"` 193 | IsTruncated bool `json:"is_truncated"` 194 | NextContinuationToken string `json:"next_continuation_token,omitempty"` 195 | KeyCount int32 `json:"key_count"` 196 | } 197 | 198 | // RegisterBucket registers a new bucket dynamically via RPC 199 | // Note: The bucket must reference an existing server from configuration 200 | func (r *rpc) RegisterBucket(req *RegisterBucketRequest, resp *RegisterBucketResponse) error { 201 | r.log.Debug("registering bucket via RPC", 202 | zap.String("name", req.Name), 203 | zap.String("server", req.Server), 204 | zap.String("bucket", req.Bucket), 205 | ) 206 | 207 | // Create bucket configuration from request 208 | cfg := &BucketConfig{ 209 | Server: req.Server, 210 | Bucket: req.Bucket, 211 | Prefix: req.Prefix, 212 | Visibility: req.Visibility, 213 | } 214 | 215 | // Get bucket manager to access server configs 216 | bucketManager := r.plugin.GetBucketManager() 217 | 218 | // Lock for reading servers map 219 | bucketManager.mu.RLock() 220 | servers := bucketManager.servers 221 | 
bucketManager.mu.RUnlock() 222 | 223 | // Validate configuration (this will check if server exists) 224 | if err := cfg.Validate(servers); err != nil { 225 | resp.Success = false 226 | resp.Message = "Invalid configuration: " + err.Error() 227 | return NewInvalidConfigError(err.Error()) 228 | } 229 | 230 | // Register bucket 231 | if err := bucketManager.RegisterBucket(r.plugin.ctx, req.Name, cfg); err != nil { 232 | resp.Success = false 233 | resp.Message = "Failed to register bucket: " + err.Error() 234 | return err 235 | } 236 | 237 | resp.Success = true 238 | resp.Message = "Bucket registered successfully" 239 | return nil 240 | } 241 | 242 | // ListBuckets lists all registered buckets 243 | func (r *rpc) ListBuckets(req *ListBucketsRequest, resp *ListBucketsResponse) error { 244 | resp.Buckets = r.plugin.buckets.ListBuckets() 245 | resp.Default = r.plugin.buckets.GetDefaultBucketName() 246 | return nil 247 | } 248 | 249 | // Write uploads a file to S3 250 | func (r *rpc) Write(req *WriteRequest, resp *WriteResponse) error { 251 | return r.plugin.operations.Write(r.plugin.ctx, req, resp) 252 | } 253 | 254 | // Read downloads a file from S3 255 | func (r *rpc) Read(req *ReadRequest, resp *ReadResponse) error { 256 | return r.plugin.operations.Read(r.plugin.ctx, req, resp) 257 | } 258 | 259 | // Exists checks if a file exists in S3 260 | func (r *rpc) Exists(req *ExistsRequest, resp *ExistsResponse) error { 261 | return r.plugin.operations.Exists(r.plugin.ctx, req, resp) 262 | } 263 | 264 | // Delete deletes a file from S3 265 | func (r *rpc) Delete(req *DeleteRequest, resp *DeleteResponse) error { 266 | return r.plugin.operations.Delete(r.plugin.ctx, req, resp) 267 | } 268 | 269 | // Copy copies a file within or between buckets 270 | func (r *rpc) Copy(req *CopyRequest, resp *CopyResponse) error { 271 | return r.plugin.operations.Copy(r.plugin.ctx, req, resp) 272 | } 273 | 274 | // Move moves a file within or between buckets 275 | func (r *rpc) Move(req 
*MoveRequest, resp *MoveResponse) error { 276 | return r.plugin.operations.Move(r.plugin.ctx, req, resp) 277 | } 278 | 279 | // GetMetadata retrieves file metadata 280 | func (r *rpc) GetMetadata(req *GetMetadataRequest, resp *GetMetadataResponse) error { 281 | return r.plugin.operations.GetMetadata(r.plugin.ctx, req, resp) 282 | } 283 | 284 | // SetVisibility changes file visibility (ACL) 285 | func (r *rpc) SetVisibility(req *SetVisibilityRequest, resp *SetVisibilityResponse) error { 286 | return r.plugin.operations.SetVisibility(r.plugin.ctx, req, resp) 287 | } 288 | 289 | // GetPublicURL generates a public or presigned URL for a file 290 | func (r *rpc) GetPublicURL(req *GetPublicURLRequest, resp *GetPublicURLResponse) error { 291 | return r.plugin.operations.GetPublicURL(r.plugin.ctx, req, resp) 292 | } 293 | 294 | // ListObjects lists objects in a bucket with optional filtering 295 | func (r *rpc) ListObjects(req *ListObjectsRequest, resp *ListObjectsResponse) error { 296 | return r.plugin.operations.ListObjects(r.plugin.ctx, req, resp) 297 | } 298 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RoadRunner S3 Storage Plugin 2 | 3 | A high-performance Go plugin for RoadRunner that provides S3-compatible object storage operations via RPC interface. 4 | 5 | ## Features 6 | 7 | - ✅ **Multiple Bucket Support**: Configure multiple S3 buckets (AWS S3, MinIO, DigitalOcean Spaces, etc.) 
8 | - ✅ **Dynamic Configuration**: Register buckets at runtime via RPC 9 | - ✅ **Full S3 Operations**: Upload, download, copy, move, delete, metadata operations 10 | - ✅ **Concurrent Operations**: Built-in goroutine management and connection pooling 11 | - ✅ **Public URL Generation**: Generate public and presigned URLs 12 | - ✅ **Visibility Control**: Manage file ACLs (public/private) 13 | - ✅ **Large File Support**: Multipart upload for files > 5MB 14 | - ✅ **Graceful Shutdown**: Proper context cancellation and operation tracking 15 | 16 | ## Installation 17 | 18 | ```bash 19 | go get github.com/roadrunner-plugins/s3-storage 20 | ``` 21 | 22 | ## Configuration 23 | 24 | ### Configuration Structure 25 | 26 | The S3 plugin uses a two-level configuration structure: 27 | 28 | 1. **Servers**: Define S3 server credentials and endpoints (can be reused by multiple buckets) 29 | 2. **Buckets**: Define individual buckets that reference servers 30 | 31 | This separation allows you to: 32 | - Share credentials across multiple buckets 33 | - Easily manage different S3 providers (AWS, MinIO, DigitalOcean Spaces, etc.)
34 | - Keep sensitive credentials in one place 35 | 36 | ### Basic Configuration (.rr.yaml) 37 | 38 | ```yaml 39 | s3: 40 | # Default bucket to use when none specified 41 | default: uploads 42 | 43 | # Server definitions (credentials and endpoints) 44 | servers: 45 | # AWS S3 server 46 | aws-primary: 47 | region: us-east-1 48 | endpoint: "" # Empty for AWS S3 (uses default endpoint) 49 | credentials: 50 | key: ${AWS_ACCESS_KEY_ID} 51 | secret: ${AWS_SECRET_ACCESS_KEY} 52 | token: ${AWS_SESSION_TOKEN} # Optional for temporary credentials 53 | 54 | # MinIO server 55 | minio-dev: 56 | region: us-east-1 57 | endpoint: http://localhost:9000 58 | credentials: 59 | key: minioadmin 60 | secret: minioadmin 61 | 62 | # Bucket definitions (reference servers) 63 | buckets: 64 | # Public uploads bucket 65 | uploads: 66 | server: aws-primary # References server from servers section 67 | bucket: my-uploads-bucket # Actual S3 bucket name 68 | prefix: "uploads/" # Optional path prefix 69 | visibility: public # "public" or "private" 70 | max_concurrent_operations: 100 # Optional, default: 100 71 | part_size: 5242880 # Optional, default: 5MB (multipart uploads) 72 | concurrency: 5 # Optional, default: 5 (goroutines) 73 | 74 | # Private documents bucket (same AWS account) 75 | documents: 76 | server: aws-primary # Reuses same credentials 77 | bucket: company-documents 78 | prefix: "docs/" 79 | visibility: private 80 | max_concurrent_operations: 50 81 | 82 | # Development bucket on MinIO 83 | dev-storage: 84 | server: minio-dev 85 | bucket: dev-bucket 86 | visibility: public 87 | ``` 88 | 89 | ### Multi-Provider Configuration Example 90 | 91 | ```yaml 92 | s3: 93 | default: uploads 94 | 95 | servers: 96 | # AWS S3 US East 97 | aws-us: 98 | region: us-east-1 99 | endpoint: "" 100 | credentials: 101 | key: ${AWS_ACCESS_KEY_ID} 102 | secret: ${AWS_SECRET_ACCESS_KEY} 103 | 104 | # AWS S3 EU West 105 | aws-eu: 106 | region: eu-west-1 107 | endpoint: "" 108 | credentials: 109 | key: 
${AWS_EU_ACCESS_KEY_ID} 110 | secret: ${AWS_EU_SECRET_ACCESS_KEY} 111 | 112 | # DigitalOcean Spaces 113 | do-spaces: 114 | region: nyc3 115 | endpoint: https://nyc3.digitaloceanspaces.com 116 | credentials: 117 | key: ${DO_SPACES_KEY} 118 | secret: ${DO_SPACES_SECRET} 119 | 120 | # Backblaze B2 121 | backblaze: 122 | region: us-west-002 123 | endpoint: https://s3.us-west-002.backblazeb2.com 124 | credentials: 125 | key: ${B2_APPLICATION_KEY_ID} 126 | secret: ${B2_APPLICATION_KEY} 127 | 128 | buckets: 129 | # User uploads in US 130 | uploads: 131 | server: aws-us 132 | bucket: app-uploads-us 133 | prefix: "uploads/" 134 | visibility: public 135 | 136 | # User avatars in EU (GDPR compliance) 137 | avatars-eu: 138 | server: aws-eu 139 | bucket: app-avatars-eu 140 | prefix: "avatars/" 141 | visibility: public 142 | 143 | # CDN assets on DigitalOcean 144 | cdn-assets: 145 | server: do-spaces 146 | bucket: cdn-bucket 147 | prefix: "static/" 148 | visibility: public 149 | max_concurrent_operations: 200 150 | 151 | # Long-term backups on Backblaze B2 152 | backups: 153 | server: backblaze 154 | bucket: app-backups 155 | prefix: "daily/" 156 | visibility: private 157 | part_size: 104857600 # 100MB chunks for large files 158 | concurrency: 10 159 | ``` 160 | 161 | ## PHP Usage 162 | 163 | ### Basic Operations 164 | 165 | ```php 166 | use Spiral\Goridge\RPC\RPC; 167 | 168 | $rpc = new RPC(/* connection */); 169 | 170 | // Upload a file 171 | $response = $rpc->call('s3.Write', [ 172 | 'bucket' => 'uploads', 173 | 'pathname' => 'images/photo.jpg', 174 | 'content' => base64_encode(file_get_contents('photo.jpg')), 175 | 'visibility' => 'public' // Optional 176 | ]); 177 | // Returns: ['success' => true, 'pathname' => '...', 'size' => 12345, 'last_modified' => 1234567890] 178 | 179 | // Download a file 180 | $response = $rpc->call('s3.Read', [ 181 | 'bucket' => 'uploads', 182 | 'pathname' => 'images/photo.jpg' 183 | ]); 184 | // Returns: ['content' => base64_data, 'size' => 12345, 
'mime_type' => 'image/jpeg', 'last_modified' => 1234567890] 185 | 186 | // Check if file exists 187 | $response = $rpc->call('s3.Exists', [ 188 | 'bucket' => 'uploads', 189 | 'pathname' => 'images/photo.jpg' 190 | ]); 191 | // Returns: ['exists' => true] 192 | 193 | // Delete a file 194 | $response = $rpc->call('s3.Delete', [ 195 | 'bucket' => 'uploads', 196 | 'pathname' => 'images/photo.jpg' 197 | ]); 198 | // Returns: ['success' => true] 199 | 200 | // Get file metadata 201 | $response = $rpc->call('s3.GetMetadata', [ 202 | 'bucket' => 'uploads', 203 | 'pathname' => 'images/photo.jpg' 204 | ]); 205 | // Returns: ['size' => 12345, 'mime_type' => 'image/jpeg', 'last_modified' => 1234567890, 'visibility' => 'public'] 206 | 207 | // Get public URL (permanent) 208 | $response = $rpc->call('s3.GetPublicURL', [ 209 | 'bucket' => 'uploads', 210 | 'pathname' => 'images/photo.jpg', 211 | 'expires_in' => 0 // 0 for permanent URL 212 | ]); 213 | // Returns: ['url' => 'https://...'] 214 | 215 | // Get presigned URL (expires in 1 hour) 216 | $response = $rpc->call('s3.GetPublicURL', [ 217 | 'bucket' => 'uploads', 218 | 'pathname' => 'images/photo.jpg', 219 | 'expires_in' => 3600 // seconds 220 | ]); 221 | // Returns: ['url' => 'https://...', 'expires_at' => 1234567890] 222 | ``` 223 | 224 | ### Advanced Operations 225 | 226 | ```php 227 | // Copy file within same bucket 228 | $response = $rpc->call('s3.Copy', [ 229 | 'source_bucket' => 'uploads', 230 | 'source_pathname' => 'images/photo.jpg', 231 | 'dest_bucket' => 'uploads', 232 | 'dest_pathname' => 'images/photo-copy.jpg', 233 | 'visibility' => 'private' // Optional 234 | ]); 235 | 236 | // Move file between buckets 237 | $response = $rpc->call('s3.Move', [ 238 | 'source_bucket' => 'uploads', 239 | 'source_pathname' => 'temp/photo.jpg', 240 | 'dest_bucket' => 'private-docs', 241 | 'dest_pathname' => 'archive/photo.jpg' 242 | ]); 243 | 244 | // Change file visibility 245 | $response = $rpc->call('s3.SetVisibility', [ 246 | 
'bucket' => 'uploads', 247 | 'pathname' => 'images/photo.jpg', 248 | 'visibility' => 'private' // or 'public' 249 | ]); 250 | ``` 251 | 252 | ### Dynamic Bucket Registration 253 | 254 | You can register new buckets at runtime via RPC. **Note**: The bucket must reference an existing server from your configuration. 255 | 256 | ```php 257 | // Register a new bucket at runtime (references existing server) 258 | $response = $rpc->call('s3.RegisterBucket', [ 259 | 'name' => 'dynamic-bucket', // Unique bucket identifier 260 | 'server' => 'aws-primary', // Must reference existing server from config 261 | 'bucket' => 'my-new-bucket', // Actual S3 bucket name 262 | 'prefix' => 'files/', // Optional path prefix 263 | 'visibility' => 'public' // "public" or "private" 264 | ]); 265 | // Returns: ['success' => true, 'message' => 'Bucket registered successfully'] 266 | 267 | // List all registered buckets 268 | $response = $rpc->call('s3.ListBuckets', []); 269 | // Returns: ['buckets' => ['uploads', 'documents', 'dynamic-bucket'], 'default' => 'uploads'] 270 | ``` 271 | 272 | **Important**: Dynamic bucket registration requires that the referenced server already exists in your `.rr.yaml` configuration. You cannot add new servers at runtime - only new buckets that use existing server credentials. 
273 | 274 | ## Architecture 275 | 276 | ### Plugin Structure 277 | 278 | ``` 279 | s3/ 280 | ├── plugin.go # Main plugin with DI and lifecycle management 281 | ├── config.go # Configuration structures and validation 282 | ├── bucket_manager.go # Bucket registration and S3 client management 283 | ├── operations.go # All S3 file operations implementation 284 | ├── rpc.go # RPC interface definitions and handlers 285 | ├── errors.go # Structured error types 286 | └── go.mod # Go module dependencies 287 | ``` 288 | 289 | ### Concurrency Model 290 | 291 | - **Per-Bucket Semaphores**: Limits concurrent operations per bucket (default: 100) 292 | - **AWS SDK Connection Pooling**: Built-in HTTP connection reuse 293 | - **Goroutine Tracking**: WaitGroup for graceful shutdown 294 | - **Context Propagation**: All operations support cancellation 295 | 296 | ### Performance Characteristics 297 | 298 | - **Small Files (< 1MB)**: Direct upload, 100+ ops/sec per bucket 299 | - **Large Files (> 5MB)**: Multipart upload with configurable concurrency 300 | - **Memory Usage**: Streams large files, minimal memory footprint 301 | - **Concurrent Operations**: Supports 50+ simultaneous operations per bucket 302 | 303 | ## Error Handling 304 | 305 | All RPC methods return structured errors: 306 | 307 | ```go 308 | type S3Error struct { 309 | Code string `json:"code"` // Error code (e.g., "BUCKET_NOT_FOUND") 310 | Message string `json:"message"` // Human-readable message 311 | Details string `json:"details"` // Additional context 312 | } 313 | ``` 314 | 315 | ### Error Codes 316 | 317 | | Code | Description | 318 | |-------------------------|--------------------------------| 319 | | `BUCKET_NOT_FOUND` | Requested bucket doesn't exist | 320 | | `FILE_NOT_FOUND` | Requested file doesn't exist | 321 | | `INVALID_CONFIG` | Invalid bucket configuration | 322 | | `S3_OPERATION_FAILED` | S3 operation failed | 323 | | `PERMISSION_DENIED` | Insufficient permissions | 324 | | `INVALID_PATHNAME` | 
Invalid file path | 325 | | `BUCKET_ALREADY_EXISTS` | Bucket already registered | 326 | | `INVALID_VISIBILITY` | Invalid visibility value | 327 | 328 | ## Testing 329 | 330 | ```bash 331 | # Run unit tests 332 | go test ./... 333 | 334 | # Run with coverage 335 | go test -cover ./... 336 | 337 | # Run integration tests (requires S3 credentials) 338 | export AWS_ACCESS_KEY_ID=your_key 339 | export AWS_SECRET_ACCESS_KEY=your_secret 340 | go test -tags=integration ./... 341 | ``` 342 | 343 | ## Integration with RoadRunner 344 | 345 | ### Plugin Registration 346 | 347 | ```go 348 | // In your RoadRunner server 349 | import ( 350 | s3plugin "github.com/roadrunner-server/s3-plugin" 351 | ) 352 | 353 | func main() { 354 | container := endure.New(/* config */) 355 | 356 | // Register S3 plugin 357 | container.Register(&s3plugin.Plugin{}) 358 | 359 | // ... other plugins 360 | 361 | container.Init() 362 | container.Serve() 363 | } 364 | ``` 365 | 366 | ## Security Best Practices 367 | 368 | 1. **Credentials Management** 369 | - Use environment variables for secrets 370 | - Never commit credentials to version control 371 | - Rotate credentials regularly 372 | 373 | 2. **Access Control** 374 | - Use IAM roles when running on AWS infrastructure 375 | - Apply principle of least privilege 376 | - Use bucket policies for additional security 377 | 378 | 3. 
**Network Security** 379 | - Use HTTPS endpoints for production 380 | - Consider VPC endpoints for AWS S3 381 | - Implement proper CORS policies if needed 382 | 383 | ## Troubleshooting 384 | 385 | ### Common Issues 386 | 387 | **"Bucket not found" error** 388 | 389 | - Verify bucket name is correct in configuration 390 | - Check that bucket is registered (use `ListBuckets` RPC) 391 | - Ensure credentials have access to the bucket 392 | 393 | **"Permission denied" error** 394 | 395 | - Verify AWS credentials are correct 396 | - Check IAM policy allows required S3 operations 397 | - Ensure bucket policy doesn't block access 398 | 399 | **Slow upload performance** 400 | 401 | - Increase `concurrency` setting for multipart uploads 402 | - Adjust `part_size` (larger parts = fewer API calls) 403 | - Check `max_concurrent_operations` limit 404 | 405 | **Memory usage too high** 406 | 407 | - Files are streamed for large uploads 408 | - Check if multiple large files are processed simultaneously 409 | - Adjust `max_concurrent_operations` to limit parallelism 410 | 411 | ## Contributing 412 | 413 | Contributions are welcome! Please follow these guidelines: 414 | 415 | 1. Fork the repository 416 | 2. Create a feature branch 417 | 3. Add tests for new functionality 418 | 4. Ensure all tests pass 419 | 5. 
Submit a pull request 420 | 421 | ## License 422 | 423 | MIT License - see LICENSE file for details 424 | 425 | ## Support 426 | 427 | - **Documentation**: [RoadRunner Docs](https://docs.roadrunner.dev) 428 | - **Issues**: [GitHub Issues](https://github.com/roadrunner-server/s3-plugin/issues) 429 | - **Community**: [Discord](https://discord.gg/roadrunner) 430 | -------------------------------------------------------------------------------- /operations.go: -------------------------------------------------------------------------------- 1 | package s3 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "errors" 7 | "fmt" 8 | "io" 9 | "strings" 10 | "time" 11 | 12 | "github.com/aws/aws-sdk-go-v2/aws" 13 | "github.com/aws/aws-sdk-go-v2/feature/s3/manager" 14 | "github.com/aws/aws-sdk-go-v2/service/s3" 15 | "github.com/aws/aws-sdk-go-v2/service/s3/types" 16 | "go.uber.org/zap" 17 | ) 18 | 19 | // Operations handles all S3 file operations 20 | type Operations struct { 21 | plugin *Plugin 22 | log *zap.Logger 23 | } 24 | 25 | // NewOperations creates a new Operations instance 26 | func NewOperations(plugin *Plugin, log *zap.Logger) *Operations { 27 | return &Operations{ 28 | plugin: plugin, 29 | log: log, 30 | } 31 | } 32 | 33 | // Write uploads a file to S3 34 | func (o *Operations) Write(ctx context.Context, req *WriteRequest, resp *WriteResponse) error { 35 | // Track operation for graceful shutdown 36 | o.plugin.TrackOperation() 37 | defer o.plugin.CompleteOperation() 38 | 39 | start := time.Now() 40 | 41 | // Validate request 42 | if err := o.validatePathname(req.Pathname); err != nil { 43 | o.plugin.metrics.RecordOperation(req.Bucket, "write", "error") 44 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 45 | return err 46 | } 47 | 48 | // Get bucket 49 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 50 | if err != nil { 51 | o.plugin.metrics.RecordOperation(req.Bucket, "write", "error") 52 | o.plugin.metrics.RecordError(req.Bucket, 
ErrBucketNotFound) 53 | return NewBucketNotFoundError(req.Bucket) 54 | } 55 | 56 | // Acquire semaphore 57 | bucket.Acquire() 58 | defer bucket.Release() 59 | 60 | // Determine visibility 61 | visibility := req.Visibility 62 | if visibility == "" { 63 | visibility = bucket.GetVisibility() 64 | } 65 | 66 | // Get full S3 key 67 | key := bucket.GetFullPath(req.Pathname) 68 | 69 | // Detect content type 70 | contentType := o.detectContentType(req.Pathname, req.Content) 71 | 72 | // Prepare upload input 73 | putInput := &s3.PutObjectInput{ 74 | Bucket: aws.String(bucket.Config.Bucket), 75 | Key: aws.String(key), 76 | Body: bytes.NewReader(req.Content), 77 | ACL: types.ObjectCannedACL(visibility), 78 | ContentType: aws.String(contentType), 79 | } 80 | 81 | // Add custom metadata if provided 82 | if len(req.Config) > 0 { 83 | metadata := make(map[string]string) 84 | for k, v := range req.Config { 85 | metadata[k] = v 86 | } 87 | putInput.Metadata = metadata 88 | } 89 | 90 | // Use upload manager for better performance with large files 91 | uploader := manager.NewUploader(bucket.Client, func(u *manager.Uploader) { 92 | u.PartSize = bucket.Config.PartSize 93 | u.Concurrency = bucket.Config.Concurrency 94 | }) 95 | 96 | // Upload file 97 | result, err := uploader.Upload(ctx, putInput) 98 | if err != nil { 99 | o.log.Error("failed to upload file", 100 | zap.String("bucket", req.Bucket), 101 | zap.String("pathname", req.Pathname), 102 | zap.Error(err), 103 | ) 104 | o.plugin.metrics.RecordOperation(req.Bucket, "write", "error") 105 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 106 | return NewS3OperationError("upload", err) 107 | } 108 | 109 | // Get metadata for response 110 | headResult, err := bucket.Client.HeadObject(ctx, &s3.HeadObjectInput{ 111 | Bucket: aws.String(bucket.Config.Bucket), 112 | Key: aws.String(key), 113 | }) 114 | if err != nil { 115 | o.log.Warn("failed to get object metadata after upload", 116 | zap.String("bucket", req.Bucket), 117 | 
zap.String("pathname", req.Pathname), 118 | zap.Error(err), 119 | ) 120 | // Don't fail the operation, just return without metadata 121 | resp.Success = true 122 | resp.Pathname = req.Pathname 123 | resp.Size = int64(len(req.Content)) 124 | resp.LastModified = time.Now().Unix() 125 | o.plugin.metrics.RecordOperation(req.Bucket, "write", "success") 126 | return nil 127 | } 128 | 129 | resp.Success = true 130 | resp.Pathname = req.Pathname 131 | resp.Size = *headResult.ContentLength 132 | resp.LastModified = headResult.LastModified.Unix() 133 | 134 | o.plugin.metrics.RecordOperation(req.Bucket, "write", "success") 135 | 136 | o.log.Debug("file uploaded successfully", 137 | zap.String("bucket", req.Bucket), 138 | zap.String("pathname", req.Pathname), 139 | zap.Int64("size", resp.Size), 140 | zap.Duration("duration", time.Since(start)), 141 | ) 142 | 143 | _ = result // Use result to avoid unused variable warning 144 | 145 | return nil 146 | } 147 | 148 | // Read downloads a file from S3 149 | func (o *Operations) Read(ctx context.Context, req *ReadRequest, resp *ReadResponse) error { 150 | o.plugin.TrackOperation() 151 | defer o.plugin.CompleteOperation() 152 | 153 | start := time.Now() 154 | 155 | // Validate request 156 | if err := o.validatePathname(req.Pathname); err != nil { 157 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "error") 158 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 159 | return err 160 | } 161 | 162 | // Get bucket 163 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 164 | if err != nil { 165 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "error") 166 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 167 | return NewBucketNotFoundError(req.Bucket) 168 | } 169 | 170 | bucket.Acquire() 171 | defer bucket.Release() 172 | 173 | // Get full S3 key 174 | key := bucket.GetFullPath(req.Pathname) 175 | 176 | // Download file 177 | result, err := bucket.Client.GetObject(ctx, &s3.GetObjectInput{ 178 | 
Bucket: aws.String(bucket.Config.Bucket), 179 | Key: aws.String(key), 180 | }) 181 | if err != nil { 182 | var nsk *types.NoSuchKey 183 | if errors.As(err, &nsk) { 184 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "error") 185 | o.plugin.metrics.RecordError(req.Bucket, ErrFileNotFound) 186 | return NewFileNotFoundError(req.Pathname) 187 | } 188 | o.log.Error("failed to download file", 189 | zap.String("bucket", req.Bucket), 190 | zap.String("pathname", req.Pathname), 191 | zap.Error(err), 192 | ) 193 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "error") 194 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 195 | return NewS3OperationError("download", err) 196 | } 197 | defer result.Body.Close() 198 | 199 | // Read content 200 | content, err := io.ReadAll(result.Body) 201 | if err != nil { 202 | o.log.Error("failed to read file content", 203 | zap.String("bucket", req.Bucket), 204 | zap.String("pathname", req.Pathname), 205 | zap.Error(err), 206 | ) 207 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "error") 208 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 209 | return NewS3OperationError("read content", err) 210 | } 211 | 212 | resp.Content = content 213 | resp.Size = *result.ContentLength 214 | resp.MimeType = *result.ContentType 215 | resp.LastModified = result.LastModified.Unix() 216 | 217 | o.plugin.metrics.RecordOperation(req.Bucket, "read", "success") 218 | 219 | o.log.Debug("file downloaded successfully", 220 | zap.String("bucket", req.Bucket), 221 | zap.String("pathname", req.Pathname), 222 | zap.Int64("size", resp.Size), 223 | zap.Duration("duration", time.Since(start)), 224 | ) 225 | 226 | return nil 227 | } 228 | 229 | // Exists checks if a file exists in S3 230 | func (o *Operations) Exists(ctx context.Context, req *ExistsRequest, resp *ExistsResponse) error { 231 | o.plugin.TrackOperation() 232 | defer o.plugin.CompleteOperation() 233 | 234 | // Validate request 235 | if err := 
o.validatePathname(req.Pathname); err != nil { 236 | o.plugin.metrics.RecordOperation(req.Bucket, "exists", "error") 237 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 238 | return err 239 | } 240 | 241 | // Get bucket 242 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 243 | if err != nil { 244 | o.plugin.metrics.RecordOperation(req.Bucket, "exists", "error") 245 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 246 | return NewBucketNotFoundError(req.Bucket) 247 | } 248 | 249 | bucket.Acquire() 250 | defer bucket.Release() 251 | 252 | // Get full S3 key 253 | key := bucket.GetFullPath(req.Pathname) 254 | 255 | // Check if object exists 256 | _, err = bucket.Client.HeadObject(ctx, &s3.HeadObjectInput{ 257 | Bucket: aws.String(bucket.Config.Bucket), 258 | Key: aws.String(key), 259 | }) 260 | 261 | if err != nil { 262 | var nsk *types.NoSuchKey 263 | var nf *types.NotFound 264 | if errors.As(err, &nsk) || errors.As(err, &nf) { 265 | resp.Exists = false 266 | o.plugin.metrics.RecordOperation(req.Bucket, "exists", "success") 267 | return nil 268 | } 269 | // Other errors should be returned 270 | o.log.Error("failed to check file existence", 271 | zap.String("bucket", req.Bucket), 272 | zap.String("pathname", req.Pathname), 273 | zap.Error(err), 274 | ) 275 | o.plugin.metrics.RecordOperation(req.Bucket, "exists", "error") 276 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 277 | return NewS3OperationError("head object", err) 278 | } 279 | 280 | resp.Exists = true 281 | o.plugin.metrics.RecordOperation(req.Bucket, "exists", "success") 282 | return nil 283 | } 284 | 285 | // Delete deletes a file from S3 286 | func (o *Operations) Delete(ctx context.Context, req *DeleteRequest, resp *DeleteResponse) error { 287 | o.plugin.TrackOperation() 288 | defer o.plugin.CompleteOperation() 289 | 290 | // Validate request 291 | if err := o.validatePathname(req.Pathname); err != nil { 292 | o.plugin.metrics.RecordOperation(req.Bucket, 
"delete", "error") 293 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 294 | return err 295 | } 296 | 297 | // Get bucket 298 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 299 | if err != nil { 300 | o.plugin.metrics.RecordOperation(req.Bucket, "delete", "error") 301 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 302 | return NewBucketNotFoundError(req.Bucket) 303 | } 304 | 305 | bucket.Acquire() 306 | defer bucket.Release() 307 | 308 | // Get full S3 key 309 | key := bucket.GetFullPath(req.Pathname) 310 | 311 | // Delete object 312 | _, err = bucket.Client.DeleteObject(ctx, &s3.DeleteObjectInput{ 313 | Bucket: aws.String(bucket.Config.Bucket), 314 | Key: aws.String(key), 315 | }) 316 | if err != nil { 317 | o.log.Error("failed to delete file", 318 | zap.String("bucket", req.Bucket), 319 | zap.String("pathname", req.Pathname), 320 | zap.Error(err), 321 | ) 322 | o.plugin.metrics.RecordOperation(req.Bucket, "delete", "error") 323 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 324 | return NewS3OperationError("delete", err) 325 | } 326 | 327 | resp.Success = true 328 | o.plugin.metrics.RecordOperation(req.Bucket, "delete", "success") 329 | 330 | o.log.Debug("file deleted successfully", 331 | zap.String("bucket", req.Bucket), 332 | zap.String("pathname", req.Pathname), 333 | ) 334 | 335 | return nil 336 | } 337 | 338 | // Copy copies a file within or between buckets 339 | func (o *Operations) Copy(ctx context.Context, req *CopyRequest, resp *CopyResponse) error { 340 | o.plugin.TrackOperation() 341 | defer o.plugin.CompleteOperation() 342 | 343 | start := time.Now() 344 | 345 | // Validate request 346 | if err := o.validatePathname(req.SourcePathname); err != nil { 347 | o.plugin.metrics.RecordOperation(req.SourceBucket, "copy", "error") 348 | o.plugin.metrics.RecordError(req.SourceBucket, ErrInvalidPathname) 349 | return err 350 | } 351 | if err := o.validatePathname(req.DestPathname); err != nil { 352 | 
o.plugin.metrics.RecordOperation(req.DestBucket, "copy", "error") 353 | o.plugin.metrics.RecordError(req.DestBucket, ErrInvalidPathname) 354 | return err 355 | } 356 | 357 | // Get source bucket 358 | sourceBucket, err := o.plugin.buckets.GetBucket(req.SourceBucket) 359 | if err != nil { 360 | o.plugin.metrics.RecordOperation(req.SourceBucket, "copy", "error") 361 | o.plugin.metrics.RecordError(req.SourceBucket, ErrBucketNotFound) 362 | return NewBucketNotFoundError(req.SourceBucket) 363 | } 364 | 365 | // Get destination bucket 366 | destBucket, err := o.plugin.buckets.GetBucket(req.DestBucket) 367 | if err != nil { 368 | o.plugin.metrics.RecordOperation(req.DestBucket, "copy", "error") 369 | o.plugin.metrics.RecordError(req.DestBucket, ErrBucketNotFound) 370 | return NewBucketNotFoundError(req.DestBucket) 371 | } 372 | 373 | // Acquire semaphores 374 | sourceBucket.Acquire() 375 | defer sourceBucket.Release() 376 | if req.SourceBucket != req.DestBucket { 377 | destBucket.Acquire() 378 | defer destBucket.Release() 379 | } 380 | 381 | // Get full S3 keys 382 | sourceKey := sourceBucket.GetFullPath(req.SourcePathname) 383 | destKey := destBucket.GetFullPath(req.DestPathname) 384 | 385 | // Prepare copy source 386 | copySource := fmt.Sprintf("%s/%s", sourceBucket.Config.Bucket, sourceKey) 387 | 388 | // Determine visibility 389 | visibility := req.Visibility 390 | if visibility == "" { 391 | visibility = destBucket.GetVisibility() 392 | } 393 | 394 | // Copy object 395 | _, err = destBucket.Client.CopyObject(ctx, &s3.CopyObjectInput{ 396 | Bucket: aws.String(destBucket.Config.Bucket), 397 | Key: aws.String(destKey), 398 | CopySource: aws.String(copySource), 399 | ACL: types.ObjectCannedACL(visibility), 400 | }) 401 | if err != nil { 402 | o.log.Error("failed to copy file", 403 | zap.String("source_bucket", req.SourceBucket), 404 | zap.String("source_pathname", req.SourcePathname), 405 | zap.String("dest_bucket", req.DestBucket), 406 | zap.String("dest_pathname", 
req.DestPathname), 407 | zap.Error(err), 408 | ) 409 | o.plugin.metrics.RecordOperation(req.DestBucket, "copy", "error") 410 | o.plugin.metrics.RecordError(req.DestBucket, ErrS3Operation) 411 | return NewS3OperationError("copy", err) 412 | } 413 | 414 | // Get metadata for response 415 | headResult, err := destBucket.Client.HeadObject(ctx, &s3.HeadObjectInput{ 416 | Bucket: aws.String(destBucket.Config.Bucket), 417 | Key: aws.String(destKey), 418 | }) 419 | if err == nil { 420 | resp.Size = *headResult.ContentLength 421 | resp.LastModified = headResult.LastModified.Unix() 422 | } 423 | 424 | resp.Success = true 425 | resp.Pathname = req.DestPathname 426 | 427 | o.plugin.metrics.RecordOperation(req.DestBucket, "copy", "success") 428 | 429 | o.log.Debug("file copied successfully", 430 | zap.String("source_bucket", req.SourceBucket), 431 | zap.String("source_pathname", req.SourcePathname), 432 | zap.String("dest_bucket", req.DestBucket), 433 | zap.String("dest_pathname", req.DestPathname), 434 | zap.Duration("duration", time.Since(start)), 435 | ) 436 | 437 | return nil 438 | } 439 | 440 | // Move moves a file within or between buckets (copy + delete) 441 | func (o *Operations) Move(ctx context.Context, req *MoveRequest, resp *MoveResponse) error { 442 | // First, copy the file 443 | copyReq := &CopyRequest{ 444 | SourceBucket: req.SourceBucket, 445 | SourcePathname: req.SourcePathname, 446 | DestBucket: req.DestBucket, 447 | DestPathname: req.DestPathname, 448 | Config: req.Config, 449 | Visibility: req.Visibility, 450 | } 451 | copyResp := &CopyResponse{} 452 | 453 | if err := o.Copy(ctx, copyReq, copyResp); err != nil { 454 | return err 455 | } 456 | 457 | // Then delete the source file 458 | deleteReq := &DeleteRequest{ 459 | Bucket: req.SourceBucket, 460 | Pathname: req.SourcePathname, 461 | } 462 | deleteResp := &DeleteResponse{} 463 | 464 | if err := o.Delete(ctx, deleteReq, deleteResp); err != nil { 465 | o.log.Error("failed to delete source file after copy", 
466 | zap.String("bucket", req.SourceBucket), 467 | zap.String("pathname", req.SourcePathname), 468 | zap.Error(err), 469 | ) 470 | // Return error but note that copy succeeded 471 | return fmt.Errorf("copy succeeded but delete failed: %w", err) 472 | } 473 | 474 | resp.Success = true 475 | resp.Pathname = copyResp.Pathname 476 | resp.Size = copyResp.Size 477 | resp.LastModified = copyResp.LastModified 478 | 479 | return nil 480 | } 481 | 482 | // GetMetadata retrieves file metadata 483 | func (o *Operations) GetMetadata(ctx context.Context, req *GetMetadataRequest, resp *GetMetadataResponse) error { 484 | o.plugin.TrackOperation() 485 | defer o.plugin.CompleteOperation() 486 | 487 | // Validate request 488 | if err := o.validatePathname(req.Pathname); err != nil { 489 | o.plugin.metrics.RecordOperation(req.Bucket, "get_metadata", "error") 490 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 491 | return err 492 | } 493 | 494 | // Get bucket 495 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 496 | if err != nil { 497 | o.plugin.metrics.RecordOperation(req.Bucket, "get_metadata", "error") 498 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 499 | return NewBucketNotFoundError(req.Bucket) 500 | } 501 | 502 | bucket.Acquire() 503 | defer bucket.Release() 504 | 505 | // Get full S3 key 506 | key := bucket.GetFullPath(req.Pathname) 507 | 508 | // Get object metadata 509 | result, err := bucket.Client.HeadObject(ctx, &s3.HeadObjectInput{ 510 | Bucket: aws.String(bucket.Config.Bucket), 511 | Key: aws.String(key), 512 | }) 513 | if err != nil { 514 | var nsk *types.NoSuchKey 515 | var nf *types.NotFound 516 | if errors.As(err, &nsk) || errors.As(err, &nf) { 517 | o.plugin.metrics.RecordOperation(req.Bucket, "get_metadata", "error") 518 | o.plugin.metrics.RecordError(req.Bucket, ErrFileNotFound) 519 | return NewFileNotFoundError(req.Pathname) 520 | } 521 | o.log.Error("failed to get file metadata", 522 | zap.String("bucket", req.Bucket), 
523 | zap.String("pathname", req.Pathname), 524 | zap.Error(err), 525 | ) 526 | o.plugin.metrics.RecordOperation(req.Bucket, "get_metadata", "error") 527 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 528 | return NewS3OperationError("head object", err) 529 | } 530 | 531 | resp.Size = *result.ContentLength 532 | if result.ContentType != nil { 533 | resp.MimeType = *result.ContentType 534 | } 535 | resp.LastModified = result.LastModified.Unix() 536 | if result.ETag != nil { 537 | resp.ETag = *result.ETag 538 | } 539 | 540 | // Determine visibility from ACL (if available) 541 | resp.Visibility = "private" // Default 542 | 543 | o.plugin.metrics.RecordOperation(req.Bucket, "get_metadata", "success") 544 | 545 | return nil 546 | } 547 | 548 | // SetVisibility changes file visibility (ACL) 549 | func (o *Operations) SetVisibility(ctx context.Context, req *SetVisibilityRequest, resp *SetVisibilityResponse) error { 550 | o.plugin.TrackOperation() 551 | defer o.plugin.CompleteOperation() 552 | 553 | // Validate request 554 | if err := o.validatePathname(req.Pathname); err != nil { 555 | o.plugin.metrics.RecordOperation(req.Bucket, "set_visibility", "error") 556 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 557 | return err 558 | } 559 | 560 | if req.Visibility != "public" && req.Visibility != "private" { 561 | o.plugin.metrics.RecordOperation(req.Bucket, "set_visibility", "error") 562 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidVisibility) 563 | return NewS3Error(ErrInvalidVisibility, "visibility must be 'public' or 'private'", req.Visibility) 564 | } 565 | 566 | // Get bucket 567 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 568 | if err != nil { 569 | o.plugin.metrics.RecordOperation(req.Bucket, "set_visibility", "error") 570 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 571 | return NewBucketNotFoundError(req.Bucket) 572 | } 573 | 574 | bucket.Acquire() 575 | defer bucket.Release() 576 | 577 | // Get full 
S3 key 578 | key := bucket.GetFullPath(req.Pathname) 579 | 580 | // Map visibility to ACL 581 | acl := types.ObjectCannedACLPrivate 582 | if req.Visibility == "public" { 583 | acl = types.ObjectCannedACLPublicRead 584 | } 585 | 586 | // Set ACL 587 | _, err = bucket.Client.PutObjectAcl(ctx, &s3.PutObjectAclInput{ 588 | Bucket: aws.String(bucket.Config.Bucket), 589 | Key: aws.String(key), 590 | ACL: acl, 591 | }) 592 | if err != nil { 593 | o.log.Error("failed to set file visibility", 594 | zap.String("bucket", req.Bucket), 595 | zap.String("pathname", req.Pathname), 596 | zap.String("visibility", req.Visibility), 597 | zap.Error(err), 598 | ) 599 | o.plugin.metrics.RecordOperation(req.Bucket, "set_visibility", "error") 600 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 601 | return NewS3OperationError("put object acl", err) 602 | } 603 | 604 | resp.Success = true 605 | 606 | o.plugin.metrics.RecordOperation(req.Bucket, "set_visibility", "success") 607 | 608 | o.log.Debug("file visibility changed", 609 | zap.String("bucket", req.Bucket), 610 | zap.String("pathname", req.Pathname), 611 | zap.String("visibility", req.Visibility), 612 | ) 613 | 614 | return nil 615 | } 616 | 617 | // GetPublicURL generates a public or presigned URL for a file 618 | func (o *Operations) GetPublicURL(ctx context.Context, req *GetPublicURLRequest, resp *GetPublicURLResponse) error { 619 | o.plugin.TrackOperation() 620 | defer o.plugin.CompleteOperation() 621 | 622 | // Validate request 623 | if err := o.validatePathname(req.Pathname); err != nil { 624 | o.plugin.metrics.RecordOperation(req.Bucket, "get_url", "error") 625 | o.plugin.metrics.RecordError(req.Bucket, ErrInvalidPathname) 626 | return err 627 | } 628 | 629 | // Get bucket 630 | bucket, err := o.plugin.buckets.GetBucket(req.Bucket) 631 | if err != nil { 632 | o.plugin.metrics.RecordOperation(req.Bucket, "get_url", "error") 633 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 634 | return 
NewBucketNotFoundError(req.Bucket) 635 | } 636 | 637 | // Get full S3 key 638 | key := bucket.GetFullPath(req.Pathname) 639 | 640 | // If no expiration, generate permanent public URL 641 | if req.ExpiresIn == 0 { 642 | // Generate public URL (assuming public-read ACL) 643 | endpoint := bucket.ServerConfig.Endpoint 644 | if endpoint == "" { 645 | endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", bucket.ServerConfig.Region) 646 | } 647 | resp.URL = fmt.Sprintf("%s/%s/%s", endpoint, bucket.Config.Bucket, key) 648 | o.plugin.metrics.RecordOperation(req.Bucket, "get_url", "success") 649 | return nil 650 | } 651 | 652 | // Generate presigned URL 653 | presignClient := s3.NewPresignClient(bucket.Client) 654 | presignResult, err := presignClient.PresignGetObject(ctx, &s3.GetObjectInput{ 655 | Bucket: aws.String(bucket.Config.Bucket), 656 | Key: aws.String(key), 657 | }, func(opts *s3.PresignOptions) { 658 | opts.Expires = time.Duration(req.ExpiresIn) * time.Second 659 | }) 660 | if err != nil { 661 | o.log.Error("failed to generate presigned URL", 662 | zap.String("bucket", req.Bucket), 663 | zap.String("pathname", req.Pathname), 664 | zap.Error(err), 665 | ) 666 | o.plugin.metrics.RecordOperation(req.Bucket, "get_url", "error") 667 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 668 | return NewS3OperationError("presign get object", err) 669 | } 670 | 671 | resp.URL = presignResult.URL 672 | resp.ExpiresAt = time.Now().Add(time.Duration(req.ExpiresIn) * time.Second).Unix() 673 | 674 | o.plugin.metrics.RecordOperation(req.Bucket, "get_url", "success") 675 | 676 | return nil 677 | } 678 | 679 | // ListObjects lists objects in a bucket with optional filtering and pagination 680 | func (o *Operations) ListObjects(ctx context.Context, req *ListObjectsRequest, resp *ListObjectsResponse) error { 681 | o.plugin.TrackOperation() 682 | defer o.plugin.CompleteOperation() 683 | 684 | start := time.Now() 685 | 686 | // Get bucket 687 | bucket, err := 
o.plugin.buckets.GetBucket(req.Bucket) 688 | if err != nil { 689 | o.plugin.metrics.RecordOperation(req.Bucket, "list", "error") 690 | o.plugin.metrics.RecordError(req.Bucket, ErrBucketNotFound) 691 | return NewBucketNotFoundError(req.Bucket) 692 | } 693 | 694 | bucket.Acquire() 695 | defer bucket.Release() 696 | 697 | // Set default max keys if not specified 698 | maxKeys := req.MaxKeys 699 | if maxKeys <= 0 { 700 | maxKeys = 1000 701 | } 702 | 703 | // Prepare prefix - include bucket prefix if configured 704 | prefix := bucket.GetFullPath(req.Prefix) 705 | 706 | // Prepare list objects input 707 | input := &s3.ListObjectsV2Input{ 708 | Bucket: aws.String(bucket.Config.Bucket), 709 | MaxKeys: aws.Int32(maxKeys), 710 | } 711 | 712 | // Add optional parameters 713 | if prefix != "" { 714 | input.Prefix = aws.String(prefix) 715 | } 716 | 717 | if req.Delimiter != "" { 718 | input.Delimiter = aws.String(req.Delimiter) 719 | } 720 | 721 | if req.ContinuationToken != "" { 722 | input.ContinuationToken = aws.String(req.ContinuationToken) 723 | } 724 | 725 | // List objects 726 | result, err := bucket.Client.ListObjectsV2(ctx, input) 727 | if err != nil { 728 | o.log.Error("failed to list objects", 729 | zap.String("bucket", req.Bucket), 730 | zap.String("prefix", req.Prefix), 731 | zap.Error(err), 732 | ) 733 | o.plugin.metrics.RecordOperation(req.Bucket, "list", "error") 734 | o.plugin.metrics.RecordError(req.Bucket, ErrS3Operation) 735 | return NewS3OperationError("list objects", err) 736 | } 737 | 738 | // Convert results to response format 739 | resp.Objects = make([]ObjectInfo, 0, len(result.Contents)) 740 | for _, obj := range result.Contents { 741 | // Remove bucket prefix from key if present 742 | key := *obj.Key 743 | if bucket.Config.Prefix != "" && strings.HasPrefix(key, bucket.Config.Prefix) { 744 | key = strings.TrimPrefix(key, bucket.Config.Prefix) 745 | } 746 | 747 | objectInfo := ObjectInfo{ 748 | Key: key, 749 | Size: *obj.Size, 750 | LastModified: 
obj.LastModified.Unix(), 751 | } 752 | 753 | if obj.ETag != nil { 754 | objectInfo.ETag = *obj.ETag 755 | } 756 | 757 | if obj.StorageClass != "" { 758 | objectInfo.StorageClass = string(obj.StorageClass) 759 | } 760 | 761 | resp.Objects = append(resp.Objects, objectInfo) 762 | } 763 | 764 | // Process common prefixes (directories) 765 | if len(result.CommonPrefixes) > 0 { 766 | resp.CommonPrefixes = make([]CommonPrefix, 0, len(result.CommonPrefixes)) 767 | for _, cp := range result.CommonPrefixes { 768 | prefix := *cp.Prefix 769 | // Remove bucket prefix if present 770 | if bucket.Config.Prefix != "" && strings.HasPrefix(prefix, bucket.Config.Prefix) { 771 | prefix = strings.TrimPrefix(prefix, bucket.Config.Prefix) 772 | } 773 | 774 | resp.CommonPrefixes = append(resp.CommonPrefixes, CommonPrefix{ 775 | Prefix: prefix, 776 | }) 777 | } 778 | } 779 | 780 | // Set pagination info 781 | resp.IsTruncated = result.IsTruncated != nil && *result.IsTruncated 782 | if result.NextContinuationToken != nil { 783 | resp.NextContinuationToken = *result.NextContinuationToken 784 | } 785 | resp.KeyCount = *result.KeyCount 786 | 787 | o.plugin.metrics.RecordOperation(req.Bucket, "list", "success") 788 | 789 | o.log.Debug("objects listed successfully", 790 | zap.String("bucket", req.Bucket), 791 | zap.String("prefix", req.Prefix), 792 | zap.Int32("count", resp.KeyCount), 793 | zap.Bool("truncated", resp.IsTruncated), 794 | zap.Duration("duration", time.Since(start)), 795 | ) 796 | 797 | return nil 798 | } 799 | 800 | // validatePathname validates a file pathname 801 | func (o *Operations) validatePathname(pathname string) error { 802 | if pathname == "" { 803 | return NewInvalidPathnameError(pathname, "pathname cannot be empty") 804 | } 805 | 806 | if strings.HasPrefix(pathname, "/") { 807 | return NewInvalidPathnameError(pathname, "pathname cannot start with '/'") 808 | } 809 | 810 | if strings.Contains(pathname, "..") { 811 | return NewInvalidPathnameError(pathname, "pathname 
cannot contain '..'") 812 | } 813 | 814 | return nil 815 | } 816 | 817 | // detectContentType attempts to detect content type from filename and content 818 | func (o *Operations) detectContentType(pathname string, content []byte) string { 819 | // Simple content type detection based on file extension 820 | ext := strings.ToLower(pathname[strings.LastIndex(pathname, ".")+1:]) 821 | 822 | contentTypes := map[string]string{ 823 | "jpg": "image/jpeg", 824 | "jpeg": "image/jpeg", 825 | "png": "image/png", 826 | "gif": "image/gif", 827 | "webp": "image/webp", 828 | "svg": "image/svg+xml", 829 | "pdf": "application/pdf", 830 | "txt": "text/plain", 831 | "html": "text/html", 832 | "css": "text/css", 833 | "js": "application/javascript", 834 | "json": "application/json", 835 | "xml": "application/xml", 836 | "zip": "application/zip", 837 | "mp4": "video/mp4", 838 | "mp3": "audio/mpeg", 839 | } 840 | 841 | if contentType, ok := contentTypes[ext]; ok { 842 | return contentType 843 | } 844 | 845 | return "application/octet-stream" 846 | } 847 | -------------------------------------------------------------------------------- /metrics.md: -------------------------------------------------------------------------------- 1 | # S3 Plugin Metrics - Complete Grafana Guide 2 | 3 | This guide provides ready-to-use PromQL queries for S3 Storage Plugin metrics, including panel configuration details (legend, min step, units). 4 | 5 | --- 6 | 7 | ## Overview 8 | 9 | The S3 plugin exposes two primary metrics to track file operations and errors: 10 | 11 | - `rr_s3_operations_total` - Counter tracking all S3 operations by type, bucket, and status 12 | - `rr_s3_errors_total` - Counter tracking errors by bucket and error type 13 | 14 | --- 15 | 16 | ## 1. 
Operation Metrics 17 | 18 | ### 1.1 Total Operations Per Second 19 | 20 | **Query:** 21 | 22 | ```promql 23 | sum(rate(rr_s3_operations_total[5m])) 24 | ``` 25 | 26 | **Configuration:** 27 | 28 | - **Legend:** `Total OPS` 29 | - **Min Step:** `15s` 30 | - **Unit:** `ops (operations/sec)` 31 | - **Panel Type:** Graph 32 | - **Description:** Overall S3 operation rate across all buckets 33 | 34 | --- 35 | 36 | ### 1.2 Operations Per Second by Bucket 37 | 38 | **Query:** 39 | 40 | ```promql 41 | sum by (bucket) (rate(rr_s3_operations_total[5m])) 42 | ``` 43 | 44 | **Configuration:** 45 | 46 | - **Legend:** `{{bucket}}` 47 | - **Min Step:** `15s` 48 | - **Unit:** `ops (operations/sec)` 49 | - **Panel Type:** Graph or Bar gauge 50 | - **Description:** Operation rate grouped by bucket 51 | 52 | --- 53 | 54 | ### 1.3 Operations Per Second by Type 55 | 56 | **Query:** 57 | 58 | ```promql 59 | sum by (operation) (rate(rr_s3_operations_total[5m])) 60 | ``` 61 | 62 | **Configuration:** 63 | 64 | - **Legend:** `{{operation}}` 65 | - **Min Step:** `15s` 66 | - **Unit:** `ops (operations/sec)` 67 | - **Panel Type:** Graph (stacked area) or Pie chart 68 | - **Description:** Operation distribution by type (write, read, delete, copy, move, list, exists, get_metadata, set_visibility, get_url) 69 | 70 | --- 71 | 72 | ### 1.4 Operations Per Second by Status 73 | 74 | **Query:** 75 | 76 | ```promql 77 | sum by (status) (rate(rr_s3_operations_total[5m])) 78 | ``` 79 | 80 | **Configuration:** 81 | 82 | - **Legend:** `{{status}}` 83 | - **Min Step:** `15s` 84 | - **Unit:** `ops (operations/sec)` 85 | - **Panel Type:** Graph (stacked area) 86 | - **Description:** Operation rate grouped by status (success, error) 87 | 88 | --- 89 | 90 | ### 1.5 Success Rate Percentage 91 | 92 | **Query:** 93 | 94 | ```promql 95 | sum(rate(rr_s3_operations_total{status="success"}[5m])) / sum(rate(rr_s3_operations_total[5m])) * 100 96 | ``` 97 | 98 | **Configuration:** 99 | 100 | - **Legend:** `Success Rate` 
101 | - **Min Step:** `15s` 102 | - **Unit:** `percent (0-100)` 103 | - **Panel Type:** Gauge or Graph 104 | - **Thresholds:** Red < 95%, Yellow 95-99%, Green > 99% 105 | - **Description:** Percentage of successful S3 operations 106 | 107 | --- 108 | 109 | ### 1.6 Total Operations Count 110 | 111 | **Query:** 112 | 113 | ```promql 114 | sum(rr_s3_operations_total) 115 | ``` 116 | 117 | **Configuration:** 118 | 119 | - **Legend:** `Total Operations` 120 | - **Min Step:** `1m` 121 | - **Unit:** `short` 122 | - **Panel Type:** Stat 123 | - **Description:** Cumulative count of all S3 operations since start 124 | 125 | --- 126 | 127 | ## 2. Bucket Analysis 128 | 129 | ### 2.1 Most Active Buckets (by Total Operations) 130 | 131 | **Query:** 132 | 133 | ```promql 134 | topk(10, sum by (bucket) (rate(rr_s3_operations_total[5m]))) 135 | ``` 136 | 137 | **Configuration:** 138 | 139 | - **Legend:** `{{bucket}}` 140 | - **Min Step:** `15s` 141 | - **Unit:** `ops (operations/sec)` 142 | - **Panel Type:** Bar gauge (horizontal) or Table 143 | - **Description:** Top 10 buckets by operation rate 144 | 145 | --- 146 | 147 | ### 2.2 Most Active Buckets (by Write Operations) 148 | 149 | **Query:** 150 | 151 | ```promql 152 | topk(10, sum by (bucket) (rate(rr_s3_operations_total{operation="write"}[5m]))) 153 | ``` 154 | 155 | **Configuration:** 156 | 157 | - **Legend:** `{{bucket}}` 158 | - **Min Step:** `15s` 159 | - **Unit:** `ops (operations/sec)` 160 | - **Panel Type:** Bar gauge or Table 161 | - **Description:** Top 10 buckets by write operation rate 162 | 163 | --- 164 | 165 | ### 2.3 Most Active Buckets (by Read Operations) 166 | 167 | **Query:** 168 | 169 | ```promql 170 | topk(10, sum by (bucket) (rate(rr_s3_operations_total{operation="read"}[5m]))) 171 | ``` 172 | 173 | **Configuration:** 174 | 175 | - **Legend:** `{{bucket}}` 176 | - **Min Step:** `15s` 177 | - **Unit:** `ops (operations/sec)` 178 | - **Panel Type:** Bar gauge or Table 179 | - **Description:** Top 10 
buckets by read operation rate 180 | 181 | --- 182 | 183 | ### 2.4 Bucket Performance Table 184 | 185 | **Query 1 (Total OPS):** 186 | 187 | ```promql 188 | sum by (bucket) (rate(rr_s3_operations_total[5m])) 189 | ``` 190 | 191 | **Query 2 (Write OPS):** 192 | 193 | ```promql 194 | sum by (bucket) (rate(rr_s3_operations_total{operation="write"}[5m])) 195 | ``` 196 | 197 | **Query 3 (Read OPS):** 198 | 199 | ```promql 200 | sum by (bucket) (rate(rr_s3_operations_total{operation="read"}[5m])) 201 | ``` 202 | 203 | **Query 4 (Error %):** 204 | 205 | ```promql 206 | sum by (bucket) (rate(rr_s3_operations_total{status="error"}[5m])) / sum by (bucket) (rate(rr_s3_operations_total[5m])) * 100 207 | ``` 208 | 209 | **Configuration:** 210 | 211 | - **Legend:** N/A (Table columns) 212 | - **Min Step:** `15s` 213 | - **Unit:** 214 | - Query 1-3: `ops (operations/sec)` 215 | - Query 4: `percent (0-100)` 216 | - **Panel Type:** Table 217 | - **Column Names:** `Bucket`, `Total OPS`, `Write OPS`, `Read OPS`, `Error Rate %` 218 | - **Description:** Comprehensive bucket performance overview 219 | 220 | --- 221 | 222 | ### 2.5 Read/Write Ratio by Bucket 223 | 224 | **Query:** 225 | 226 | ```promql 227 | sum by (bucket) (rate(rr_s3_operations_total{operation="read"}[5m])) / sum by (bucket) (rate(rr_s3_operations_total{operation="write"}[5m])) 228 | ``` 229 | 230 | **Configuration:** 231 | 232 | - **Legend:** `{{bucket}}` 233 | - **Min Step:** `15s` 234 | - **Unit:** `short` (ratio) 235 | - **Panel Type:** Graph or Table 236 | - **Description:** Read-to-write ratio per bucket (higher = more reads than writes) 237 | 238 | --- 239 | 240 | ## 3. 
Operation Type Analysis 241 | 242 | ### 3.1 Write Operations Rate 243 | 244 | **Query:** 245 | 246 | ```promql 247 | sum(rate(rr_s3_operations_total{operation="write"}[5m])) 248 | ``` 249 | 250 | **Configuration:** 251 | 252 | - **Legend:** `Writes/sec` 253 | - **Min Step:** `15s` 254 | - **Unit:** `ops (operations/sec)` 255 | - **Panel Type:** Graph 256 | - **Description:** Total file upload rate 257 | 258 | --- 259 | 260 | ### 3.2 Read Operations Rate 261 | 262 | **Query:** 263 | 264 | ```promql 265 | sum(rate(rr_s3_operations_total{operation="read"}[5m])) 266 | ``` 267 | 268 | **Configuration:** 269 | 270 | - **Legend:** `Reads/sec` 271 | - **Min Step:** `15s` 272 | - **Unit:** `ops (operations/sec)` 273 | - **Panel Type:** Graph 274 | - **Description:** Total file download rate 275 | 276 | --- 277 | 278 | ### 3.3 Delete Operations Rate 279 | 280 | **Query:** 281 | 282 | ```promql 283 | sum(rate(rr_s3_operations_total{operation="delete"}[5m])) 284 | ``` 285 | 286 | **Configuration:** 287 | 288 | - **Legend:** `Deletes/sec` 289 | - **Min Step:** `15s` 290 | - **Unit:** `ops (operations/sec)` 291 | - **Panel Type:** Graph 292 | - **Description:** Total file deletion rate 293 | 294 | --- 295 | 296 | ### 3.4 List Operations Rate 297 | 298 | **Query:** 299 | 300 | ```promql 301 | sum(rate(rr_s3_operations_total{operation="list"}[5m])) 302 | ``` 303 | 304 | **Configuration:** 305 | 306 | - **Legend:** `Lists/sec` 307 | - **Min Step:** `15s` 308 | - **Unit:** `ops (operations/sec)` 309 | - **Panel Type:** Graph 310 | - **Description:** Total object listing rate 311 | 312 | --- 313 | 314 | ### 3.5 Copy Operations Rate 315 | 316 | **Query:** 317 | 318 | ```promql 319 | sum(rate(rr_s3_operations_total{operation="copy"}[5m])) 320 | ``` 321 | 322 | **Configuration:** 323 | 324 | - **Legend:** `Copies/sec` 325 | - **Min Step:** `15s` 326 | - **Unit:** `ops (operations/sec)` 327 | - **Panel Type:** Graph 328 | - **Description:** Total file copy rate 329 | 330 | --- 331 | 332 
| ### 3.6 Move Operations Rate 333 | 334 | **Query:** 335 | 336 | ```promql 337 | sum(rate(rr_s3_operations_total{operation="move"}[5m])) 338 | ``` 339 | 340 | **Configuration:** 341 | 342 | - **Legend:** `Moves/sec` 343 | - **Min Step:** `15s` 344 | - **Unit:** `ops (operations/sec)` 345 | - **Panel Type:** Graph 346 | - **Description:** Total file move rate 347 | 348 | --- 349 | 350 | ### 3.7 Exists Check Rate 351 | 352 | **Query:** 353 | 354 | ```promql 355 | sum(rate(rr_s3_operations_total{operation="exists"}[5m])) 356 | ``` 357 | 358 | **Configuration:** 359 | 360 | - **Legend:** `Exists checks/sec` 361 | - **Min Step:** `15s` 362 | - **Unit:** `ops (operations/sec)` 363 | - **Panel Type:** Graph 364 | - **Description:** Total file existence check rate 365 | 366 | --- 367 | 368 | ### 3.8 Metadata Operations Rate 369 | 370 | **Query:** 371 | 372 | ```promql 373 | sum(rate(rr_s3_operations_total{operation="get_metadata"}[5m])) 374 | ``` 375 | 376 | **Configuration:** 377 | 378 | - **Legend:** `Metadata ops/sec` 379 | - **Min Step:** `15s` 380 | - **Unit:** `ops (operations/sec)` 381 | - **Panel Type:** Graph 382 | - **Description:** Total metadata retrieval rate 383 | 384 | --- 385 | 386 | ### 3.9 Visibility Change Rate 387 | 388 | **Query:** 389 | 390 | ```promql 391 | sum(rate(rr_s3_operations_total{operation="set_visibility"}[5m])) 392 | ``` 393 | 394 | **Configuration:** 395 | 396 | - **Legend:** `Visibility changes/sec` 397 | - **Min Step:** `15s` 398 | - **Unit:** `ops (operations/sec)` 399 | - **Panel Type:** Graph 400 | - **Description:** Total ACL change rate 401 | 402 | --- 403 | 404 | ### 3.10 URL Generation Rate 405 | 406 | **Query:** 407 | 408 | ```promql 409 | sum(rate(rr_s3_operations_total{operation="get_url"}[5m])) 410 | ``` 411 | 412 | **Configuration:** 413 | 414 | - **Legend:** `URL gens/sec` 415 | - **Min Step:** `15s` 416 | - **Unit:** `ops (operations/sec)` 417 | - **Panel Type:** Graph 418 | - **Description:** Total URL generation rate 
(public/presigned) 419 | 420 | --- 421 | 422 | ### 3.11 Operation Distribution (Pie Chart) 423 | 424 | **Query:** 425 | 426 | ```promql 427 | sum by (operation) (rate(rr_s3_operations_total[5m])) / on() group_left sum(rate(rr_s3_operations_total[5m])) * 100 428 | ``` 429 | 430 | **Configuration:** 431 | 432 | - **Legend:** `{{operation}}` 433 | - **Min Step:** `15s` 434 | - **Unit:** `percent (0-100)` 435 | - **Panel Type:** Pie chart 436 | - **Description:** Percentage breakdown of operations by type (the `on() group_left` modifier is required to divide the per-operation vector by the label-less total) 437 | 438 | --- 439 | 440 | ### 3.12 Write vs Read Operations (Stacked) 441 | 442 | **Query 1 (Writes):** 443 | 444 | ```promql 445 | sum(rate(rr_s3_operations_total{operation="write"}[5m])) 446 | ``` 447 | 448 | **Query 2 (Reads):** 449 | 450 | ```promql 451 | sum(rate(rr_s3_operations_total{operation="read"}[5m])) 452 | ``` 453 | 454 | **Configuration:** 455 | 456 | - **Legend:** 457 | - Query 1: `Writes` 458 | - Query 2: `Reads` 459 | - **Min Step:** `15s` 460 | - **Unit:** `ops (operations/sec)` 461 | - **Panel Type:** Graph (stacked area) 462 | - **Description:** Visual comparison of write vs read operations 463 | 464 | --- 465 | 466 | ## 4.
Error Tracking 467 | 468 | ### 4.1 Total Error Rate 469 | 470 | **Query:** 471 | 472 | ```promql 473 | sum(rate(rr_s3_errors_total[5m])) 474 | ``` 475 | 476 | **Configuration:** 477 | 478 | - **Legend:** `Errors/sec` 479 | - **Min Step:** `15s` 480 | - **Unit:** `errors/sec` 481 | - **Panel Type:** Graph 482 | - **Description:** Total S3 errors per second (all types) 483 | 484 | --- 485 | 486 | ### 4.2 Error Rate Percentage 487 | 488 | **Query:** 489 | 490 | ```promql 491 | sum(rate(rr_s3_operations_total{status="error"}[5m])) / sum(rate(rr_s3_operations_total[5m])) * 100 492 | ``` 493 | 494 | **Configuration:** 495 | 496 | - **Legend:** `Error Rate` 497 | - **Min Step:** `15s` 498 | - **Unit:** `percent (0-100)` 499 | - **Panel Type:** Gauge or Graph 500 | - **Thresholds:** Green < 1%, Yellow 1-5%, Red > 5% 501 | - **Description:** Percentage of operations that result in errors 502 | 503 | --- 504 | 505 | ### 4.3 Error Rate by Type 506 | 507 | **Query:** 508 | 509 | ```promql 510 | sum by (error_type) (rate(rr_s3_errors_total[5m])) 511 | ``` 512 | 513 | **Configuration:** 514 | 515 | - **Legend:** `{{error_type}}` 516 | - **Min Step:** `15s` 517 | - **Unit:** `errors/sec` 518 | - **Panel Type:** Graph (stacked) or Pie chart 519 | - **Description:** Errors grouped by classification (BUCKET_NOT_FOUND, FILE_NOT_FOUND, S3_OPERATION_FAILED, etc.) 
520 | 521 | --- 522 | 523 | ### 4.4 Error Rate by Bucket 524 | 525 | **Query:** 526 | 527 | ```promql 528 | sum by (bucket) (rate(rr_s3_errors_total[5m])) 529 | ``` 530 | 531 | **Configuration:** 532 | 533 | - **Legend:** `{{bucket}}` 534 | - **Min Step:** `15s` 535 | - **Unit:** `errors/sec` 536 | - **Panel Type:** Graph or Table 537 | - **Description:** Errors grouped by bucket 538 | 539 | --- 540 | 541 | ### 4.5 Most Error-Prone Buckets (by Count) 542 | 543 | **Query:** 544 | 545 | ```promql 546 | topk(10, sum by (bucket) (rate(rr_s3_errors_total[5m]))) 547 | ``` 548 | 549 | **Configuration:** 550 | 551 | - **Legend:** `{{bucket}}` 552 | - **Min Step:** `15s` 553 | - **Unit:** `errors/sec` 554 | - **Panel Type:** Bar gauge or Table 555 | - **Description:** Buckets with highest error rate 556 | 557 | --- 558 | 559 | ### 4.6 Most Error-Prone Buckets (by Percentage) 560 | 561 | **Query:** 562 | 563 | ```promql 564 | topk(10, sum by (bucket) (rate(rr_s3_operations_total{status="error"}[5m])) / sum by (bucket) (rate(rr_s3_operations_total[5m])) * 100) 565 | ``` 566 | 567 | **Configuration:** 568 | 569 | - **Legend:** `{{bucket}}` 570 | - **Min Step:** `15s` 571 | - **Unit:** `percent (0-100)` 572 | - **Panel Type:** Bar gauge or Table 573 | - **Description:** Buckets with highest error percentage 574 | 575 | --- 576 | 577 | ### 4.7 Bucket Not Found Errors 578 | 579 | **Query:** 580 | 581 | ```promql 582 | sum(rate(rr_s3_errors_total{error_type="BUCKET_NOT_FOUND"}[5m])) 583 | ``` 584 | 585 | **Configuration:** 586 | 587 | - **Legend:** `Bucket Not Found` 588 | - **Min Step:** `15s` 589 | - **Unit:** `errors/sec` 590 | - **Panel Type:** Graph 591 | - **Description:** Rate of bucket not found errors 592 | 593 | --- 594 | 595 | ### 4.8 File Not Found Errors 596 | 597 | **Query:** 598 | 599 | ```promql 600 | sum(rate(rr_s3_errors_total{error_type="FILE_NOT_FOUND"}[5m])) 601 | ``` 602 | 603 | **Configuration:** 604 | 605 | - **Legend:** `File Not Found` 606 | - **Min 
Step:** `15s` 607 | - **Unit:** `errors/sec` 608 | - **Panel Type:** Graph 609 | - **Description:** Rate of file not found errors 610 | 611 | --- 612 | 613 | ### 4.9 S3 Operation Failed Errors 614 | 615 | **Query:** 616 | 617 | ```promql 618 | sum(rate(rr_s3_errors_total{error_type="S3_OPERATION_FAILED"}[5m])) 619 | ``` 620 | 621 | **Configuration:** 622 | 623 | - **Legend:** `S3 Operation Failed` 624 | - **Min Step:** `15s` 625 | - **Unit:** `errors/sec` 626 | - **Panel Type:** Graph 627 | - **Description:** Rate of S3 SDK operation failures 628 | 629 | --- 630 | 631 | ### 4.10 Permission Denied Errors 632 | 633 | **Query:** 634 | 635 | ```promql 636 | sum(rate(rr_s3_errors_total{error_type="PERMISSION_DENIED"}[5m])) 637 | ``` 638 | 639 | **Configuration:** 640 | 641 | - **Legend:** `Permission Denied` 642 | - **Min Step:** `15s` 643 | - **Unit:** `errors/sec` 644 | - **Panel Type:** Graph 645 | - **Description:** Rate of permission/access denied errors 646 | 647 | --- 648 | 649 | ### 4.11 Invalid Pathname Errors 650 | 651 | **Query:** 652 | 653 | ```promql 654 | sum(rate(rr_s3_errors_total{error_type="INVALID_PATHNAME"}[5m])) 655 | ``` 656 | 657 | **Configuration:** 658 | 659 | - **Legend:** `Invalid Pathname` 660 | - **Min Step:** `15s` 661 | - **Unit:** `errors/sec` 662 | - **Panel Type:** Graph 663 | - **Description:** Rate of invalid pathname errors 664 | 665 | --- 666 | 667 | ### 4.12 Operation Timeout Errors 668 | 669 | **Query:** 670 | 671 | ```promql 672 | sum(rate(rr_s3_errors_total{error_type="OPERATION_TIMEOUT"}[5m])) 673 | ``` 674 | 675 | **Configuration:** 676 | 677 | - **Legend:** `Timeouts` 678 | - **Min Step:** `15s` 679 | - **Unit:** `errors/sec` 680 | - **Panel Type:** Graph 681 | - **Thresholds:** Any value > 0 requires investigation 682 | - **Description:** Rate of operation timeout errors 683 | 684 | --- 685 | 686 | ### 4.13 Error Distribution (Pie Chart) 687 | 688 | **Query:** 689 | 690 | ```promql 691 | sum by (error_type) 
(rate(rr_s3_errors_total[5m])) / on() group_left sum(rate(rr_s3_errors_total[5m])) * 100 692 | ``` 693 | 694 | **Configuration:** 695 | 696 | - **Legend:** `{{error_type}}` 697 | - **Min Step:** `15s` 698 | - **Unit:** `percent (0-100)` 699 | - **Panel Type:** Pie chart 700 | - **Description:** Percentage breakdown of errors by type (the `on() group_left` modifier is required to divide the per-type vector by the label-less total) 701 | 702 | --- 703 | 704 | ### 4.14 Error Heatmap (Bucket vs Error Type) 705 | 706 | **Query:** 707 | 708 | ```promql 709 | sum by (bucket, error_type) (rate(rr_s3_errors_total[5m])) 710 | ``` 711 | 712 | **Configuration:** 713 | 714 | - **Legend:** N/A (heatmap) 715 | - **Min Step:** `30s` 716 | - **Unit:** `errors/sec` 717 | - **Panel Type:** Heatmap 718 | - **Description:** Visual correlation between buckets and error types 719 | 720 | --- 721 | 722 | ## 5. Combined Operation & Error Analysis 723 | 724 | ### 5.1 Operations and Errors (Dual Axis) 725 | 726 | **Query 1 (Operations - Left Axis):** 727 | 728 | ```promql 729 | sum(rate(rr_s3_operations_total[5m])) 730 | ``` 731 | 732 | **Query 2 (Errors - Right Axis):** 733 | 734 | ```promql 735 | sum(rate(rr_s3_errors_total[5m])) 736 | ``` 737 | 738 | **Configuration:** 739 | 740 | - **Legend:** 741 | - Query 1: `Operations/sec` 742 | - Query 2: `Errors/sec` 743 | - **Min Step:** `15s` 744 | - **Unit:** 745 | - Left Axis: `ops (operations/sec)` 746 | - Right Axis: `errors/sec` 747 | - **Panel Type:** Graph (dual Y-axis) 748 | - **Description:** Correlation between operation rate and error rate 749 | 750 | --- 751 | 752 | ### 5.2 Success vs Error Rate (Stacked) 753 | 754 | **Query 1 (Success):** 755 | 756 | ```promql 757 | sum(rate(rr_s3_operations_total{status="success"}[5m])) 758 | ``` 759 | 760 | **Query 2 (Error):** 761 | 762 | ```promql 763 | sum(rate(rr_s3_operations_total{status="error"}[5m])) 764 | ``` 765 | 766 | **Configuration:** 767 | 768 | - **Legend:** 769 | - Query 1: `Success` 770 | - Query 2: `Error` 771 | - **Min Step:** `15s` 772 | - **Unit:** `ops (operations/sec)` 773 | - **Panel
Type:** Graph (stacked area) 774 | - **Description:** Visual comparison of successful vs failed operations 775 | 776 | --- 777 | 778 | ### 5.3 Write Operation Success Rate 779 | 780 | **Query:** 781 | 782 | ```promql 783 | sum(rate(rr_s3_operations_total{operation="write",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="write"}[5m])) * 100 784 | ``` 785 | 786 | **Configuration:** 787 | 788 | - **Legend:** `Write Success Rate` 789 | - **Min Step:** `15s` 790 | - **Unit:** `percent (0-100)` 791 | - **Panel Type:** Graph or Gauge 792 | - **Thresholds:** Red < 95%, Yellow 95-99%, Green > 99% 793 | - **Description:** Success rate for write operations only 794 | 795 | --- 796 | 797 | ### 5.4 Read Operation Success Rate 798 | 799 | **Query:** 800 | 801 | ```promql 802 | sum(rate(rr_s3_operations_total{operation="read",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="read"}[5m])) * 100 803 | ``` 804 | 805 | **Configuration:** 806 | 807 | - **Legend:** `Read Success Rate` 808 | - **Min Step:** `15s` 809 | - **Unit:** `percent (0-100)` 810 | - **Panel Type:** Graph or Gauge 811 | - **Thresholds:** Red < 95%, Yellow 95-99%, Green > 99% 812 | - **Description:** Success rate for read operations only 813 | 814 | --- 815 | 816 | ### 5.5 Operation Success Rate by Type (Table) 817 | 818 | **Query 1 (Write):** 819 | 820 | ```promql 821 | sum(rate(rr_s3_operations_total{operation="write",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="write"}[5m])) * 100 822 | ``` 823 | 824 | **Query 2 (Read):** 825 | 826 | ```promql 827 | sum(rate(rr_s3_operations_total{operation="read",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="read"}[5m])) * 100 828 | ``` 829 | 830 | **Query 3 (Delete):** 831 | 832 | ```promql 833 | sum(rate(rr_s3_operations_total{operation="delete",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="delete"}[5m])) * 100 834 | ``` 835 | 836 | **Query 4 (List):** 837 | 838 | 
```promql 839 | sum(rate(rr_s3_operations_total{operation="list",status="success"}[5m])) / sum(rate(rr_s3_operations_total{operation="list"}[5m])) * 100 840 | ``` 841 | 842 | **Configuration:** 843 | 844 | - **Legend:** N/A (Table rows) 845 | - **Min Step:** `15s` 846 | - **Unit:** `percent (0-100)` 847 | - **Panel Type:** Table 848 | - **Row Names:** `Write`, `Read`, `Delete`, `List` 849 | - **Description:** Success rate breakdown by operation type 850 | 851 | --- 852 | 853 | ## 6. Advanced Analytics 854 | 855 | ### 6.1 Operation Rate Trend (Day over Day) 856 | 857 | **Query:** 858 | 859 | ```promql 860 | sum(rate(rr_s3_operations_total[1h])) / sum(rate(rr_s3_operations_total[1h] offset 24h)) 861 | ``` 862 | 863 | **Configuration:** 864 | 865 | - **Legend:** `DoD Change` 866 | - **Min Step:** `5m` 867 | - **Unit:** `short` (ratio) 868 | - **Panel Type:** Graph or Stat 869 | - **Description:** Current hour traffic vs same hour yesterday (1.0 = same, 2.0 = double) 870 | 871 | --- 872 | 873 | ### 6.2 Error Burst Detection 874 | 875 | **Query:** 876 | 877 | ```promql 878 | sum(rate(rr_s3_errors_total[1m])) > bool 2 * avg_over_time(sum(rate(rr_s3_errors_total[1m]))[10m:1m]) 879 | ``` 880 | 881 | **Configuration:** 882 | 883 | - **Legend:** `Error Burst` 884 | - **Min Step:** `15s` 885 | - **Unit:** `bool` (0 or 1) 886 | - **Panel Type:** Graph (binary) 887 | - **Thresholds:** Red when value = 1 888 | - **Description:** Detects sudden spikes in errors (>2x baseline); the `bool` modifier makes the comparison return 0/1 instead of filtering 889 | 890 | --- 891 | 892 | ### 6.3 Operations Per Bucket (Distribution) 893 | 894 | **Query:** 895 | 896 | ```promql 897 | sum by (bucket) (rr_s3_operations_total) 898 | ``` 899 | 900 | **Configuration:** 901 | 902 | - **Legend:** `{{bucket}}` 903 | - **Min Step:** `1m` 904 | - **Unit:** `short` 905 | - **Panel Type:** Pie chart or Bar gauge 906 | - **Description:** Total cumulative operations per bucket 907 | 908 | --- 909 | 910 | ### 6.4 Bucket Activity Timeline (Heatmap) 911 | 912 | **Query:** 913 | 914 |
```promql 915 | sum by (bucket) (rate(rr_s3_operations_total[5m])) 916 | ``` 917 | 918 | **Configuration:** 919 | 920 | - **Legend:** N/A (heatmap) 921 | - **Min Step:** `30s` 922 | - **Unit:** `ops (operations/sec)` 923 | - **Panel Type:** Heatmap 924 | - **Description:** Visual activity pattern across buckets over time 925 | 926 | --- 927 | 928 | ### 6.5 Write-Heavy vs Read-Heavy Buckets 929 | 930 | **Query:** 931 | 932 | ```promql 933 | (sum by (bucket) (rate(rr_s3_operations_total{operation="write"}[5m])) > bool sum by (bucket) (rate(rr_s3_operations_total{operation="read"}[5m]))) 934 | ``` 935 | 936 | **Configuration:** 937 | 938 | - **Legend:** `{{bucket}}` 939 | - **Min Step:** `15s` 940 | - **Unit:** `bool` (0 or 1) 941 | - **Panel Type:** Graph or Table 942 | - **Description:** Identifies write-heavy buckets (1 = more writes than reads; the `bool` modifier makes the comparison return 0/1 instead of filtering) 943 | 944 | --- 945 | 946 | ### 6.6 Most Reliable Bucket 947 | 948 | **Query:** 949 | 950 | ```promql 951 | bottomk(1, sum by (bucket) (rate(rr_s3_operations_total{status="error"}[5m])) / sum by (bucket) (rate(rr_s3_operations_total[5m])) * 100) 952 | ``` 953 | 954 | **Configuration:** 955 | 956 | - **Legend:** `{{bucket}}` 957 | - **Min Step:** `15s` 958 | - **Unit:** `percent (0-100)` 959 | - **Panel Type:** Stat 960 | - **Description:** Bucket with lowest error rate 961 | 962 | --- 963 | 964 | ### 6.7 Least Reliable Bucket 965 | 966 | **Query:** 967 | 968 | ```promql 969 | topk(1, sum by (bucket) (rate(rr_s3_operations_total{status="error"}[5m])) / sum by (bucket) (rate(rr_s3_operations_total[5m])) * 100) 970 | ``` 971 | 972 | **Configuration:** 973 | 974 | - **Legend:** `{{bucket}}` 975 | - **Min Step:** `15s` 976 | - **Unit:** `percent (0-100)` 977 | - **Panel Type:** Stat 978 | - **Thresholds:** Red > 5%, Yellow 1-5%, Green < 1% 979 | - **Description:** Bucket with highest error rate 980 | 981 | --- 982 | 983 | ## 7. Dashboard Layout Recommendations 984 | 985 | ### Row 1: Key Metrics Overview (4 panels) 986 | 987 | 1.
**Total Operations/sec** - Stat panel 988 | 2. **Success Rate %** - Gauge with thresholds 989 | 3. **Total Errors/sec** - Stat panel with threshold colors 990 | 4. **Active Buckets** - Stat (count of buckets with ops > 0) 991 | 992 | ### Row 2: Operation Analysis (2 panels) 993 | 994 | 1. **Operations by Type** - Stacked area graph 995 | 2. **Operations by Bucket** - Graph (time series) 996 | 997 | ### Row 3: Success vs Errors (2 panels) 998 | 999 | 1. **Success vs Error Rate** - Stacked area graph 1000 | 2. **Operation Success Rate by Type** - Table 1001 | 1002 | ### Row 4: Error Analysis (2 panels) 1003 | 1004 | 1. **Errors by Type** - Pie chart or Stacked area 1005 | 2. **Most Error-Prone Buckets** - Bar gauge 1006 | 1007 | ### Row 5: Bucket Performance (1 panel) 1008 | 1009 | 1. **Bucket Performance Table** - Table with multiple queries (Total OPS, Write OPS, Read OPS, Error %) 1010 | 1011 | ### Row 6: Advanced (2 panels) 1012 | 1013 | 1. **Error Heatmap (Bucket vs Type)** - Heatmap 1014 | 2. **Read/Write Ratio by Bucket** - Bar gauge 1015 | 1016 | --- 1017 | 1018 | ## 8. Unit Reference Guide 1019 | 1020 | ### Standard Grafana Units 1021 | 1022 | **Rate Units:** 1023 | 1024 | - `ops (operations/sec)` - for operation rates 1025 | - `errors/sec` - for error rates 1026 | 1027 | **Percentage:** 1028 | 1029 | - `percent (0-100)` - displays as 95% 1030 | - `percentunit (0.0-1.0)` - displays 0.95 as 95% 1031 | 1032 | **Count:** 1033 | 1034 | - `short` - auto-formats large numbers (1K, 1M) 1035 | - `none` - raw number 1036 | 1037 | **Boolean:** 1038 | 1039 | - `bool` - 0 or 1 1040 | - `bool_yes_no` - displays as Yes/No 1041 | 1042 | --- 1043 | 1044 | ## 9. 
Common Threshold Configurations 1045 | 1046 | ### Error Rate Thresholds 1047 | 1048 | ``` 1049 | Green: < 1% 1050 | Yellow: 1-5% 1051 | Red: > 5% 1052 | ``` 1053 | 1054 | ### Success Rate Thresholds 1055 | 1056 | ``` 1057 | Red: < 95% 1058 | Yellow: 95-99% 1059 | Green: > 99% 1060 | ``` 1061 | 1062 | ### Error Burst Detection 1063 | 1064 | ``` 1065 | Red: value = 1 (burst detected) 1066 | Green: value = 0 (normal) 1067 | ``` 1068 | 1069 | --- 1070 | 1071 | ## 10. Alert Rules (Prometheus) 1072 | 1073 | ### Critical Alerts 1074 | 1075 | **High Error Rate:** 1076 | 1077 | ```yaml 1078 | - alert: S3HighErrorRate 1079 | expr: sum(rate(rr_s3_operations_total{status="error"}[5m])) / sum(rate(rr_s3_operations_total[5m])) * 100 > 5 1080 | for: 5m 1081 | labels: 1082 | severity: critical 1083 | annotations: 1084 | summary: "S3 plugin error rate above 5%" 1085 | description: "Error rate is {{ $value }}% (threshold: 5%)" 1086 | ``` 1087 | 1088 | **Bucket Not Found:** 1089 | 1090 | ```yaml 1091 | - alert: S3BucketNotFoundErrors 1092 | expr: sum(rate(rr_s3_errors_total{error_type="BUCKET_NOT_FOUND"}[5m])) > 0 1093 | for: 2m 1094 | labels: 1095 | severity: critical 1096 | annotations: 1097 | summary: "S3 bucket not found errors detected" 1098 | description: "Configuration issue: bucket references don't exist" 1099 | ``` 1100 | 1101 | **Permission Denied:** 1102 | 1103 | ```yaml 1104 | - alert: S3PermissionDenied 1105 | expr: sum(rate(rr_s3_errors_total{error_type="PERMISSION_DENIED"}[5m])) > 0 1106 | for: 2m 1107 | labels: 1108 | severity: critical 1109 | annotations: 1110 | summary: "S3 permission denied errors" 1111 | description: "Credentials or IAM policy issue detected" 1112 | ``` 1113 | 1114 | ### Warning Alerts 1115 | 1116 | **Elevated Error Rate:** 1117 | 1118 | ```yaml 1119 | - alert: S3ElevatedErrorRate 1120 | expr: sum(rate(rr_s3_operations_total{status="error"}[5m])) / sum(rate(rr_s3_operations_total[5m])) * 100 > 1 1121 | for: 10m 1122 | labels: 1123 | severity: 
warning 1124 | annotations: 1125 | summary: "S3 plugin error rate elevated" 1126 | description: "Error rate is {{ $value }}% (threshold: 1%)" 1127 | ``` 1128 | 1129 | **Timeout Errors:** 1130 | 1131 | ```yaml 1132 | - alert: S3OperationTimeouts 1133 | expr: sum(rate(rr_s3_errors_total{error_type="OPERATION_TIMEOUT"}[5m])) > 0 1134 | for: 5m 1135 | labels: 1136 | severity: warning 1137 | annotations: 1138 | summary: "S3 operation timeouts detected" 1139 | description: "Network or S3 service performance issue" 1140 | ``` 1141 | 1142 | --- 1143 | 1144 | ## 11. Example Grafana Dashboard JSON 1145 | 1146 | See `grafana_dashboard_example.json` for a complete pre-configured dashboard including all key metrics and recommended visualizations. 1147 | 1148 | --- 1149 | 1150 | ## 12. Troubleshooting Guide 1151 | 1152 | ### No Metrics Appearing 1153 | 1154 | 1. Verify metrics plugin is enabled in `.rr.yaml`: 1155 | ```yaml 1156 | metrics: 1157 | address: 127.0.0.1:2112 1158 | ``` 1159 | 1160 | 2. Check metrics endpoint: 1161 | ```bash 1162 | curl http://localhost:2112/metrics | grep rr_s3 1163 | ``` 1164 | 1165 | 3. Ensure S3 plugin is performing operations (metrics only appear after first operation) 1166 | 1167 | ### High Error Rates 1168 | 1169 | 1. Check error types with: 1170 | ```promql 1171 | sum by (error_type) (rate(rr_s3_errors_total[5m])) 1172 | ``` 1173 | 1174 | 2. Identify problematic buckets: 1175 | ```promql 1176 | topk(5, sum by (bucket) (rate(rr_s3_errors_total[5m]))) 1177 | ``` 1178 | 1179 | 3. 
Review RoadRunner logs for detailed error messages 1180 | 1181 | ### Permission Issues 1182 | 1183 | - Check AWS credentials configuration 1184 | - Verify IAM policy has required permissions 1185 | - Review `PERMISSION_DENIED` error rate by bucket 1186 | 1187 | ### Performance Degradation 1188 | 1189 | - Monitor operation rate trends (Hour over Hour) 1190 | - Check for error bursts 1191 | - Review specific operation types (write/read/list) for bottlenecks 1192 | 1193 | --- 1194 | 1195 | ## 13. Integration with Other RoadRunner Metrics 1196 | 1197 | ### Combined RoadRunner + S3 Dashboard 1198 | 1199 | **HTTP Traffic vs S3 Operations:** 1200 | 1201 | ```promql 1202 | # HTTP RPS 1203 | sum(rate(rr_http_requests_total[5m])) 1204 | 1205 | # S3 OPS 1206 | sum(rate(rr_s3_operations_total[5m])) 1207 | ``` 1208 | 1209 | **Worker Pool vs S3 Activity:** 1210 | 1211 | ```promql 1212 | # Worker utilization 1213 | rr_http_worker_utilization_percent 1214 | 1215 | # S3 write operations 1216 | sum(rate(rr_s3_operations_total{operation="write"}[5m])) 1217 | ``` 1218 | 1219 | This correlation helps identify if S3 operations are causing worker pool pressure. 1220 | 1221 | --- 1222 | 1223 | ## 14. 
Best Practices 1224 | 1225 | ### Query Performance 1226 | 1227 | - Use `rate()` over `irate()` for smoother graphs 1228 | - Set appropriate `[5m]` intervals based on traffic volume 1229 | - Use `topk()` to limit cardinality in busy systems 1230 | 1231 | ### Alerting 1232 | 1233 | - Set up critical alerts for BUCKET_NOT_FOUND (config issues) 1234 | - Monitor PERMISSION_DENIED (security issues) 1235 | - Alert on error rate >5% sustained for 5 minutes 1236 | 1237 | ### Dashboard Organization 1238 | 1239 | - Group metrics by concern (operations, errors, buckets) 1240 | - Use consistent color schemes across panels 1241 | - Include both rate and percentage views 1242 | 1243 | ### Retention 1244 | 1245 | - Default Prometheus retention: 15 days 1246 | - Increase for long-term S3 usage analysis 1247 | - Consider recording rules for long-term aggregations 1248 | 1249 | --- 1250 | 1251 | ## 15. Recording Rules (Optional Optimization) 1252 | 1253 | For high-traffic systems, pre-compute common queries: 1254 | 1255 | ```yaml 1256 | groups: 1257 | - name: s3_recording_rules 1258 | interval: 30s 1259 | rules: 1260 | - record: s3:operations:rate5m 1261 | expr: sum(rate(rr_s3_operations_total[5m])) 1262 | 1263 | - record: s3:operations:rate5m:by_bucket 1264 | expr: sum by (bucket) (rate(rr_s3_operations_total[5m])) 1265 | 1266 | - record: s3:errors:rate5m 1267 | expr: sum(rate(rr_s3_errors_total[5m])) 1268 | 1269 | - record: s3:success_rate:percent 1270 | expr: sum(rate(rr_s3_operations_total{status="success"}[5m])) / sum(rate(rr_s3_operations_total[5m])) * 100 1271 | ``` 1272 | 1273 | Then query using: `s3:operations:rate5m` instead of full expression. 1274 | 1275 | --- 1276 | 1277 | For additional support or questions about S3 plugin metrics, refer to the main RoadRunner documentation or open an issue on GitHub. 1278 | --------------------------------------------------------------------------------