├── .dockerignore ├── booking ├── ticket.go ├── concert.go ├── order.go ├── services │ ├── instrument.go │ ├── task.go │ └── booking.go ├── stores │ ├── show.go │ ├── order.go │ └── order_test.go └── handler │ └── booking.go ├── .gitignore ├── hack └── observability │ ├── grafana │ ├── provisioning │ │ ├── datasources │ │ │ └── demo.yml │ │ └── dashboards │ │ │ └── demo.yml │ └── dashboards │ │ └── app-monitoring.json │ ├── prometheus.yaml │ └── otel-config.yaml ├── cmd ├── main.go └── commands │ ├── root.go │ ├── worker.go │ └── server.go ├── .env.example ├── k8s ├── podmonitoring.yaml ├── secret.yaml.example ├── hey.yaml ├── asynqmon.yaml ├── worker.yaml ├── app.yaml ├── otel-collector.yaml └── redis-values.yml ├── Dockerfile ├── Makefile ├── server ├── middleware │ └── request_id.go ├── db.go ├── telemetry.go ├── route.go └── server.go ├── internal ├── telemetry │ ├── metric │ │ ├── exporter │ │ │ └── otlp.go │ │ └── builder.go │ ├── trace │ │ ├── exporter │ │ │ └── otlp.go │ │ └── builder.go │ └── span.go └── store │ └── gorm │ └── postgres │ └── db.go ├── worker ├── middleware │ └── tracing.go ├── handler.go ├── telemetry.go └── worker.go ├── README.md ├── docker-compose.yml └── go.mod /.dockerignore: -------------------------------------------------------------------------------- 1 | hack 2 | k8s -------------------------------------------------------------------------------- /booking/ticket.go: -------------------------------------------------------------------------------- 1 | package booking 2 | 3 | type Ticket struct { 4 | } 5 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ 3 | .DS_STORE 4 | 5 | api/.idea/ 6 | hack/observability/secrets/* 7 | k8s/secret.yaml -------------------------------------------------------------------------------- /booking/concert.go: 
-------------------------------------------------------------------------------- 1 | package booking 2 | 3 | import ( 4 | "github.com/google/uuid" 5 | "gorm.io/gorm" 6 | ) 7 | 8 | type Show struct { 9 | gorm.Model 10 | ID uuid.UUID `gorm:"type:uuid;not null;primaryKey"` 11 | RemainingTickets int 12 | } 13 | -------------------------------------------------------------------------------- /hack/observability/grafana/provisioning/datasources/demo.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | datasources: 3 | - access: proxy 4 | editable: false 5 | isDefault: true 6 | name: Prometheus 7 | type: prometheus 8 | url: http://prometheus:9090 9 | version: 1 10 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | 6 | "github.com/imrenagi/concurrent-booking/cmd/commands" 7 | ) 8 | 9 | func main() { 10 | err := commands.NewRootCommand().Execute() 11 | if err != nil { 12 | log.Fatal().Msg(err.Error()) 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | DB_HOST=127.0.0.1 2 | DB_PORT=5436 3 | DB_USER=booking 4 | DB_PASSWORD=booking 5 | DB_NAME=booking 6 | 7 | ENVIRONMENT=development 8 | OTEL_RECEIVER_OTLP_ENDPOINT=0.0.0.0:4317 9 | 10 | OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT=https://otlp.nr-data.net:4318 11 | OTEL_NEW_RELIC_EXPORTER_API_KEY= -------------------------------------------------------------------------------- /hack/observability/grafana/provisioning/dashboards/demo.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | providers: 3 | - allowUiUpdates: true 4 | disableDeletion: false 5 | name: demo 6 | options: 7 | 
foldersFromFilesStructure: true 8 | path: /etc/grafana/demo-dashboards 9 | updateIntervalSeconds: 10 10 | -------------------------------------------------------------------------------- /k8s/podmonitoring.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.googleapis.com/v1 2 | kind: PodMonitoring 3 | metadata: 4 | name: otel-collector 5 | namespace: booking 6 | spec: 7 | selector: 8 | matchLabels: 9 | app.kubernetes.io/name: otel-collector 10 | endpoints: 11 | - port: 8889 12 | interval: 5s -------------------------------------------------------------------------------- /hack/observability/prometheus.yaml: -------------------------------------------------------------------------------- 1 | scrape_configs: 2 | - job_name: 'otel-collector' 3 | scrape_interval: 5s 4 | static_configs: 5 | - targets: [ 'otel-collector:8889' ] 6 | - targets: [ 'otel-collector:8888' ] 7 | - job_name: 'asynqmon' 8 | scrape_interval: 5s 9 | static_configs: 10 | - targets: [ 'asynqmon:8080' ] 11 | -------------------------------------------------------------------------------- /k8s/secret.yaml.example: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: otel-collector-newrelic-secret 5 | namespace: booking 6 | type: Opaque 7 | data: 8 | apikey: "" 9 | --- 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: booking-db-secret 14 | namespace: booking 15 | type: Opaque 16 | data: 17 | name: "" 18 | username: "" 19 | userpassword: "" -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.18 as golang 2 | RUN mkdir -p / 3 | WORKDIR / 4 | COPY . . 
5 | RUN make build 6 | 7 | FROM alpine:3 as alpine 8 | RUN apk update && apk add --no-cache ca-certificates tzdata && update-ca-certificates 9 | 10 | FROM alpine:3 11 | ENTRYPOINT [] 12 | WORKDIR / 13 | COPY --from=alpine /usr/share/zoneinfo /usr/share/zoneinfo 14 | COPY --from=alpine /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 15 | COPY --from=alpine /etc/passwd /etc/passwd 16 | COPY --from=golang /bin/booking . -------------------------------------------------------------------------------- /cmd/commands/root.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | 	"github.com/spf13/cobra" 5 | ) 6 | 7 | var ( 8 | 	cliName = "booking" 9 | ) 10 | 11 | // NewRootCommand returns a new instance of a command 12 | func NewRootCommand() *cobra.Command { 13 | 14 | 	var command = &cobra.Command{ 15 | 		Use:   cliName, 16 | 		Short: "Run service", 17 | 		Run: func(c *cobra.Command, args []string) { 18 | 			c.HelpFunc()(c, args) 19 | 		}, 20 | 	} 21 | 	command.AddCommand(serverCmd(), workerCmd()) 22 | 	return command 23 | } 24 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | APP_NAME=booking 2 | IMAGE_REGISTRY=docker.io/imrenagi 3 | IMAGE_NAME=$(IMAGE_REGISTRY)/$(APP_NAME) 4 | IMAGE_TAG=$(shell git rev-parse --short HEAD) 5 | 6 | .PHONY: build test docker-build docker-push 7 | 8 | build: 9 | 	go build -a -ldflags "-linkmode external -extldflags '-static' -s -w" -o bin/$(APP_NAME) cmd/main.go 10 | 11 | test: 12 | 	go test ./... -cover -vet=all 13 | 14 | docker-build: 15 | 	docker build -t $(IMAGE_NAME):$(IMAGE_TAG) . 
16 | docker tag $(IMAGE_NAME):$(IMAGE_TAG) $(IMAGE_NAME):latest 17 | 18 | docker-push: 19 | docker push $(IMAGE_NAME):latest -------------------------------------------------------------------------------- /booking/order.go: -------------------------------------------------------------------------------- 1 | package booking 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/google/uuid" 7 | "gorm.io/gorm" 8 | ) 9 | 10 | var ( 11 | ErrTicketIsNotAvailable = fmt.Errorf("ticket is not available") 12 | ) 13 | 14 | type OrderStatus string 15 | 16 | const ( 17 | Created OrderStatus = "created" 18 | Reserved OrderStatus = "reserved" 19 | Rejected OrderStatus = "rejected" 20 | ) 21 | 22 | type Order struct { 23 | gorm.Model 24 | ID uuid.UUID `gorm:"type:uuid;not null;primaryKey"` 25 | ShowID uuid.UUID `gorm:"type:uuid;not null"` 26 | Status OrderStatus 27 | } 28 | -------------------------------------------------------------------------------- /server/middleware/request_id.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/google/uuid" 7 | "github.com/rs/zerolog/log" 8 | ) 9 | 10 | func RequestID(h http.Handler) http.Handler { 11 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 12 | if r.Header.Get("X-Request-Id") == "" { 13 | r.Header.Set("X-Request-Id", uuid.New().String()) 14 | } 15 | 16 | log := log.With(). 17 | Str("request_id", r.Header.Get("X-Request-Id")). 
18 | Logger() 19 | 20 | ctx := log.WithContext(r.Context()) 21 | h.ServeHTTP(w, r.WithContext(ctx)) 22 | }) 23 | } 24 | -------------------------------------------------------------------------------- /internal/telemetry/metric/exporter/otlp.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rs/zerolog/log" 7 | "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" 8 | "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" 9 | ) 10 | 11 | func NewOTLP(endpoint string) *otlpmetric.Exporter { 12 | ctx := context.Background() 13 | metricClient := otlpmetricgrpc.NewClient( 14 | otlpmetricgrpc.WithInsecure(), 15 | otlpmetricgrpc.WithEndpoint(endpoint)) 16 | 17 | metricExp, err := otlpmetric.New(ctx, metricClient) 18 | if err != nil { 19 | log.Fatal().Err(err).Msgf("Failed to create the collector metric exporter") 20 | } 21 | 22 | return metricExp 23 | } 24 | -------------------------------------------------------------------------------- /booking/services/instrument.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "go.opentelemetry.io/otel" 5 | "go.opentelemetry.io/otel/attribute" 6 | "go.opentelemetry.io/otel/metric/global" 7 | "go.opentelemetry.io/otel/metric/instrument" 8 | "go.opentelemetry.io/otel/metric/unit" 9 | ) 10 | 11 | var tracer = otel.Tracer("github.com/imrenagi/concurrent-booking/booking/services") 12 | 13 | var meter = global.Meter("github.com/imrenagi/concurrent-booking/booking/services") 14 | 15 | var orderCounter, _ = meter.SyncInt64().Counter("order", 16 | instrument.WithDescription("number of order with its status"), 17 | instrument.WithUnit(unit.Dimensionless)) 18 | 19 | var orderStatusKey = attribute.Key("status") 20 | -------------------------------------------------------------------------------- /server/db.go: 
-------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rs/zerolog/log" 7 | "gorm.io/gorm" 8 | 9 | "github.com/imrenagi/concurrent-booking/booking" 10 | gormpg "github.com/imrenagi/concurrent-booking/internal/store/gorm/postgres" 11 | ) 12 | 13 | func db() *gorm.DB { 14 | db := gormpg.NewDB(gormpg.Config{ 15 | Host: os.Getenv("DB_HOST"), 16 | Port: os.Getenv("DB_PORT"), 17 | User: os.Getenv("DB_USER"), 18 | Password: os.Getenv("DB_PASSWORD"), 19 | Name: os.Getenv("DB_NAME"), 20 | }) 21 | if err := db.AutoMigrate(&booking.Show{}, &booking.Order{}); err != nil { 22 | log.Fatal().Err(err).Msgf("database migration failed") 23 | } 24 | return db 25 | } 26 | -------------------------------------------------------------------------------- /booking/stores/show.go: -------------------------------------------------------------------------------- 1 | package stores 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/google/uuid" 7 | "gorm.io/gorm" 8 | 9 | "github.com/imrenagi/concurrent-booking/booking" 10 | ) 11 | 12 | func NewShow(db *gorm.DB) *Show { 13 | return &Show{ 14 | db: db, 15 | } 16 | } 17 | 18 | type Show struct { 19 | db *gorm.DB 20 | } 21 | 22 | func (c Show) FindConcertByID(ctx context.Context, ID uuid.UUID) (*booking.Show, error) { 23 | var show *booking.Show 24 | err := c.db.WithContext(ctx). 25 | Where("id = ?", ID). 
26 | First(&show).Error 27 | return show, err 28 | } 29 | 30 | func (c Show) Save(ctx context.Context, concert *booking.Show) error { 31 | return c.db.WithContext(ctx).Save(&concert).Error 32 | } 33 | -------------------------------------------------------------------------------- /k8s/hey.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: load-generator 5 | spec: 6 | schedule: "*/5 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: hey 13 | image: williamyeh/hey 14 | args: 15 | - "-z" 16 | - "5m" 17 | - "-c" 18 | - "3" 19 | - "-q" 20 | - "2" 21 | - "-m" 22 | - "POST" 23 | - "-d" 24 | - '{"show_id": "b9b0d5da-98a4-4b09-b5f5-83dc0c3b9964"}' 25 | - "http://booking-service.booking:9999/api/v2/booking" 26 | restartPolicy: Never 27 | -------------------------------------------------------------------------------- /internal/telemetry/trace/exporter/otlp.go: -------------------------------------------------------------------------------- 1 | package exporter 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rs/zerolog/log" 7 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace" 8 | "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" 9 | "google.golang.org/grpc" 10 | ) 11 | 12 | func NewOTLP(endpoint string) *otlptrace.Exporter { 13 | ctx := context.Background() 14 | traceClient := otlptracegrpc.NewClient( 15 | otlptracegrpc.WithInsecure(), 16 | otlptracegrpc.WithEndpoint(endpoint), 17 | otlptracegrpc.WithDialOption(grpc.WithBlock())) 18 | traceExp, err := otlptrace.New(ctx, traceClient) 19 | if err != nil { 20 | log.Fatal().Err(err).Msgf("Failed to create the collector trace exporter") 21 | } 22 | return traceExp 23 | } 24 | -------------------------------------------------------------------------------- /worker/middleware/tracing.go: -------------------------------------------------------------------------------- 1 
| package middleware 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/hibiken/asynq" 8 | "go.opentelemetry.io/otel/trace" 9 | 10 | "github.com/imrenagi/concurrent-booking/booking/services" 11 | "github.com/imrenagi/concurrent-booking/internal/telemetry" 12 | ) 13 | 14 | func SpanPropagator(next asynq.Handler) asynq.Handler { 15 | return asynq.HandlerFunc(func(ctx context.Context, t *asynq.Task) error { 16 | var tp services.TaskPayload 17 | err := json.Unmarshal(t.Payload(), &tp) 18 | if err != nil { 19 | return err 20 | } 21 | spanCtx, _ := telemetry.ConstructNewSpanContext(telemetry.NewRequest{ 22 | TraceID: tp.Context.TraceID, 23 | SpanID: tp.Context.SpanID, 24 | }) 25 | ctx = trace.ContextWithSpanContext(ctx, spanCtx) 26 | payload, err := json.Marshal(tp.Data) 27 | if err != nil { 28 | return err 29 | } 30 | return next.ProcessTask(ctx, asynq.NewTask(t.Type(), payload)) 31 | }) 32 | } 33 | -------------------------------------------------------------------------------- /cmd/commands/worker.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "os/signal" 7 | "syscall" 8 | 9 | "github.com/rs/zerolog/log" 10 | "github.com/spf13/cobra" 11 | 12 | "github.com/imrenagi/concurrent-booking/worker" 13 | ) 14 | 15 | func workerCmd() *cobra.Command { 16 | 17 | var command = &cobra.Command{ 18 | Use: "worker", 19 | Short: "Run the worker", 20 | Run: func(c *cobra.Command, args []string) { 21 | worker := worker.NewWorker() 22 | 23 | ctx := context.Background() 24 | ctx, cancel := context.WithCancel(ctx) 25 | 26 | ch := make(chan os.Signal, 1) 27 | signal.Notify(ch, os.Interrupt) 28 | signal.Notify(ch, syscall.SIGTERM) 29 | 30 | go func() { 31 | oscall := <-ch 32 | log.Debug().Msgf("system call:%+v", oscall) 33 | cancel() 34 | }() 35 | 36 | // TODO Move shutdown handling inside object and remove context parameter. 
37 | worker.Run(ctx) 38 | }, 39 | } 40 | 41 | return command 42 | 43 | } 44 | -------------------------------------------------------------------------------- /internal/telemetry/span.go: -------------------------------------------------------------------------------- 1 | package telemetry 2 | 3 | import ( 4 | "fmt" 5 | 6 | "go.opentelemetry.io/otel/trace" 7 | ) 8 | 9 | type NewRequest struct { 10 | Requestid string `json: "requestid"` 11 | TraceID string 12 | SpanID string 13 | } 14 | 15 | func ConstructNewSpanContext(request NewRequest) (spanContext trace.SpanContext, err error) { 16 | var traceID trace.TraceID 17 | traceID, err = trace.TraceIDFromHex(request.TraceID) 18 | if err != nil { 19 | fmt.Println("error: ", err) 20 | return spanContext, err 21 | } 22 | var spanID trace.SpanID 23 | spanID, err = trace.SpanIDFromHex(request.SpanID) 24 | if err != nil { 25 | fmt.Println("error: ", err) 26 | return spanContext, err 27 | } 28 | var spanContextConfig trace.SpanContextConfig 29 | spanContextConfig.TraceID = traceID 30 | spanContextConfig.SpanID = spanID 31 | spanContextConfig.TraceFlags = 01 32 | spanContextConfig.Remote = false 33 | spanContext = trace.NewSpanContext(spanContextConfig) 34 | return spanContext, nil 35 | } 36 | -------------------------------------------------------------------------------- /booking/services/task.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "github.com/hibiken/asynq" 8 | "go.opentelemetry.io/otel/trace" 9 | 10 | "github.com/imrenagi/concurrent-booking/booking" 11 | ) 12 | 13 | // A list of task types. 
14 | const ( 15 | TypeReservation = "booking:reserve" 16 | ) 17 | 18 | func NewReservationTask(ctx context.Context, order booking.Order) (*asynq.Task, error) { 19 | span := trace.SpanFromContext(ctx) 20 | defer span.End() 21 | 22 | data := &TaskPayload{ 23 | Context: TaskContext{ 24 | SpanID: span.SpanContext().SpanID().String(), 25 | TraceID: span.SpanContext().TraceID().String(), 26 | }, 27 | Data: order, 28 | } 29 | payload, err := json.Marshal(data) 30 | if err != nil { 31 | return nil, err 32 | } 33 | return asynq.NewTask(TypeReservation, payload), nil 34 | } 35 | 36 | type TaskContext struct { 37 | SpanID string 38 | TraceID string 39 | } 40 | 41 | type TaskPayload struct { 42 | Context TaskContext 43 | Data interface{} 44 | } 45 | -------------------------------------------------------------------------------- /k8s/asynqmon.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: asynqmon 6 | name: asynqmon 7 | namespace: booking 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: asynqmon 13 | template: 14 | metadata: 15 | labels: 16 | app: asynqmon 17 | spec: 18 | containers: 19 | - image: hibiken/asynqmon 20 | args: 21 | - "--redis-addr=redis-master.redis:6379" 22 | name: asynqmon 23 | resources: 24 | requests: 25 | memory: "256Mi" 26 | cpu: "100m" 27 | limits: 28 | memory: "512Mi" 29 | cpu: "500m" 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: asynqmon 35 | namespace: booking 36 | labels: 37 | app: asynqmon 38 | spec: 39 | ports: 40 | - name: http 41 | port: 8080 42 | protocol: TCP 43 | targetPort: 8080 44 | selector: 45 | app: asynqmon 46 | type: ClusterIP -------------------------------------------------------------------------------- /cmd/commands/server.go: -------------------------------------------------------------------------------- 1 | package commands 2 | 3 | import ( 4 | "context" 5 | "os" 6 
| "os/signal" 7 | "syscall" 8 | 9 | "github.com/rs/zerolog/log" 10 | "github.com/spf13/cobra" 11 | 12 | "github.com/imrenagi/concurrent-booking/server" 13 | ) 14 | 15 | func serverCmd() *cobra.Command { 16 | var ( 17 | listenPort int 18 | ) 19 | var command = &cobra.Command{ 20 | Use: "server", 21 | Short: "Run the API server", 22 | Long: "Run the API server", 23 | Run: func(c *cobra.Command, args []string) { 24 | api := server.NewServer() 25 | 26 | ctx := context.Background() 27 | ctx, cancel := context.WithCancel(ctx) 28 | 29 | ch := make(chan os.Signal, 1) 30 | signal.Notify(ch, os.Interrupt) 31 | signal.Notify(ch, syscall.SIGTERM) 32 | 33 | go func() { 34 | oscall := <-ch 35 | log.Debug().Msgf("system call:%+v", oscall) 36 | cancel() 37 | }() 38 | 39 | // TODO Move shutdown handling inside object and remove context parameter. 40 | api.Run(ctx, listenPort) 41 | }, 42 | } 43 | 44 | command.Flags().IntVar(&listenPort, "port", 9999, "Listen on given port") 45 | return command 46 | 47 | } 48 | -------------------------------------------------------------------------------- /internal/store/gorm/postgres/db.go: -------------------------------------------------------------------------------- 1 | package postgres 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/rs/zerolog/log" 7 | "github.com/uptrace/opentelemetry-go-extra/otelgorm" 8 | "gorm.io/driver/postgres" 9 | "gorm.io/gorm" 10 | ) 11 | 12 | type Config struct { 13 | Host string 14 | Port string 15 | User string 16 | Password string 17 | // Name is used for database name 18 | Name string 19 | } 20 | 21 | func (d Config) String() string { 22 | return fmt.Sprintf("host=%s port=%s user=%s DB.name=%s password=%s sslmode=disable", d.Host, d.Port, d.User, d.Name, d.Password) 23 | } 24 | 25 | func NewDB(config Config) *gorm.DB { 26 | dsn := config.String() 27 | 28 | db, err := gorm.Open(postgres.New(postgres.Config{DSN: dsn}), &gorm.Config{}) 29 | if err != nil { 30 | log.Fatal().Err(err).Msg("unable to open db 
connection") 31 | } 32 | 33 | err = db.Use(otelgorm.NewPlugin(otelgorm.WithDBName(config.Name))) 34 | if err != nil { 35 | log.Fatal().Err(err).Msg("failed to set gorm plugin for opentelemetry ") 36 | } 37 | 38 | sqlDB, err := db.DB() 39 | if err != nil { 40 | log.Fatal().Err(err).Msg("failed to get sql db") 41 | } 42 | 43 | // Hardcode the max open connection for now 44 | sqlDB.SetMaxOpenConns(200) 45 | return db 46 | } 47 | -------------------------------------------------------------------------------- /hack/observability/otel-config.yaml: -------------------------------------------------------------------------------- 1 | receivers: 2 | otlp: 3 | protocols: 4 | grpc: 5 | 6 | exporters: 7 | googlecloud: 8 | project: io-extended-2022 9 | retry_on_failure: 10 | enabled: false 11 | prometheus: 12 | endpoint: "0.0.0.0:8889" 13 | const_labels: { } 14 | jaeger: 15 | endpoint: jaeger:14250 16 | tls: 17 | insecure: true 18 | otlp: 19 | endpoint: ${OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT} 20 | headers: 21 | api-key: ${OTEL_NEW_RELIC_EXPORTER_API_KEY} 22 | 23 | processors: 24 | batch: 25 | memory_limiter: 26 | check_interval: 1s 27 | limit_percentage: 65 28 | spike_limit_percentage: 50 29 | resourcedetection: 30 | detectors: [gcp] 31 | timeout: 10s 32 | 33 | extensions: 34 | health_check: 35 | pprof: 36 | endpoint: :1888 37 | zpages: 38 | endpoint: :55679 39 | 40 | service: 41 | extensions: [ pprof, zpages, health_check ] 42 | pipelines: 43 | traces: 44 | receivers: [ otlp ] 45 | processors: 46 | - batch 47 | exporters: 48 | - jaeger 49 | - googlecloud 50 | - otlp 51 | metrics: 52 | receivers: [ otlp ] 53 | processors: 54 | - memory_limiter 55 | - batch 56 | exporters: 57 | - prometheus 58 | - googlecloud 59 | - otlp 60 | -------------------------------------------------------------------------------- /booking/stores/order.go: -------------------------------------------------------------------------------- 1 | package stores 2 | 3 | import ( 4 | "context" 5 | 6 | 
"github.com/google/uuid" 7 | "github.com/rs/zerolog/log" 8 | "gorm.io/gorm" 9 | "gorm.io/gorm/clause" 10 | 11 | "github.com/imrenagi/concurrent-booking/booking" 12 | ) 13 | 14 | func NewOrder(db *gorm.DB) *Order { 15 | return &Order{db: db} 16 | } 17 | 18 | type Order struct { 19 | db *gorm.DB 20 | } 21 | 22 | func (o Order) FindOrderByID(ctx context.Context, ID uuid.UUID) (*booking.Order, error) { 23 | var order *booking.Order 24 | err := o.db.WithContext(ctx). 25 | Where("id = ?", ID). 26 | First(&order).Error 27 | if err != nil { 28 | return nil, err 29 | } 30 | return order, nil 31 | } 32 | 33 | func (o Order) Save(ctx context.Context, order *booking.Order) error { 34 | return o.db.WithContext(ctx).Save(&order).Error 35 | } 36 | 37 | func (o Order) Create(ctx context.Context, order *booking.Order) error { 38 | return o.db.WithContext(ctx).Create(&order).Error 39 | } 40 | 41 | func (o Order) Reserve(ctx context.Context, showID uuid.UUID) error { 42 | return o.db.Transaction(func(tx *gorm.DB) error { 43 | var show *booking.Show 44 | err := tx.WithContext(ctx). 45 | Clauses(clause.Locking{Strength: "NO KEY UPDATE"}). 46 | Where("id = ?", showID). 
47 | First(&show).Error 48 | if err != nil { 49 | return err 50 | } 51 | 52 | if show.RemainingTickets-1 < 0 { 53 | return booking.ErrTicketIsNotAvailable 54 | } 55 | show.RemainingTickets -= 1 56 | 57 | log.Debug().Int("remaining_tickets", show.RemainingTickets).Msg("remaining tickets have been decreased") 58 | 59 | return tx.WithContext(ctx).Save(&show).Error 60 | }) 61 | } 62 | -------------------------------------------------------------------------------- /worker/handler.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "os" 7 | 8 | "github.com/hibiken/asynq" 9 | 10 | "github.com/imrenagi/concurrent-booking/booking" 11 | "github.com/imrenagi/concurrent-booking/booking/services" 12 | "github.com/imrenagi/concurrent-booking/booking/stores" 13 | gormpg "github.com/imrenagi/concurrent-booking/internal/store/gorm/postgres" 14 | ) 15 | 16 | type bookingService interface { 17 | Reserve(ctx context.Context, req services.ReservationRequest) (*booking.Ticket, error) 18 | } 19 | 20 | func newHandler() *handler { 21 | db := gormpg.NewDB(gormpg.Config{ 22 | Host: os.Getenv("DB_HOST"), 23 | Port: os.Getenv("DB_PORT"), 24 | User: os.Getenv("DB_USER"), 25 | Password: os.Getenv("DB_PASSWORD"), 26 | Name: os.Getenv("DB_NAME"), 27 | }) 28 | 29 | bookingService := services.Booking{ 30 | BookingRepository: stores.NewOrder(db), 31 | ShowRepository: stores.NewShow(db), 32 | } 33 | 34 | return &handler{ 35 | bookingService: bookingService, 36 | } 37 | } 38 | 39 | type handler struct { 40 | bookingService bookingService 41 | } 42 | 43 | func (h handler) HandleReservation(ctx context.Context, t *asynq.Task) error { 44 | ctx, span := trc.Start(ctx, "handler.HandleReservation") 45 | defer span.End() 46 | 47 | var order booking.Order 48 | err := json.Unmarshal(t.Payload(), &order) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | _, err = h.bookingService.Reserve(ctx, 
services.ReservationRequest{ 54 | ShowID: order.ShowID, 55 | OrderID: order.ID, 56 | }) 57 | if err != nil && err != booking.ErrTicketIsNotAvailable { 58 | return err 59 | } 60 | 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /internal/telemetry/trace/builder.go: -------------------------------------------------------------------------------- 1 | package trace 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "go.opentelemetry.io/otel/sdk/resource" 8 | "go.opentelemetry.io/otel/sdk/trace" 9 | semconv "go.opentelemetry.io/otel/semconv/v1.4.0" 10 | ) 11 | 12 | type CloseFunc func(ctx context.Context) error 13 | 14 | func NewTraceProviderBuilder(name string) *traceProviderBuilder { 15 | return &traceProviderBuilder{ 16 | name: name, 17 | } 18 | } 19 | 20 | type traceProviderBuilder struct { 21 | name string 22 | exporter trace.SpanExporter 23 | } 24 | 25 | func (b *traceProviderBuilder) SetExporter(exp trace.SpanExporter) *traceProviderBuilder { 26 | b.exporter = exp 27 | return b 28 | } 29 | 30 | func (b *traceProviderBuilder) Build() (*trace.TracerProvider, CloseFunc, error) { 31 | ctx := context.Background() 32 | res, err := resource.New(ctx, 33 | resource.WithFromEnv(), 34 | // resource.WithProcess(), 35 | resource.WithTelemetrySDK(), 36 | resource.WithHost(), 37 | resource.WithAttributes( 38 | // the service name used to display traces in backend 39 | semconv.ServiceNameKey.String(b.name), 40 | ), 41 | ) 42 | if err != nil { 43 | return nil, nil, err 44 | } 45 | 46 | bsp := trace.NewBatchSpanProcessor(b.exporter) 47 | tracerProvider := trace.NewTracerProvider( 48 | trace.WithSampler(trace.AlwaysSample()), 49 | trace.WithResource(res), 50 | trace.WithSpanProcessor(bsp), 51 | ) 52 | 53 | return tracerProvider, func(ctx context.Context) error { 54 | cxt, cancel := context.WithTimeout(ctx, 5*time.Second) 55 | defer cancel() 56 | if err := b.exporter.Shutdown(cxt); err != nil { 57 | return err 58 | } 59 | 
return err 60 | }, nil 61 | } 62 | -------------------------------------------------------------------------------- /server/telemetry.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/metric/global" 7 | "go.opentelemetry.io/otel/propagation" 8 | 9 | "github.com/imrenagi/concurrent-booking/internal/telemetry/metric" 10 | metricExporter "github.com/imrenagi/concurrent-booking/internal/telemetry/metric/exporter" 11 | ttrace "github.com/imrenagi/concurrent-booking/internal/telemetry/trace" 12 | traceExporter "github.com/imrenagi/concurrent-booking/internal/telemetry/trace/exporter" 13 | ) 14 | 15 | func (s *Server) InitGlobalProvider(name, endpoint string) { 16 | metricExp := metricExporter.NewOTLP(endpoint) 17 | pusher, pusherCloseFn, err := metric.NewMeterProviderBuilder(). 18 | SetExporter(metricExp). 19 | SetHistogramBoundaries([]float64{5, 10, 25, 50, 100, 200, 400, 800, 1000}). 20 | Build() 21 | if err != nil { 22 | log.Fatal().Err(err).Msgf("failed initializing the meter provider") 23 | } 24 | s.metricProviderCloseFn = append(s.metricProviderCloseFn, pusherCloseFn) 25 | global.SetMeterProvider(pusher) 26 | 27 | spanExporter := traceExporter.NewOTLP(endpoint) 28 | tracerProvider, tracerProviderCloseFn, err := ttrace.NewTraceProviderBuilder(name). 29 | SetExporter(spanExporter). 30 | Build() 31 | if err != nil { 32 | log.Fatal().Err(err).Msgf("failed initializing the tracer provider") 33 | } 34 | s.traceProviderCloseFn = append(s.traceProviderCloseFn, tracerProviderCloseFn) 35 | 36 | // set global propagator to tracecontext (the default is no-op). 
37 | otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) 38 | otel.SetTracerProvider(tracerProvider) 39 | } 40 | -------------------------------------------------------------------------------- /worker/telemetry.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "github.com/rs/zerolog/log" 5 | "go.opentelemetry.io/otel" 6 | "go.opentelemetry.io/otel/metric/global" 7 | "go.opentelemetry.io/otel/propagation" 8 | 9 | "github.com/imrenagi/concurrent-booking/internal/telemetry/metric" 10 | metricExporter "github.com/imrenagi/concurrent-booking/internal/telemetry/metric/exporter" 11 | ttrace "github.com/imrenagi/concurrent-booking/internal/telemetry/trace" 12 | traceExporter "github.com/imrenagi/concurrent-booking/internal/telemetry/trace/exporter" 13 | ) 14 | 15 | func (w *Worker) InitGlobalProvider(name, endpoint string) { 16 | metricExp := metricExporter.NewOTLP(endpoint) 17 | pusher, pusherCloseFn, err := metric.NewMeterProviderBuilder(). 18 | SetExporter(metricExp). 19 | SetHistogramBoundaries([]float64{5, 10, 25, 50, 100, 200, 400, 800, 1000}). 20 | Build() 21 | if err != nil { 22 | log.Fatal().Err(err).Msgf("failed initializing the meter provider") 23 | } 24 | w.metricProviderCloseFn = append(w.metricProviderCloseFn, pusherCloseFn) 25 | global.SetMeterProvider(pusher) 26 | 27 | spanExporter := traceExporter.NewOTLP(endpoint) 28 | tracerProvider, tracerProviderCloseFn, err := ttrace.NewTraceProviderBuilder(name). 29 | SetExporter(spanExporter). 30 | Build() 31 | if err != nil { 32 | log.Fatal().Err(err).Msgf("failed initializing the tracer provider") 33 | } 34 | w.traceProviderCloseFn = append(w.traceProviderCloseFn, tracerProviderCloseFn) 35 | 36 | // set global propagator to tracecontext (the default is no-op). 
37 | otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})) 38 | otel.SetTracerProvider(tracerProvider) 39 | } 40 | -------------------------------------------------------------------------------- /k8s/worker.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: reservation-worker 6 | name: reservation-worker 7 | namespace: booking 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: reservation-worker 13 | template: 14 | metadata: 15 | labels: 16 | app: reservation-worker 17 | spec: 18 | containers: 19 | - image: imrenagi/booking:v1 20 | args: 21 | - ./booking 22 | - worker 23 | name: booking 24 | resources: 25 | requests: 26 | memory: "256Mi" 27 | cpu: "100m" 28 | limits: 29 | memory: "512Mi" 30 | cpu: "500m" 31 | env: 32 | - name: DB_HOST 33 | value: "10.35.113.3" 34 | - name: DB_PORT 35 | value: "5432" 36 | - name: DB_USER 37 | valueFrom: 38 | secretKeyRef: 39 | name: booking-db-secret 40 | key: username 41 | - name: DB_NAME 42 | valueFrom: 43 | secretKeyRef: 44 | name: booking-db-secret 45 | key: name 46 | - name: DB_PASSWORD 47 | valueFrom: 48 | secretKeyRef: 49 | name: booking-db-secret 50 | key: userpassword 51 | - name: ENVIRONMENT 52 | value: development 53 | - name: OTEL_RECEIVER_OTLP_ENDPOINT 54 | value: "otel-collector.booking:4317" 55 | - name: ASYNQ_REDIS_HOST 56 | value: "redis-master.redis:6379" 57 | --- 58 | -------------------------------------------------------------------------------- /booking/handler/booking.go: -------------------------------------------------------------------------------- 1 | package handler 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "net/http" 7 | 8 | "github.com/rs/zerolog/log" 9 | 10 | "github.com/imrenagi/concurrent-booking/booking" 11 | "github.com/imrenagi/concurrent-booking/booking/services" 12 | ) 13 | 14 | type 
BookingService interface { 15 | Book(ctx context.Context, req services.BookingRequest) (*booking.Ticket, error) 16 | BookV2(ctx context.Context, req services.BookingRequest) (*booking.Order, error) 17 | } 18 | 19 | type Handler struct { 20 | Service BookingService 21 | } 22 | 23 | func (h Handler) Booking() http.HandlerFunc { 24 | return func(w http.ResponseWriter, r *http.Request) { 25 | 26 | var body services.BookingRequest 27 | err := json.NewDecoder(r.Body).Decode(&body) 28 | if err != nil { 29 | log.Debug().Err(err).Msg("unable to parse the update addon request") 30 | http.Error(w, err.Error(), http.StatusInternalServerError) 31 | return 32 | } 33 | 34 | _, err = h.Service.Book(r.Context(), body) 35 | if err != nil { 36 | log.Debug().Err(err).Msg("failed process booking") 37 | http.Error(w, err.Error(), http.StatusBadRequest) 38 | return 39 | } 40 | 41 | w.WriteHeader(http.StatusOK) 42 | } 43 | } 44 | 45 | func (h Handler) BookingV2() http.HandlerFunc { 46 | return func(w http.ResponseWriter, r *http.Request) { 47 | 48 | var body services.BookingRequest 49 | err := json.NewDecoder(r.Body).Decode(&body) 50 | if err != nil { 51 | log.Debug().Err(err).Msg("unable to parse the update addon request") 52 | http.Error(w, err.Error(), http.StatusInternalServerError) 53 | return 54 | } 55 | 56 | order, err := h.Service.BookV2(r.Context(), body) 57 | if err != nil { 58 | log.Debug().Err(err).Msg("failed process booking") 59 | http.Error(w, err.Error(), http.StatusBadRequest) 60 | return 61 | } 62 | 63 | b, _ := json.Marshal(order) 64 | w.Header().Add("Content-Type", "application/json") 65 | w.WriteHeader(http.StatusOK) 66 | w.Write(b) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /server/route.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/rs/zerolog/log" 7 | 
"go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux" 8 | "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" 9 | "go.opentelemetry.io/otel/attribute" 10 | "go.opentelemetry.io/otel/metric/global" 11 | "go.opentelemetry.io/otel/metric/instrument" 12 | "go.opentelemetry.io/otel/metric/unit" 13 | ) 14 | 15 | func (s *Server) routes() { 16 | // healthcheck 17 | s.Router.HandleFunc("/", s.healthcheckHandler) 18 | s.Router.HandleFunc("/healthz", s.healthcheckHandler) 19 | s.Router.HandleFunc("/readyz", s.readinessHandler) 20 | 21 | // serve api 22 | api := s.Router.PathPrefix("/api/v1/").Subrouter() 23 | api.Use( 24 | otelmux.Middleware(name), 25 | ) 26 | api.Handle("/booking", otelhttp.WithRouteTag("/api/v1/booking", 27 | otelhttp.NewHandler(s.bookingHandler.Booking(), name, otelhttp.WithHTTPRouteTag("/api/v1/booking")))) 28 | 29 | apiV2 := s.Router.PathPrefix("/api/v2/").Subrouter() 30 | apiV2.Use( 31 | otelmux.Middleware(name), 32 | ) 33 | apiV2.Handle("/booking", otelhttp.WithRouteTag("/api/v2/booking", 34 | otelhttp.NewHandler(s.bookingHandler.BookingV2(), name, otelhttp.WithHTTPRouteTag("/api/v2/booking")))) 35 | } 36 | 37 | var meter = global.Meter("ex.com/basic") 38 | var lemonsKey = attribute.Key("ex.com/lemons") 39 | var commonAttrs = []attribute.KeyValue{lemonsKey.Int(10), attribute.String("A", "1"), attribute.String("B", "2"), attribute.String("C", "3")} 40 | 41 | func (s *Server) healthcheckHandler(w http.ResponseWriter, r *http.Request) { 42 | 43 | counter, _ := meter.SyncFloat64().Counter("server.healthcheck", 44 | instrument.WithDescription("testing healthcheck count"), 45 | instrument.WithUnit(unit.Dimensionless), 46 | ) 47 | counter.Add(r.Context(), 1.0, commonAttrs...) 
48 | 49 | w.Write([]byte("im alive")) 50 | } 51 | 52 | func (s *Server) readinessHandler(w http.ResponseWriter, r *http.Request) { 53 | log.Debug().Msg("test") 54 | w.Write([]byte("im ready to face the world")) 55 | } -------------------------------------------------------------------------------- /internal/telemetry/metric/builder.go: -------------------------------------------------------------------------------- 1 | package metric 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "go.opentelemetry.io/otel/metric" 9 | "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" 10 | controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" 11 | "go.opentelemetry.io/otel/sdk/metric/export" 12 | processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" 13 | "go.opentelemetry.io/otel/sdk/metric/selector/simple" 14 | ) 15 | 16 | type CloseFunc func(ctx context.Context) error 17 | 18 | func NewMeterProviderBuilder() *meterProviderBuilder { 19 | return &meterProviderBuilder{} 20 | } 21 | 22 | type meterProviderBuilder struct { 23 | exporter export.Exporter 24 | histogramBoundaries []float64 25 | } 26 | 27 | func (b *meterProviderBuilder) SetExporter(exp export.Exporter) *meterProviderBuilder { 28 | b.exporter = exp 29 | return b 30 | } 31 | 32 | func (b *meterProviderBuilder) SetHistogramBoundaries(explicitBoundaries []float64) *meterProviderBuilder { 33 | b.histogramBoundaries = explicitBoundaries 34 | return b 35 | } 36 | 37 | func (b meterProviderBuilder) Build() (metric.MeterProvider, CloseFunc, error) { 38 | var histogramOptions []histogram.Option 39 | if len(b.histogramBoundaries) > 0 { 40 | histogramOptions = append(histogramOptions, histogram.WithExplicitBoundaries(b.histogramBoundaries)) 41 | } 42 | 43 | if b.exporter == nil { 44 | return nil, nil, fmt.Errorf("exporter is not set") 45 | } 46 | 47 | pusher := controller.New( 48 | processor.NewFactory( 49 | simple.NewWithHistogramDistribution(histogramOptions...), 50 | b.exporter, 51 | ), 
52 | controller.WithExporter(b.exporter), 53 | controller.WithCollectPeriod(5*time.Second), 54 | ) 55 | 56 | if err := pusher.Start(context.Background()); err != nil { 57 | return nil, nil, err 58 | } 59 | 60 | return pusher, func(ctx context.Context) error { 61 | cxt, cancel := context.WithTimeout(ctx, 5*time.Second) 62 | defer cancel() 63 | if err := pusher.Stop(cxt); err != nil { 64 | return err 65 | } 66 | return nil 67 | }, nil 68 | } 69 | -------------------------------------------------------------------------------- /k8s/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: booking-service 6 | name: booking-service 7 | namespace: booking 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: booking-service 13 | template: 14 | metadata: 15 | labels: 16 | app: booking-service 17 | spec: 18 | containers: 19 | - image: imrenagi/booking:v1 20 | livenessProbe: 21 | httpGet: 22 | path: /healthz 23 | port: 9999 24 | initialDelaySeconds: 5 25 | periodSeconds: 5 26 | readinessProbe: 27 | httpGet: 28 | path: /healthz 29 | port: 9999 30 | initialDelaySeconds: 5 31 | periodSeconds: 5 32 | args: 33 | - ./booking 34 | - server 35 | - --port=9999 36 | name: booking 37 | resources: 38 | requests: 39 | memory: "256Mi" 40 | cpu: "100m" 41 | limits: 42 | memory: "512Mi" 43 | cpu: "500m" 44 | env: 45 | - name: DB_HOST 46 | value: "10.35.113.3" 47 | - name: DB_PORT 48 | value: "5432" 49 | - name: DB_USER 50 | valueFrom: 51 | secretKeyRef: 52 | name: booking-db-secret 53 | key: username 54 | - name: DB_NAME 55 | valueFrom: 56 | secretKeyRef: 57 | name: booking-db-secret 58 | key: name 59 | - name: DB_PASSWORD 60 | valueFrom: 61 | secretKeyRef: 62 | name: booking-db-secret 63 | key: userpassword 64 | - name: ENVIRONMENT 65 | value: development 66 | - name: TESTYA 67 | value: testya 68 | - name: OTEL_RECEIVER_OTLP_ENDPOINT 69 | value: 
"otel-collector.booking:4317" 70 | - name: ASYNQ_REDIS_HOST 71 | value: "redis-master.redis:6379" 72 | --- 73 | apiVersion: v1 74 | kind: Service 75 | metadata: 76 | name: booking-service 77 | namespace: booking 78 | labels: 79 | app: booking-service 80 | spec: 81 | ports: 82 | - name: http 83 | port: 9999 84 | protocol: TCP 85 | targetPort: 9999 86 | selector: 87 | app: booking-service 88 | type: ClusterIP -------------------------------------------------------------------------------- /worker/worker.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "time" 7 | 8 | "github.com/hibiken/asynq" 9 | "github.com/rs/zerolog/log" 10 | "go.opentelemetry.io/otel" 11 | 12 | tmetric "github.com/imrenagi/concurrent-booking/internal/telemetry/metric" 13 | ttrace "github.com/imrenagi/concurrent-booking/internal/telemetry/trace" 14 | "github.com/imrenagi/concurrent-booking/worker/middleware" 15 | ) 16 | 17 | var name = "reservation-worker" 18 | var trc = otel.Tracer("github.com/imrenagi/concurrent-booking/cmd/worker") 19 | 20 | func NewWorker() *Worker { 21 | 22 | asynqRedisHost, ok := os.LookupEnv("ASYNQ_REDIS_HOST") 23 | if !ok { 24 | log.Fatal().Msg("ASYNC_REDIS_HOST is not set") 25 | } 26 | 27 | otelAgentAddr, ok := os.LookupEnv("OTEL_RECEIVER_OTLP_ENDPOINT") 28 | if !ok { 29 | log.Fatal().Msg("OTEL_RECEIVER_OTLP_ENDPOINT is not set") 30 | } 31 | 32 | srv := asynq.NewServer( 33 | asynq.RedisClientOpt{Addr: asynqRedisHost}, 34 | asynq.Config{ 35 | Concurrency: 20, 36 | Queues: map[string]int{ 37 | "critical": 6, 38 | "default": 3, 39 | "low": 1, 40 | }, 41 | }, 42 | ) 43 | 44 | w := &Worker{ 45 | // Router: asynq.NewServeMux(), 46 | server: srv, 47 | mux: asynq.NewServeMux(), 48 | handler: newHandler(), 49 | } 50 | 51 | w.routes() 52 | w.InitGlobalProvider(name, otelAgentAddr) 53 | 54 | return w 55 | } 56 | 57 | type Worker struct { 58 | mux *asynq.ServeMux 59 | server 
*asynq.Server
	handler *handler

	metricProviderCloseFn []tmetric.CloseFunc
	traceProviderCloseFn  []ttrace.CloseFunc
}

// routes installs middleware and wires task types to their handlers.
func (w *Worker) routes() {
	w.mux.Use(middleware.SpanPropagator)
	w.mux.HandleFunc("booking:reserve", w.handler.HandleReservation)
}

// Run starts the asynq server and blocks until ctx is cancelled, then
// gracefully stops the worker and closes the telemetry providers.
func (w *Worker) Run(ctx context.Context) error {

	go func() {
		if err := w.server.Run(w.mux); err != nil {
			log.Fatal().Msgf("could not run server: %v", err)
		}
	}()

	<-ctx.Done()

	ctxShutDown, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer func() {
		cancel()
	}()

	log.Debug().Msg("stopping worker")

	w.server.Stop()
	w.server.Shutdown()

	log.Debug().Msg("worker has been stopped")

	// BUG FIX: the goroutines previously closed over the range variable
	// `closeFn` directly; under go 1.18 loop semantics every goroutine shares
	// the same variable, so all of them could end up invoking the last close
	// function. Shadow the variable per iteration.
	for _, closeFn := range w.metricProviderCloseFn {
		closeFn := closeFn
		go func() {
			err := closeFn(ctxShutDown)
			if err != nil {
				log.Error().Err(err).Msgf("Unable to close metric provider")
			}
		}()
	}
	for _, closeFn := range w.traceProviderCloseFn {
		closeFn := closeFn
		go func() {
			err := closeFn(ctxShutDown)
			if err != nil {
				log.Error().Err(err).Msgf("Unable to close trace provider")
			}
		}()
	}

	return nil
}
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- # Simple Concert Ticketing

This application is used for demonstrating the application instrumentation, especially trace and metrics.

## Running it locally

### Enable new relic (optional)

To enable new relic instrumentation:

1. Create free newrelic account and grab the license key.

1.
Set the following environment variables:

   * `OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT` to `https://otlp.nr-data.net:4317`
   * `OTEL_NEW_RELIC_EXPORTER_API_KEY` to the ingest license key from your newrelic account

### Enable Google Cloud Tracing and Monitoring

1. Create GCP service account and ensure that service account has (at minimum) `roles/monitoring.metricWriter` and `roles/cloudtrace.agent` role.

1. Update value of `exporters.googlecloud.project` in [otel-config file](./hack/observability/otel-config.yaml) to your google cloud project id.

### Run Docker Compose

```shell
docker-compose up
```

### Open Dashboard

1. Jaeger Tracing Dashboard: [http://127.0.0.1:16686/](http://127.0.0.1:16686/)
2. Prometheus Dashboard: [http://127.0.0.1:9090/](http://127.0.0.1:9090/)
3. Grafana Dashboard: [http://127.0.0.1:3000/](http://127.0.0.1:3000/)
4. Asynqmon Dashboard: [http://127.0.0.1:8011/](http://127.0.0.1:8011/)
5. Newrelic dashboard (optional)
6. Google Cloud Monitoring
7. Google Cloud Tracing

### Run load generator

1. Install hey

1. Run load generator

    ```shell
    hey -z 1m -c 2 -q 2 -m POST -d '{"show_id": "b9b0d5da-98a4-4b09-b5f5-83dc0c3b9964"}' http://localhost:9999/api/v1/booking
    hey -z 1m -c 2 -q 2 -m POST -d '{"show_id": "b9b0d5da-98a4-4b09-b5f5-83dc0c3b9964"}' http://localhost:9999/api/v2/booking
    ```

## Running it on kubernetes

WARNING: This setup is not properly documented.

1. Setup CloudSQL Postgres Instance. Update `secret.yaml.example`, `app.yaml`, and `worker.yaml` with correct value for database config.
56 | ```shell 57 | docker run -d \ 58 | -v ${PWD}/hack/observability/secrets/psql-local.json:/config/key.json \ 59 | -p 127.0.0.1:5432:5432 \ 60 | gcr.io/cloudsql-docker/gce-proxy:1.31.0 /cloud_sql_proxy \ 61 | -instances=io-extended-2022:asia-southeast1:booking=tcp:0.0.0.0:5432 -credential_file=/config/key.json 62 | ``` 63 | 64 | 1. Install bitnami/helm chart 65 | 66 | ```shell 67 | helm repo add bitnami https://charts.bitnami.com/bitnami 68 | helm install redis --namespace redis --values redis-values.yml bitnami/redis 69 | ``` 70 | 71 | 1. Apply all manifest in `k8s` directory. 72 | 73 | 1. Port forward prometheus dashboard 74 | 75 | ```shell 76 | kubectl -n gmp-test port-forward svc/frontend 9091:9090 77 | ``` 78 | 79 | 1. Run load test 80 | 81 | ```shell 82 | k apply -f k8s/hey.yaml 83 | ``` 84 | 85 | 86 | ## Demo Script 87 | 88 | ```shell 89 | hey -n 1 -c 1 -q 1 -m POST -d '{"show_id": "b9b0d5da-98a4-4b09-b5f5-83dc0c3b9964"}' http://localhost:9999/api/v1/booking 90 | 91 | hey -z 5s -c 100 -m POST -d '{"show_id": "b9b0d5da-98a4-4b09-b5f5-83dc0c3b9964"}' http://localhost:9999/api/v1/booking 92 | ``` -------------------------------------------------------------------------------- /booking/stores/order_test.go: -------------------------------------------------------------------------------- 1 | package stores_test 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/google/uuid" 10 | "github.com/rs/zerolog/log" 11 | "github.com/stretchr/testify/assert" 12 | "github.com/testcontainers/testcontainers-go" 13 | "github.com/testcontainers/testcontainers-go/wait" 14 | "gorm.io/driver/postgres" 15 | "gorm.io/gorm" 16 | 17 | "github.com/imrenagi/concurrent-booking/booking" 18 | "github.com/imrenagi/concurrent-booking/booking/stores" 19 | ) 20 | 21 | func postgresC() (testcontainers.Container, error) { 22 | req := testcontainers.ContainerRequest{ 23 | Image: "postgres:13-alpine", 24 | ExposedPorts: []string{"5432/tcp"}, 25 | Env: 
map[string]string{ 26 | "POSTGRES_DB": "booking", 27 | "POSTGRES_USER": "booking", 28 | "POSTGRES_PASSWORD": "booking", 29 | }, 30 | WaitingFor: wait.ForLog("database system is ready to accept connections").WithPollInterval(1 * time.Second), 31 | } 32 | 33 | return testcontainers.GenericContainer(context.TODO(), testcontainers.GenericContainerRequest{ 34 | ContainerRequest: req, 35 | Started: true, 36 | }) 37 | } 38 | 39 | func gormFromContainer(ctx context.Context, container testcontainers.Container) (*gorm.DB, error) { 40 | port, err := container.Ports(ctx) 41 | log.Debug().Msgf("%v %v", port["5432/tcp"][0].HostIP, port["5432/tcp"][0].HostPort) 42 | 43 | dsn := fmt.Sprintf("host=%s port=%s user=%s DB.name=%s password=%s sslmode=disable", 44 | port["5432/tcp"][0].HostIP, 45 | port["5432/tcp"][0].HostPort, 46 | "booking", 47 | "booking", 48 | "booking") 49 | 50 | db, err := gorm.Open(postgres.New(postgres.Config{DSN: dsn}), &gorm.Config{}) 51 | if err != nil { 52 | log.Fatal().Err(err).Msg("unable to open db connection") 53 | } 54 | 55 | err = db.AutoMigrate(&booking.Show{}) 56 | return db, err 57 | } 58 | 59 | func TestConcurrentBooking(t *testing.T) { 60 | ctx := context.TODO() 61 | postgresC, err := postgresC() 62 | assert.NoError(t, err) 63 | defer postgresC.Terminate(ctx) 64 | 65 | db, err := gormFromContainer(ctx, postgresC) 66 | assert.NoError(t, err) 67 | 68 | repo := stores.NewShow(db) 69 | id := uuid.New() 70 | err = repo.Save(context.TODO(), &booking.Show{ 71 | ID: id, 72 | RemainingTickets: 10, 73 | }) 74 | assert.NoError(t, err) 75 | 76 | ticketResultChan := make(chan booking.Ticket, 10) 77 | 78 | orderRepo := stores.NewOrder(db) 79 | 80 | for i := 0; i < 10; i++ { 81 | go func(i int) { 82 | err := orderRepo.Reserve(context.TODO(), id) 83 | if err != nil { 84 | log.Fatal().Msg("error") 85 | } 86 | ticketResultChan <- booking.Ticket{} 87 | }(i) 88 | } 89 | 90 | doneChan := make(chan bool) 91 | go func() { 92 | <-time.After(3 * time.Second) 93 | doneChan 
<- true 94 | }() 95 | 96 | ticketCount := 0 97 | jump: 98 | for { 99 | select { 100 | case <-ticketResultChan: 101 | ticketCount++ 102 | case <-doneChan: 103 | break jump 104 | } 105 | } 106 | assert.Equal(t, 10, ticketCount) 107 | show, err := repo.FindConcertByID(context.TODO(), id) 108 | assert.NoError(t, err) 109 | assert.Equal(t, 0, show.RemainingTickets) 110 | } 111 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.9' 2 | services: 3 | api: 4 | build: 5 | context: . 6 | command: ["./booking", "server", "--port=9999"] 7 | environment: 8 | DB_HOST: postgres 9 | DB_PORT: 5432 10 | DB_USER: booking 11 | DB_PASSWORD: booking 12 | DB_NAME: booking 13 | ENVIRONMENT: development 14 | OTEL_RECEIVER_OTLP_ENDPOINT: otel-collector:4317 15 | ASYNQ_REDIS_HOST: redis:6379 16 | expose: 17 | - "9999" 18 | ports: 19 | - "9999:9999" 20 | restart: always 21 | worker: 22 | build: 23 | context: . 
24 | command: ["./booking", "worker"] 25 | environment: 26 | DB_HOST: postgres 27 | DB_PORT: 5432 28 | DB_USER: booking 29 | DB_PASSWORD: booking 30 | DB_NAME: booking 31 | ENVIRONMENT: development 32 | OTEL_RECEIVER_OTLP_ENDPOINT: otel-collector:4317 33 | ASYNQ_REDIS_HOST: redis:6379 34 | restart: always 35 | postgres: 36 | image: postgres:13-alpine 37 | command: postgres -c 'max_connections=500' 38 | environment: 39 | POSTGRES_DB: booking 40 | POSTGRES_USER: booking 41 | POSTGRES_PASSWORD: booking 42 | expose: 43 | - "5432" 44 | ports: 45 | - "5436:5432" 46 | restart: always 47 | volumes: 48 | - postgres:/var/lib/postgresql/data/ 49 | redis: 50 | image: redis:6 51 | expose: 52 | - "6379" 53 | ports: 54 | - "6379:6379" 55 | restart: always 56 | jaeger: 57 | image: jaegertracing/all-in-one:1.27 58 | environment: 59 | COLLECTOR_ZIPKIN_HOST_PORT: 9411 60 | expose: 61 | - "16686" 62 | ports: 63 | - 5775:5775/udp 64 | - 6831:6831/udp 65 | - 6832:6832/udp 66 | - 5778:5778 67 | - 16686:16686 68 | - 14268:14268 69 | - 14250:14250 70 | - 9411:9411 71 | restart: always 72 | otel-collector: 73 | image: otel/opentelemetry-collector-contrib:latest 74 | expose: 75 | - "4317" 76 | ports: 77 | - "1888:1888" # pprof extension 78 | - "8888:8888" # Prometheus metrics exposed by the collector 79 | - "8889:8889" # Prometheus exporter metrics 80 | - "13133:13133" # health_check extension 81 | - "4317:4317" # OTLP gRPC receiver 82 | - "55679:55679" # zpages extension 83 | volumes: 84 | - "${PWD}/hack/observability:/observability" 85 | - ./hack/observability/secrets/otel-collector.json:/etc/otel/key.json 86 | command: ["--config=/observability/otel-config.yaml", "${OTELCOL_ARGS}"] 87 | restart: always 88 | environment: 89 | - OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT=${OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT} 90 | - OTEL_NEW_RELIC_EXPORTER_API_KEY=${OTEL_NEW_RELIC_EXPORTER_API_KEY} 91 | - GOOGLE_APPLICATION_CREDENTIALS=/etc/otel/key.json 92 | depends_on: 93 | - jaeger 94 | grafana: 95 | image: 
grafana/grafana:7.1.5 96 | ports: 97 | - 3000:3000 98 | volumes: 99 | - ./hack/observability/grafana/provisioning:/etc/grafana/provisioning 100 | - ./hack/observability/grafana/dashboards:/etc/grafana/demo-dashboards 101 | - grafana_data:/var/lib/grafana 102 | prometheus: 103 | image: prom/prometheus:latest 104 | volumes: 105 | - ./hack/observability/prometheus.yaml:/etc/prometheus/prometheus.yml 106 | - prometheus_data:/prometheus 107 | ports: 108 | - "9090:9090" 109 | asynqmon: 110 | image: hibiken/asynqmon 111 | command: 112 | - "--redis-addr=redis:6379" 113 | - "--enable-metrics-exporter" 114 | - "--prometheus-addr=http://prometheus:9090" 115 | ports: 116 | - 8011:8080 117 | 118 | volumes: 119 | postgres: 120 | grafana_data: 121 | prometheus_data: -------------------------------------------------------------------------------- /server/server.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "time" 9 | 10 | "github.com/gorilla/mux" 11 | "github.com/hibiken/asynq" 12 | "github.com/rs/cors" 13 | "github.com/rs/zerolog/log" 14 | "gorm.io/gorm" 15 | 16 | "github.com/imrenagi/concurrent-booking/booking/handler" 17 | "github.com/imrenagi/concurrent-booking/booking/services" 18 | "github.com/imrenagi/concurrent-booking/booking/stores" 19 | tmetric "github.com/imrenagi/concurrent-booking/internal/telemetry/metric" 20 | ttrace "github.com/imrenagi/concurrent-booking/internal/telemetry/trace" 21 | "github.com/imrenagi/concurrent-booking/server/middleware" 22 | ) 23 | 24 | var name = "booking-service" 25 | 26 | type BookingHandler interface { 27 | Booking() http.HandlerFunc 28 | BookingV2() http.HandlerFunc 29 | } 30 | 31 | // NewServer ... 
32 | func NewServer() *Server { 33 | 34 | db := db() 35 | 36 | otelAgentAddr, ok := os.LookupEnv("OTEL_RECEIVER_OTLP_ENDPOINT") 37 | if !ok { 38 | log.Fatal().Msg("OTEL_RECEIVER_OTLP_ENDPOINT is not set") 39 | } 40 | 41 | asynqRedisHost, ok := os.LookupEnv("ASYNQ_REDIS_HOST") 42 | if !ok { 43 | log.Fatal().Msg("ASYNC_REDIS_HOST is not set") 44 | } 45 | 46 | bookingService := services.Booking{ 47 | BookingRepository: stores.NewOrder(db), 48 | ShowRepository: stores.NewShow(db), 49 | Dispatcher: asynq.NewClient(asynq.RedisClientOpt{Addr: asynqRedisHost}), 50 | } 51 | 52 | srv := &Server{ 53 | Router: mux.NewRouter(), 54 | db: db, 55 | bookingHandler: &handler.Handler{Service: bookingService}, 56 | } 57 | 58 | srv.InitGlobalProvider(name, otelAgentAddr) 59 | srv.routes() 60 | 61 | return srv 62 | } 63 | 64 | type Server struct { 65 | Router *mux.Router 66 | db *gorm.DB 67 | 68 | metricProviderCloseFn []tmetric.CloseFunc 69 | traceProviderCloseFn []ttrace.CloseFunc 70 | 71 | bookingHandler BookingHandler 72 | } 73 | 74 | // Run ... 
75 | func (s *Server) Run(ctx context.Context, port int) error { 76 | 77 | httpS := http.Server{ 78 | Addr: fmt.Sprintf(":%d", port), 79 | Handler: s.cors().Handler(middleware.RequestID(s.Router)), 80 | } 81 | 82 | log.Info().Msgf("server serving on port %d ", port) 83 | 84 | go func() { 85 | if err := httpS.ListenAndServe(); err != nil && err != http.ErrServerClosed { 86 | log.Fatal().Msgf("listen:%+s\n", err) 87 | } 88 | }() 89 | 90 | <-ctx.Done() 91 | 92 | log.Printf("server stopped") 93 | 94 | ctxShutDown, cancel := context.WithTimeout(context.Background(), 30*time.Second) 95 | defer func() { 96 | cancel() 97 | }() 98 | 99 | err := httpS.Shutdown(ctxShutDown) 100 | if err != nil { 101 | log.Fatal().Msgf("server Shutdown Failed:%+s", err) 102 | } 103 | 104 | log.Printf("server exited properly") 105 | 106 | if err == http.ErrServerClosed { 107 | err = nil 108 | } 109 | 110 | sql, err := s.db.DB() 111 | if err != nil { 112 | log.Fatal().Msgf("unable to get db driver") 113 | } 114 | 115 | if err = sql.Close(); err != nil { 116 | log.Fatal().Msgf("unable close db connection") 117 | } 118 | 119 | for _, closeFn := range s.metricProviderCloseFn { 120 | go func() { 121 | err = closeFn(ctxShutDown) 122 | if err != nil { 123 | log.Error().Err(err).Msgf("Unable to close metric provider") 124 | } 125 | }() 126 | } 127 | for _, closeFn := range s.traceProviderCloseFn { 128 | go func() { 129 | err = closeFn(ctxShutDown) 130 | if err != nil { 131 | log.Error().Err(err).Msgf("Unable to close trace provider") 132 | } 133 | }() 134 | } 135 | 136 | return err 137 | } 138 | 139 | func (s *Server) cors() *cors.Cors { 140 | return cors.New(cors.Options{ 141 | AllowedOrigins: []string{"*"}, 142 | AllowedMethods: []string{"POST", "GET", "PUT", "DELETE", "HEAD", "OPTIONS"}, 143 | AllowedHeaders: []string{"Accept", "Content-Type", "Content-Length", "Accept-Encoding", "X-CSRF-Token", "Authorization"}, 144 | MaxAge: 60, // 1 minutes 145 | AllowCredentials: true, 146 | OptionsPassthrough: 
false, 147 | Debug: false, 148 | }) 149 | } 150 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/imrenagi/concurrent-booking 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/google/uuid v1.3.0 7 | github.com/gorilla/mux v1.8.0 8 | github.com/hibiken/asynq v0.23.0 9 | github.com/rs/cors v1.8.2 10 | github.com/rs/zerolog v1.27.0 11 | github.com/stretchr/testify v1.8.0 12 | github.com/testcontainers/testcontainers-go v0.13.0 13 | github.com/uptrace/opentelemetry-go-extra/otelgorm v0.1.14 14 | go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.33.0 15 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.33.0 16 | go.opentelemetry.io/otel v1.8.0 17 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 18 | go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.31.0 19 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.8.0 20 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.8.0 21 | go.opentelemetry.io/otel/metric v0.31.0 22 | go.opentelemetry.io/otel/sdk v1.8.0 23 | go.opentelemetry.io/otel/sdk/metric v0.31.0 24 | go.opentelemetry.io/otel/trace v1.8.0 25 | google.golang.org/grpc v1.46.2 26 | gorm.io/driver/postgres v1.3.8 27 | gorm.io/gorm v1.23.8 28 | ) 29 | 30 | require ( 31 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect 32 | github.com/Microsoft/go-winio v0.4.17 // indirect 33 | github.com/Microsoft/hcsshim v0.8.23 // indirect 34 | github.com/cenkalti/backoff/v4 v4.1.3 // indirect 35 | github.com/cespare/xxhash/v2 v2.1.1 // indirect 36 | github.com/containerd/cgroups v1.0.1 // indirect 37 | github.com/containerd/containerd v1.5.9 // indirect 38 | github.com/davecgh/go-spew v1.1.1 // indirect 39 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 40 | 
github.com/docker/distribution v2.7.1+incompatible // indirect 41 | github.com/docker/docker v20.10.11+incompatible // indirect 42 | github.com/docker/go-connections v0.4.0 // indirect 43 | github.com/docker/go-units v0.4.0 // indirect 44 | github.com/felixge/httpsnoop v1.0.3 // indirect 45 | github.com/go-logr/logr v1.2.3 // indirect 46 | github.com/go-logr/stdr v1.2.2 // indirect 47 | github.com/go-redis/redis/v8 v8.11.2 // indirect 48 | github.com/gogo/protobuf v1.3.2 // indirect 49 | github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect 50 | github.com/golang/protobuf v1.5.2 // indirect 51 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect 52 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 53 | github.com/jackc/chunkreader/v2 v2.0.1 // indirect 54 | github.com/jackc/pgconn v1.12.1 // indirect 55 | github.com/jackc/pgio v1.0.0 // indirect 56 | github.com/jackc/pgpassfile v1.0.0 // indirect 57 | github.com/jackc/pgproto3/v2 v2.3.0 // indirect 58 | github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect 59 | github.com/jackc/pgtype v1.11.0 // indirect 60 | github.com/jackc/pgx/v4 v4.16.1 // indirect 61 | github.com/jinzhu/inflection v1.0.0 // indirect 62 | github.com/jinzhu/now v1.1.5 // indirect 63 | github.com/magiconair/properties v1.8.5 // indirect 64 | github.com/mattn/go-colorable v0.1.12 // indirect 65 | github.com/mattn/go-isatty v0.0.14 // indirect 66 | github.com/moby/sys/mount v0.2.0 // indirect 67 | github.com/moby/sys/mountinfo v0.5.0 // indirect 68 | github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect 69 | github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect 70 | github.com/opencontainers/go-digest v1.0.0 // indirect 71 | github.com/opencontainers/image-spec v1.0.2 // indirect 72 | github.com/opencontainers/runc v1.0.2 // indirect 73 | github.com/pkg/errors v0.9.1 // indirect 74 | github.com/pmezard/go-difflib v1.0.0 // indirect 75 | 
github.com/robfig/cron/v3 v3.0.1 // indirect 76 | github.com/sirupsen/logrus v1.8.1 // indirect 77 | github.com/spf13/cast v1.3.1 // indirect 78 | github.com/spf13/cobra v1.5.0 // indirect 79 | github.com/spf13/pflag v1.0.5 // indirect 80 | github.com/uptrace/opentelemetry-go-extra/otelsql v0.1.14 // indirect 81 | go.opencensus.io v0.22.4 // indirect 82 | go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.8.0 // indirect 83 | go.opentelemetry.io/proto/otlp v0.18.0 // indirect 84 | golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect 85 | golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect 86 | golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect 87 | golang.org/x/text v0.3.7 // indirect 88 | golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect 89 | google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 // indirect 90 | google.golang.org/protobuf v1.28.0 // indirect 91 | gopkg.in/yaml.v3 v3.0.1 // indirect 92 | ) 93 | 94 | replace go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.33.0 => github.com/imrenagi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.33.1-0.20220718014305-990bd3eb8544 95 | -------------------------------------------------------------------------------- /booking/services/booking.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/google/uuid" 7 | "github.com/hibiken/asynq" 8 | "github.com/rs/zerolog/log" 9 | "go.opentelemetry.io/otel/attribute" 10 | "go.opentelemetry.io/otel/trace" 11 | 12 | "github.com/imrenagi/concurrent-booking/booking" 13 | ) 14 | 15 | type BookingRepository interface { 16 | FindOrderByID(ctx context.Context, ID uuid.UUID) (*booking.Order, error) 17 | Reserve(ctx context.Context, ID uuid.UUID) error 18 | Save(ctx context.Context, order *booking.Order) error 19 | Create(ctx context.Context, order *booking.Order) error 20 | } 21 | 22 
| type ShowRepository interface { 23 | FindConcertByID(ctx context.Context, ID uuid.UUID) (*booking.Show, error) 24 | } 25 | 26 | type Booking struct { 27 | BookingRepository BookingRepository 28 | ShowRepository ShowRepository 29 | Dispatcher *asynq.Client 30 | } 31 | 32 | type BookingRequest struct { 33 | ShowID uuid.UUID `json:"show_id"` 34 | } 35 | 36 | func (b Booking) Book(ctx context.Context, req BookingRequest) (*booking.Ticket, error) { 37 | ctx, parentSpan := tracer.Start(ctx, "booking.BookV1") 38 | defer parentSpan.End() 39 | 40 | err := b.BookingRepository.Reserve(ctx, req.ShowID) 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | attrs := []attribute.KeyValue{ 46 | orderStatusKey.String(string(booking.Reserved)), 47 | } 48 | orderCounter.Add(ctx, 1, attrs...) 49 | 50 | return &booking.Ticket{}, nil 51 | } 52 | 53 | func (b Booking) BookV2(ctx context.Context, req BookingRequest) (*booking.Order, error) { 54 | ctx, parentSpan := tracer.Start(ctx, "booking.BookV2") 55 | defer parentSpan.End() 56 | 57 | parentSpan.AddEvent("creating new order id") 58 | order := booking.Order{ 59 | ID: uuid.New(), 60 | ShowID: req.ShowID, 61 | Status: booking.Created, 62 | } 63 | 64 | err := b.BookingRepository.Create(ctx, &order) 65 | if err != nil { 66 | parentSpan.RecordError(err) 67 | return nil, err 68 | } 69 | 70 | parentSpan.AddEvent("creating asynq task") 71 | task, err := NewReservationTask(ctx, order) 72 | if err != nil { 73 | parentSpan.RecordError(err) 74 | return nil, err 75 | } 76 | 77 | parentSpan.AddEvent("Adding task to queue") 78 | taskInfo, err := b.Dispatcher.EnqueueContext(ctx, task, asynq.Queue("critical")) 79 | if err != nil { 80 | parentSpan.RecordError(err) 81 | return nil, err 82 | } 83 | 84 | parentSpan.AddEvent("task is created", trace.WithAttributes(attribute.String("task_info_id", taskInfo.ID))) 85 | 86 | attrs := []attribute.KeyValue{ 87 | orderStatusKey.String(string(order.Status)), 88 | } 89 | orderCounter.Add(ctx, 1, attrs...) 
90 | 91 | return &order, nil 92 | } 93 | 94 | type ReservationRequest struct { 95 | ShowID uuid.UUID `json:"show_id"` 96 | OrderID uuid.UUID `json:"order_id"` 97 | } 98 | 99 | func (b Booking) Reserve(ctx context.Context, req ReservationRequest) (*booking.Ticket, error) { 100 | ctx, parentSpan := tracer.Start(ctx, "booking.Reserve") 101 | defer parentSpan.End() 102 | err := b.BookingRepository.Reserve(ctx, req.ShowID) 103 | if err != nil && err != booking.ErrTicketIsNotAvailable { 104 | return nil, err 105 | } 106 | 107 | if err == booking.ErrTicketIsNotAvailable { 108 | log.Debug().Msgf("order is rejected") 109 | err = b.setOrderToRejected(ctx, req.OrderID) 110 | if err != nil { 111 | return nil, err 112 | } 113 | } else { 114 | err = b.setOrderToReserved(ctx, req.OrderID) 115 | if err != nil { 116 | return nil, err 117 | } 118 | } 119 | 120 | return &booking.Ticket{}, nil 121 | } 122 | 123 | func (b Booking) setOrderToReserved(ctx context.Context, id uuid.UUID) error { 124 | ctx, parentSpan := tracer.Start(ctx, "booking.setOrderToReserved") 125 | defer parentSpan.End() 126 | order, err := b.BookingRepository.FindOrderByID(ctx, id) 127 | if err != nil { 128 | return err 129 | } 130 | order.Status = booking.Reserved 131 | 132 | err = b.BookingRepository.Save(ctx, order) 133 | if err != nil { 134 | return err 135 | } 136 | 137 | attrs := []attribute.KeyValue{ 138 | orderStatusKey.String(string(booking.Reserved)), 139 | } 140 | orderCounter.Add(ctx, 1, attrs...) 
141 | 142 | 	return nil 143 | } 144 | 145 | // setOrderToRejected marks the order identified by id as Rejected and
// persists it, then increments the order counter with the Rejected status label.
func (b Booking) setOrderToRejected(ctx context.Context, id uuid.UUID) error { 146 | 	ctx, parentSpan := tracer.Start(ctx, "booking.setOrderToRejected") 147 | 	defer parentSpan.End() 148 | 	order, err := b.BookingRepository.FindOrderByID(ctx, id) 149 | 	if err != nil { 150 | 		return err 151 | 	} 152 | 	order.Status = booking.Rejected 153 | 154 | 	err = b.BookingRepository.Save(ctx, order) 155 | 	if err != nil { 156 | 		return err 157 | 	} 158 | 159 | 	attrs := []attribute.KeyValue{ 160 | 		// BUG FIX: previously recorded booking.Reserved here, which counted
		// rejected orders under the "reserved" status label and corrupted the
		// reserved/rejected breakdown of orderCounter.
		orderStatusKey.String(string(booking.Rejected)), 161 | 	} 162 | 	orderCounter.Add(ctx, 1, attrs...) 163 | 164 | 	return nil 165 | } 166 | -------------------------------------------------------------------------------- /k8s/otel-collector.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: otel-collector 6 | namespace: booking 7 | labels: 8 | app.kubernetes.io/name: otel-collector 9 | app.kubernetes.io/component: otel-collector 10 | data: 11 | otel-collector-config: | 12 | receivers: 13 | otlp: 14 | protocols: 15 | grpc: 16 | 17 | exporters: 18 | googlecloud: 19 | project: io-extended-2022 20 | retry_on_failure: 21 | enabled: false 22 | prometheus: 23 | endpoint: "0.0.0.0:8889" 24 | const_labels: { } 25 | otlp: 26 | endpoint: ${OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT} 27 | headers: 28 | api-key: ${OTEL_NEW_RELIC_EXPORTER_API_KEY} 29 | 30 | logging: 31 | 32 | processors: 33 | batch: 34 | memory_limiter: 35 | check_interval: 1s 36 | limit_percentage: 65 37 | spike_limit_percentage: 50 38 | resourcedetection: 39 | detectors: [gcp] 40 | timeout: 10s 41 | 42 | extensions: 43 | health_check: 44 | pprof: 45 | endpoint: :1888 46 | zpages: 47 | endpoint: :55679 48 | 49 | service: 50 | extensions: [ pprof, zpages, health_check ] 51 | pipelines: 52 | traces: 53 | receivers: [ otlp ] 54 | processors: 55 | - batch 56 | exporters: 57 | - logging 58 | - googlecloud 59 | -
otlp 60 | metrics: 61 | receivers: [ otlp ] 62 | processors: 63 | - batch 64 | exporters: 65 | - logging 66 | - prometheus 67 | --- 68 | apiVersion: v1 69 | kind: Service 70 | metadata: 71 | name: otel-collector 72 | namespace: booking 73 | labels: 74 | app.kubernetes.io/name: otel-collector 75 | app.kubernetes.io/component: otel-collector 76 | spec: 77 | ports: 78 | - name: otlp # Default endpoint for otlp receiver. 79 | port: 4317 80 | protocol: TCP 81 | targetPort: 4317 82 | - name: metrics # Default endpoint for metrics. 83 | port: 8889 84 | protocol: TCP 85 | targetPort: 8889 86 | - name: otel-metrics # Default endpoint for metrics. 87 | port: 8888 88 | protocol: TCP 89 | targetPort: 8888 90 | selector: 91 | app.kubernetes.io/component: otel-collector 92 | type: ClusterIP 93 | --- 94 | apiVersion: apps/v1 95 | kind: Deployment 96 | metadata: 97 | name: otel-collector 98 | namespace: booking 99 | labels: 100 | app.kubernetes.io/name: otel-collector 101 | app.kubernetes.io/component: otel-collector 102 | spec: 103 | selector: 104 | matchLabels: 105 | app.kubernetes.io/name: otel-collector 106 | app.kubernetes.io/component: otel-collector 107 | minReadySeconds: 5 108 | progressDeadlineSeconds: 120 109 | replicas: 1 110 | template: 111 | metadata: 112 | labels: 113 | app.kubernetes.io/name: otel-collector 114 | app.kubernetes.io/component: otel-collector 115 | spec: 116 | containers: 117 | - args: ["--config=/etc/otel/conf/otel-collector-config.yaml", "${OTELCOL_ARGS}"] 118 | env: 119 | # - name: GOGC 120 | # value: "80" 121 | - name: OTEL_NEW_RELIC_EXPORTER_OTLP_ENDPOINT 122 | value: "https://otlp.nr-data.net:4317" 123 | - name: GOOGLE_APPLICATION_CREDENTIALS 124 | value: /etc/otel/gcp/secret/otel-collector.json 125 | - name: OTEL_NEW_RELIC_EXPORTER_API_KEY 126 | valueFrom: 127 | secretKeyRef: 128 | name: otel-collector-newrelic-secret 129 | key: apikey 130 | image: otel/opentelemetry-collector-contrib:latest 131 | name: otel-collector 132 | resources: 133 | 
limits: 134 | cpu: 400m 135 | memory: 2Gi 136 | requests: 137 | cpu: 200m 138 | memory: 1Gi 139 | ports: 140 | - containerPort: 4317 # Default endpoint for otlp receiver. 141 | - containerPort: 8889 # Default endpoint for querying metrics. 142 | - containerPort: 8888 # Prometheus metrics exposed by the collector 143 | volumeMounts: 144 | - name: otel-collector-config-vol 145 | mountPath: /etc/otel/conf 146 | - name: otel-collector-gcp-secret-vol 147 | mountPath: /etc/otel/gcp/secret 148 | volumes: 149 | - configMap: 150 | name: otel-collector 151 | items: 152 | - key: otel-collector-config 153 | path: otel-collector-config.yaml 154 | name: otel-collector-config-vol 155 | - name: otel-collector-gcp-secret-vol 156 | secret: 157 | secretName: otel-collector-gcp-secret -------------------------------------------------------------------------------- /hack/observability/grafana/dashboards/app-monitoring.json: -------------------------------------------------------------------------------- 1 | { 2 | "annotations": { 3 | "list": [ 4 | { 5 | "builtIn": 1, 6 | "datasource": "-- Grafana --", 7 | "enable": true, 8 | "hide": true, 9 | "iconColor": "rgba(0, 211, 255, 1)", 10 | "name": "Annotations & Alerts", 11 | "type": "dashboard" 12 | } 13 | ] 14 | }, 15 | "editable": true, 16 | "gnetId": null, 17 | "graphTooltip": 0, 18 | "id": 1, 19 | "links": [], 20 | "panels": [ 21 | { 22 | "aliasColors": {}, 23 | "bars": false, 24 | "dashLength": 10, 25 | "dashes": false, 26 | "datasource": null, 27 | "fieldConfig": { 28 | "defaults": { 29 | "custom": {} 30 | }, 31 | "overrides": [] 32 | }, 33 | "fill": 1, 34 | "fillGradient": 0, 35 | "gridPos": { 36 | "h": 8, 37 | "w": 8, 38 | "x": 0, 39 | "y": 0 40 | }, 41 | "hiddenSeries": false, 42 | "id": 6, 43 | "legend": { 44 | "avg": false, 45 | "current": false, 46 | "max": false, 47 | "min": false, 48 | "show": true, 49 | "total": false, 50 | "values": false 51 | }, 52 | "lines": true, 53 | "linewidth": 1, 54 | "nullPointMode": "null", 55 | 
"percentage": false, 56 | "pluginVersion": "7.1.5", 57 | "pointradius": 2, 58 | "points": false, 59 | "renderer": "flot", 60 | "seriesOverrides": [], 61 | "spaceLength": 10, 62 | "stack": false, 63 | "steppedLine": false, 64 | "targets": [ 65 | { 66 | "expr": "rate(http_server_duration_bucket{http_server_name=\"booking-service\", http_status_code=~\"2..\", le=\"+Inf\"}[5m])", 67 | "interval": "", 68 | "legendFormat": "2xx", 69 | "refId": "A" 70 | }, 71 | { 72 | "expr": "rate(http_server_duration_bucket{http_server_name=\"booking-service\", http_status_code=~\"4..\", le=\"+Inf\"}[5m])", 73 | "interval": "", 74 | "legendFormat": "4xx", 75 | "refId": "B" 76 | }, 77 | { 78 | "expr": "rate(http_server_duration_bucket{http_server_name=\"booking-service\", http_status_code=~\"5..\", le=\"+Inf\"}[5m])", 79 | "interval": "", 80 | "legendFormat": "5xx", 81 | "refId": "C" 82 | } 83 | ], 84 | "thresholds": [], 85 | "timeFrom": null, 86 | "timeRegions": [], 87 | "timeShift": null, 88 | "title": "Total Request", 89 | "tooltip": { 90 | "shared": true, 91 | "sort": 0, 92 | "value_type": "individual" 93 | }, 94 | "type": "graph", 95 | "xaxis": { 96 | "buckets": null, 97 | "mode": "time", 98 | "name": null, 99 | "show": true, 100 | "values": [] 101 | }, 102 | "yaxes": [ 103 | { 104 | "format": "short", 105 | "label": null, 106 | "logBase": 1, 107 | "max": null, 108 | "min": null, 109 | "show": true 110 | }, 111 | { 112 | "format": "short", 113 | "label": null, 114 | "logBase": 1, 115 | "max": null, 116 | "min": null, 117 | "show": true 118 | } 119 | ], 120 | "yaxis": { 121 | "align": false, 122 | "alignLevel": null 123 | } 124 | }, 125 | { 126 | "aliasColors": {}, 127 | "bars": false, 128 | "dashLength": 10, 129 | "dashes": false, 130 | "datasource": null, 131 | "fieldConfig": { 132 | "defaults": { 133 | "custom": {} 134 | }, 135 | "overrides": [] 136 | }, 137 | "fill": 1, 138 | "fillGradient": 0, 139 | "gridPos": { 140 | "h": 8, 141 | "w": 8, 142 | "x": 8, 143 | "y": 0 144 | }, 145 
| "hiddenSeries": false, 146 | "id": 4, 147 | "legend": { 148 | "avg": false, 149 | "current": false, 150 | "max": false, 151 | "min": false, 152 | "show": true, 153 | "total": false, 154 | "values": false 155 | }, 156 | "lines": true, 157 | "linewidth": 1, 158 | "nullPointMode": "null", 159 | "percentage": false, 160 | "pluginVersion": "7.1.5", 161 | "pointradius": 2, 162 | "points": false, 163 | "renderer": "flot", 164 | "seriesOverrides": [], 165 | "spaceLength": 10, 166 | "stack": false, 167 | "steppedLine": false, 168 | "targets": [ 169 | { 170 | "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{}[5m])) by(le, job))", 171 | "interval": "", 172 | "legendFormat": "all", 173 | "refId": "A" 174 | }, 175 | { 176 | "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{http_status_code=\"200\"}[5m])) by(le, job))", 177 | "interval": "", 178 | "legendFormat": "200", 179 | "refId": "B" 180 | }, 181 | { 182 | "expr": "histogram_quantile(0.95, sum(rate(http_server_duration_bucket{http_status_code!=\"200\"}[5m])) by(le, job))", 183 | "interval": "", 184 | "legendFormat": "!=200", 185 | "refId": "C" 186 | } 187 | ], 188 | "thresholds": [], 189 | "timeFrom": null, 190 | "timeRegions": [], 191 | "timeShift": null, 192 | "title": "P95 Request Latency", 193 | "tooltip": { 194 | "shared": true, 195 | "sort": 0, 196 | "value_type": "individual" 197 | }, 198 | "type": "graph", 199 | "xaxis": { 200 | "buckets": null, 201 | "mode": "time", 202 | "name": null, 203 | "show": true, 204 | "values": [] 205 | }, 206 | "yaxes": [ 207 | { 208 | "format": "short", 209 | "label": null, 210 | "logBase": 1, 211 | "max": null, 212 | "min": null, 213 | "show": true 214 | }, 215 | { 216 | "format": "short", 217 | "label": null, 218 | "logBase": 1, 219 | "max": null, 220 | "min": null, 221 | "show": true 222 | } 223 | ], 224 | "yaxis": { 225 | "align": false, 226 | "alignLevel": null 227 | } 228 | }, 229 | { 230 | "aliasColors": {}, 231 | "bars": false, 232 | 
"dashLength": 10, 233 | "dashes": false, 234 | "datasource": "Prometheus", 235 | "fieldConfig": { 236 | "defaults": { 237 | "custom": {} 238 | }, 239 | "overrides": [] 240 | }, 241 | "fill": 1, 242 | "fillGradient": 0, 243 | "gridPos": { 244 | "h": 8, 245 | "w": 8, 246 | "x": 16, 247 | "y": 0 248 | }, 249 | "hiddenSeries": false, 250 | "id": 2, 251 | "legend": { 252 | "avg": false, 253 | "current": false, 254 | "max": false, 255 | "min": false, 256 | "show": true, 257 | "total": false, 258 | "values": false 259 | }, 260 | "lines": true, 261 | "linewidth": 1, 262 | "nullPointMode": "null", 263 | "percentage": false, 264 | "pluginVersion": "7.1.5", 265 | "pointradius": 2, 266 | "points": false, 267 | "renderer": "flot", 268 | "seriesOverrides": [], 269 | "spaceLength": 10, 270 | "stack": false, 271 | "steppedLine": false, 272 | "targets": [ 273 | { 274 | "expr": "sum(rate(http_server_duration_bucket{http_server_name=\"booking-service\", le=\"200\"}[5m])) by (job, http_server_name)\n/\nsum(rate(http_server_duration_count{http_server_name=\"booking-service\"}[5m])) by (job, http_server_name)", 275 | "interval": "", 276 | "legendFormat": "{{http_server_name}}", 277 | "refId": "A" 278 | } 279 | ], 280 | "thresholds": [], 281 | "timeFrom": null, 282 | "timeRegions": [], 283 | "timeShift": null, 284 | "title": "SLO Latency Percentile 200ms", 285 | "tooltip": { 286 | "shared": true, 287 | "sort": 0, 288 | "value_type": "individual" 289 | }, 290 | "type": "graph", 291 | "xaxis": { 292 | "buckets": null, 293 | "mode": "time", 294 | "name": null, 295 | "show": true, 296 | "values": [] 297 | }, 298 | "yaxes": [ 299 | { 300 | "format": "short", 301 | "label": null, 302 | "logBase": 1, 303 | "max": null, 304 | "min": null, 305 | "show": true 306 | }, 307 | { 308 | "format": "short", 309 | "label": null, 310 | "logBase": 1, 311 | "max": null, 312 | "min": null, 313 | "show": true 314 | } 315 | ], 316 | "yaxis": { 317 | "align": false, 318 | "alignLevel": null 319 | } 320 | } 321 | 
], 322 | "refresh": false, 323 | "schemaVersion": 26, 324 | "style": "dark", 325 | "tags": [], 326 | "templating": { 327 | "list": [] 328 | }, 329 | "time": { 330 | "from": "now-5m", 331 | "to": "now" 332 | }, 333 | "timepicker": { 334 | "refresh_intervals": [ 335 | "5s", 336 | "10s", 337 | "30s", 338 | "1m", 339 | "5m", 340 | "15m", 341 | "30m", 342 | "1h", 343 | "2h", 344 | "1d" 345 | ] 346 | }, 347 | "timezone": "", 348 | "title": "App Monitoring", 349 | "uid": "oFbdvog4k", 350 | "version": 4 351 | } -------------------------------------------------------------------------------- /k8s/redis-values.yml: -------------------------------------------------------------------------------- 1 | ## @section Global parameters 2 | ## Global Docker image parameters 3 | ## Please, note that this will override the image parameters, including dependencies, configured to use the global value 4 | ## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass 5 | ## 6 | 7 | ## @param global.imageRegistry Global Docker image registry 8 | ## @param global.imagePullSecrets Global Docker registry secret names as an array 9 | ## @param global.storageClass Global StorageClass for Persistent Volume(s) 10 | ## @param global.redis.password Global Redis® password (overrides `auth.password`) 11 | ## 12 | global: 13 | imageRegistry: "" 14 | ## E.g. 
15 | ## imagePullSecrets: 16 | ## - myRegistryKeySecretName 17 | ## 18 | imagePullSecrets: [] 19 | storageClass: "" 20 | redis: 21 | password: "" 22 | 23 | ## @section Common parameters 24 | ## 25 | 26 | ## @param kubeVersion Override Kubernetes version 27 | ## 28 | kubeVersion: "" 29 | ## @param nameOverride String to partially override common.names.fullname 30 | ## 31 | nameOverride: "" 32 | ## @param fullnameOverride String to fully override common.names.fullname 33 | ## 34 | fullnameOverride: "" 35 | ## @param commonLabels Labels to add to all deployed objects 36 | ## 37 | commonLabels: {} 38 | ## @param commonAnnotations Annotations to add to all deployed objects 39 | ## 40 | commonAnnotations: {} 41 | ## @param secretAnnotations Annotations to add to secret 42 | ## 43 | secretAnnotations: {} 44 | ## @param clusterDomain Kubernetes cluster domain name 45 | ## 46 | clusterDomain: cluster.local 47 | ## @param extraDeploy Array of extra objects to deploy with the release 48 | ## 49 | extraDeploy: [] 50 | 51 | ## Enable diagnostic mode in the deployment 52 | ## 53 | diagnosticMode: 54 | ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) 55 | ## 56 | enabled: false 57 | ## @param diagnosticMode.command Command to override all containers in the deployment 58 | ## 59 | command: 60 | - sleep 61 | ## @param diagnosticMode.args Args to override all containers in the deployment 62 | ## 63 | args: 64 | - infinity 65 | 66 | ## @section Redis® Image parameters 67 | ## 68 | 69 | ## Bitnami Redis® image 70 | ## ref: https://hub.docker.com/r/bitnami/redis/tags/ 71 | ## @param image.registry Redis® image registry 72 | ## @param image.repository Redis® image repository 73 | ## @param image.tag Redis® image tag (immutable tags are recommended) 74 | ## @param image.pullPolicy Redis® image pull policy 75 | ## @param image.pullSecrets Redis® image pull secrets 76 | ## @param image.debug Enable image debug mode 77 
| ## 78 | image: 79 | registry: docker.io 80 | repository: bitnami/redis 81 | tag: 7.0.4-debian-11-r3 82 | ## Specify a imagePullPolicy 83 | ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' 84 | ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images 85 | ## 86 | pullPolicy: IfNotPresent 87 | ## Optionally specify an array of imagePullSecrets. 88 | ## Secrets must be manually created in the namespace. 89 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 90 | ## e.g: 91 | ## pullSecrets: 92 | ## - myRegistryKeySecretName 93 | ## 94 | pullSecrets: [] 95 | ## Enable debug mode 96 | ## 97 | debug: false 98 | 99 | ## @section Redis® common configuration parameters 100 | ## https://github.com/bitnami/bitnami-docker-redis#configuration 101 | ## 102 | 103 | ## @param architecture Redis® architecture. Allowed values: `standalone` or `replication` 104 | ## 105 | architecture: standalone 106 | ## Redis® Authentication parameters 107 | ## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run 108 | ## 109 | auth: 110 | ## @param auth.enabled Enable password authentication 111 | ## 112 | enabled: false 113 | ## @param auth.sentinel Enable password authentication on sentinels too 114 | ## 115 | sentinel: false 116 | ## @param auth.password Redis® password 117 | ## Defaults to a random 10-character alphanumeric string if not set 118 | ## 119 | password: "" 120 | ## @param auth.existingSecret The name of an existing secret with Redis® credentials 121 | ## NOTE: When it's set, the previous `auth.password` parameter is ignored 122 | ## 123 | existingSecret: "" 124 | ## @param auth.existingSecretPasswordKey Password key to be retrieved from existing secret 125 | ## NOTE: ignored unless `auth.existingSecret` parameter is set 126 | ## 127 | existingSecretPasswordKey: "" 128 | ## @param auth.usePasswordFiles Mount credentials as files instead of using an 
environment variable 129 | ## 130 | usePasswordFiles: false 131 | 132 | ## @param commonConfiguration [string] Common configuration to be added into the ConfigMap 133 | ## ref: https://redis.io/topics/config 134 | ## 135 | commonConfiguration: |- 136 | # Enable AOF https://redis.io/topics/persistence#append-only-file 137 | appendonly yes 138 | # Disable RDB persistence, AOF persistence already enabled. 139 | save "" 140 | ## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for Redis® nodes 141 | ## 142 | existingConfigmap: "" 143 | 144 | ## @section Redis® master configuration parameters 145 | ## 146 | 147 | master: 148 | ## @param master.count Number of Redis® master instances to deploy (experimental, requires additional configuration) 149 | ## 150 | count: 1 151 | ## @param master.configuration Configuration for Redis® master nodes 152 | ## ref: https://redis.io/topics/config 153 | ## 154 | configuration: "" 155 | ## @param master.disableCommands Array with Redis® commands to disable on master nodes 156 | ## Commands will be completely disabled by renaming each to an empty string. 
157 | ## ref: https://redis.io/topics/security#disabling-of-specific-commands 158 | ## 159 | disableCommands: 160 | - FLUSHDB 161 | - FLUSHALL 162 | ## @param master.command Override default container command (useful when using custom images) 163 | ## 164 | command: [] 165 | ## @param master.args Override default container args (useful when using custom images) 166 | ## 167 | args: [] 168 | ## @param master.preExecCmds Additional commands to run prior to starting Redis® master 169 | ## 170 | preExecCmds: [] 171 | ## @param master.extraFlags Array with additional command line flags for Redis® master 172 | ## e.g: 173 | ## extraFlags: 174 | ## - "--maxmemory-policy volatile-ttl" 175 | ## - "--repl-backlog-size 1024mb" 176 | ## 177 | extraFlags: [] 178 | ## @param master.extraEnvVars Array with extra environment variables to add to Redis® master nodes 179 | ## e.g: 180 | ## extraEnvVars: 181 | ## - name: FOO 182 | ## value: "bar" 183 | ## 184 | extraEnvVars: [] 185 | ## @param master.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® master nodes 186 | ## 187 | extraEnvVarsCM: "" 188 | ## @param master.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® master nodes 189 | ## 190 | extraEnvVarsSecret: "" 191 | ## @param master.containerPorts.redis Container port to open on Redis® master nodes 192 | ## 193 | containerPorts: 194 | redis: 6379 195 | ## Configure extra options for Redis® containers' liveness and readiness probes 196 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes 197 | ## @param master.startupProbe.enabled Enable startupProbe on Redis® master nodes 198 | ## @param master.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe 199 | ## @param master.startupProbe.periodSeconds Period seconds for startupProbe 200 | ## @param master.startupProbe.timeoutSeconds Timeout seconds for startupProbe 201 | ## @param 
master.startupProbe.failureThreshold Failure threshold for startupProbe 202 | ## @param master.startupProbe.successThreshold Success threshold for startupProbe 203 | ## 204 | startupProbe: 205 | enabled: false 206 | initialDelaySeconds: 20 207 | periodSeconds: 5 208 | timeoutSeconds: 5 209 | successThreshold: 1 210 | failureThreshold: 5 211 | ## @param master.livenessProbe.enabled Enable livenessProbe on Redis® master nodes 212 | ## @param master.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe 213 | ## @param master.livenessProbe.periodSeconds Period seconds for livenessProbe 214 | ## @param master.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe 215 | ## @param master.livenessProbe.failureThreshold Failure threshold for livenessProbe 216 | ## @param master.livenessProbe.successThreshold Success threshold for livenessProbe 217 | ## 218 | livenessProbe: 219 | enabled: true 220 | initialDelaySeconds: 20 221 | periodSeconds: 5 222 | timeoutSeconds: 5 223 | successThreshold: 1 224 | failureThreshold: 5 225 | ## @param master.readinessProbe.enabled Enable readinessProbe on Redis® master nodes 226 | ## @param master.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe 227 | ## @param master.readinessProbe.periodSeconds Period seconds for readinessProbe 228 | ## @param master.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe 229 | ## @param master.readinessProbe.failureThreshold Failure threshold for readinessProbe 230 | ## @param master.readinessProbe.successThreshold Success threshold for readinessProbe 231 | ## 232 | readinessProbe: 233 | enabled: true 234 | initialDelaySeconds: 20 235 | periodSeconds: 5 236 | timeoutSeconds: 1 237 | successThreshold: 1 238 | failureThreshold: 5 239 | ## @param master.customStartupProbe Custom startupProbe that overrides the default one 240 | ## 241 | customStartupProbe: {} 242 | ## @param master.customLivenessProbe Custom livenessProbe that overrides the 
default one 243 | ## 244 | customLivenessProbe: {} 245 | ## @param master.customReadinessProbe Custom readinessProbe that overrides the default one 246 | ## 247 | customReadinessProbe: {} 248 | ## Redis® master resource requests and limits 249 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 250 | ## @param master.resources.limits The resources limits for the Redis® master containers 251 | ## @param master.resources.requests The requested resources for the Redis® master containers 252 | ## 253 | resources: 254 | requests: 255 | memory: "256Mi" 256 | cpu: "250m" 257 | limits: 258 | memory: "512Mi" 259 | cpu: "500m" 260 | ## Configure Pods Security Context 261 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 262 | ## @param master.podSecurityContext.enabled Enabled Redis® master pods' Security Context 263 | ## @param master.podSecurityContext.fsGroup Set Redis® master pod's Security Context fsGroup 264 | ## 265 | podSecurityContext: 266 | enabled: true 267 | fsGroup: 1001 268 | ## Configure Container Security Context 269 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 270 | ## @param master.containerSecurityContext.enabled Enabled Redis® master containers' Security Context 271 | ## @param master.containerSecurityContext.runAsUser Set Redis® master containers' Security Context runAsUser 272 | ## 273 | containerSecurityContext: 274 | enabled: true 275 | runAsUser: 1001 276 | ## @param master.kind Use either Deployment or StatefulSet (default) 277 | ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ 278 | ## 279 | kind: StatefulSet 280 | ## @param master.schedulerName Alternate scheduler for Redis® master pods 281 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ 282 | ## 283 | schedulerName: "" 284 | ## @param master.updateStrategy.type 
Redis® master statefulset strategy type 285 | ## @skip master.updateStrategy.rollingUpdate 286 | ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies 287 | ## 288 | updateStrategy: 289 | ## StrategyType 290 | ## Can be set to RollingUpdate or OnDelete 291 | ## 292 | type: RollingUpdate 293 | rollingUpdate: {} 294 | ## @param master.priorityClassName Redis® master pods' priorityClassName 295 | ## 296 | priorityClassName: "" 297 | ## @param master.hostAliases Redis® master pods host aliases 298 | ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ 299 | ## 300 | hostAliases: [] 301 | ## @param master.podLabels Extra labels for Redis® master pods 302 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 303 | ## 304 | podLabels: {} 305 | ## @param master.podAnnotations Annotations for Redis® master pods 306 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 307 | ## 308 | podAnnotations: {} 309 | ## @param master.shareProcessNamespace Share a single process namespace between all of the containers in Redis® master pods 310 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ 311 | ## 312 | shareProcessNamespace: false 313 | ## @param master.podAffinityPreset Pod affinity preset. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` 314 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity 315 | ## 316 | podAffinityPreset: "" 317 | ## @param master.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `master.affinity` is set. 
Allowed values: `soft` or `hard` 318 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity 319 | ## 320 | podAntiAffinityPreset: soft 321 | ## Node master.affinity preset 322 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity 323 | ## 324 | nodeAffinityPreset: 325 | ## @param master.nodeAffinityPreset.type Node affinity preset type. Ignored if `master.affinity` is set. Allowed values: `soft` or `hard` 326 | ## 327 | type: "" 328 | ## @param master.nodeAffinityPreset.key Node label key to match. Ignored if `master.affinity` is set 329 | ## 330 | key: "" 331 | ## @param master.nodeAffinityPreset.values Node label values to match. Ignored if `master.affinity` is set 332 | ## E.g. 333 | ## values: 334 | ## - e2e-az1 335 | ## - e2e-az2 336 | ## 337 | values: [] 338 | ## @param master.affinity Affinity for Redis® master pods assignment 339 | ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 340 | ## NOTE: `master.podAffinityPreset`, `master.podAntiAffinityPreset`, and `master.nodeAffinityPreset` will be ignored when it's set 341 | ## 342 | affinity: {} 343 | ## @param master.nodeSelector Node labels for Redis® master pods assignment 344 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 345 | ## 346 | nodeSelector: {} 347 | ## @param master.tolerations Tolerations for Redis® master pods assignment 348 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 349 | ## 350 | tolerations: [] 351 | ## @param master.topologySpreadConstraints Spread Constraints for Redis® master pod assignment 352 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 353 | ## E.g. 
354 | ## topologySpreadConstraints: 355 | ## - maxSkew: 1 356 | ## topologyKey: node 357 | ## whenUnsatisfiable: DoNotSchedule 358 | ## 359 | topologySpreadConstraints: [] 360 | ## @param master.dnsPolicy DNS Policy for Redis® master pod 361 | ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ 362 | ## E.g. 363 | ## dnsPolicy: ClusterFirst 364 | dnsPolicy: "" 365 | ## @param master.dnsConfig DNS Configuration for Redis® master pod 366 | ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ 367 | ## E.g. 368 | ## dnsConfig: 369 | ## options: 370 | ## - name: ndots 371 | ## value: "4" 372 | ## - name: single-request-reopen 373 | dnsConfig: {} 374 | ## @param master.lifecycleHooks for the Redis® master container(s) to automate configuration before or after startup 375 | ## 376 | lifecycleHooks: {} 377 | ## @param master.extraVolumes Optionally specify extra list of additional volumes for the Redis® master pod(s) 378 | ## 379 | extraVolumes: [] 380 | ## @param master.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® master container(s) 381 | ## 382 | extraVolumeMounts: [] 383 | ## @param master.sidecars Add additional sidecar containers to the Redis® master pod(s) 384 | ## e.g: 385 | ## sidecars: 386 | ## - name: your-image-name 387 | ## image: your-image 388 | ## imagePullPolicy: Always 389 | ## ports: 390 | ## - name: portname 391 | ## containerPort: 1234 392 | ## 393 | sidecars: [] 394 | ## @param master.initContainers Add additional init containers to the Redis® master pod(s) 395 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ 396 | ## e.g: 397 | ## initContainers: 398 | ## - name: your-image-name 399 | ## image: your-image 400 | ## imagePullPolicy: Always 401 | ## command: ['sh', '-c', 'echo "hello world"'] 402 | ## 403 | initContainers: [] 404 | ## Persistence parameters 405 | ## ref: 
https://kubernetes.io/docs/user-guide/persistent-volumes/ 406 | ## 407 | persistence: 408 | ## @param master.persistence.enabled Enable persistence on Redis® master nodes using Persistent Volume Claims 409 | ## 410 | enabled: true 411 | ## @param master.persistence.medium Provide a medium for `emptyDir` volumes. 412 | ## 413 | medium: "" 414 | ## @param master.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 415 | ## 416 | sizeLimit: "" 417 | ## @param master.persistence.path The path the volume will be mounted at on Redis® master containers 418 | ## NOTE: Useful when using different Redis® images 419 | ## 420 | path: /data 421 | ## @param master.persistence.subPath The subdirectory of the volume to mount on Redis® master containers 422 | ## NOTE: Useful in dev environments 423 | ## 424 | subPath: "" 425 | ## @param master.persistence.storageClass Persistent Volume storage class 426 | ## If defined, storageClassName: 427 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 428 | ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner 429 | ## 430 | storageClass: "" 431 | ## @param master.persistence.accessModes Persistent Volume access modes 432 | ## 433 | accessModes: 434 | - ReadWriteOnce 435 | ## @param master.persistence.size Persistent Volume size 436 | ## 437 | size: 8Gi 438 | ## @param master.persistence.annotations Additional custom annotations for the PVC 439 | ## 440 | annotations: {} 441 | ## @param master.persistence.selector Additional labels to match for the PVC 442 | ## e.g: 443 | ## selector: 444 | ## matchLabels: 445 | ## app: my-app 446 | ## 447 | selector: {} 448 | ## @param master.persistence.dataSource Custom PVC data source 449 | ## 450 | dataSource: {} 451 | ## @param master.persistence.existingClaim Use an existing PVC which must be created manually before being bound 452 | ## NOTE: requires master.persistence.enabled: true 453 | ## 454 | 
existingClaim: "" 455 | ## Redis® master service parameters 456 | ## 457 | service: 458 | ## @param master.service.type Redis® master service type 459 | ## 460 | type: ClusterIP 461 | ## @param master.service.ports.redis Redis® master service port 462 | ## 463 | ports: 464 | redis: 6379 465 | ## @param master.service.nodePorts.redis Node port for Redis® master 466 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport 467 | ## NOTE: choose port between <30000-32767> 468 | ## 469 | nodePorts: 470 | redis: "" 471 | ## @param master.service.externalTrafficPolicy Redis® master service external traffic policy 472 | ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip 473 | ## 474 | externalTrafficPolicy: Cluster 475 | ## @param master.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) 476 | ## 477 | extraPorts: [] 478 | ## @param master.service.internalTrafficPolicy Redis® master service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) 479 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ 480 | ## 481 | internalTrafficPolicy: Cluster 482 | ## @param master.service.clusterIP Redis® master service Cluster IP 483 | ## 484 | clusterIP: "" 485 | ## @param master.service.loadBalancerIP Redis® master service Load Balancer IP 486 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer 487 | ## 488 | loadBalancerIP: "" 489 | ## @param master.service.loadBalancerSourceRanges Redis® master service Load Balancer sources 490 | ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service 491 | ## e.g. 
492 | ## loadBalancerSourceRanges: 493 | ## - 10.10.10.0/24 494 | ## 495 | loadBalancerSourceRanges: [] 496 | ## @param master.service.annotations Additional custom annotations for Redis® master service 497 | ## 498 | annotations: {} 499 | ## @param master.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" 500 | ## If "ClientIP", consecutive client requests will be directed to the same Pod 501 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies 502 | ## 503 | sessionAffinity: None 504 | ## @param master.service.sessionAffinityConfig Additional settings for the sessionAffinity 505 | ## sessionAffinityConfig: 506 | ## clientIP: 507 | ## timeoutSeconds: 300 508 | ## 509 | sessionAffinityConfig: {} 510 | ## @param master.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-master pods 511 | ## 512 | terminationGracePeriodSeconds: 30 513 | 514 | ## @section Redis® replicas configuration parameters 515 | ## 516 | 517 | replica: 518 | ## @param replica.replicaCount Number of Redis® replicas to deploy 519 | ## 520 | replicaCount: 0 521 | ## @param replica.configuration Configuration for Redis® replicas nodes 522 | ## ref: https://redis.io/topics/config 523 | ## 524 | configuration: "" 525 | ## @param replica.disableCommands Array with Redis® commands to disable on replicas nodes 526 | ## Commands will be completely disabled by renaming each to an empty string. 
527 | ## ref: https://redis.io/topics/security#disabling-of-specific-commands 528 | ## 529 | disableCommands: 530 | - FLUSHDB 531 | - FLUSHALL 532 | ## @param replica.command Override default container command (useful when using custom images) 533 | ## 534 | command: [] 535 | ## @param replica.args Override default container args (useful when using custom images) 536 | ## 537 | args: [] 538 | ## @param replica.preExecCmds Additional commands to run prior to starting Redis® replicas 539 | ## 540 | preExecCmds: [] 541 | ## @param replica.extraFlags Array with additional command line flags for Redis® replicas 542 | ## e.g: 543 | ## extraFlags: 544 | ## - "--maxmemory-policy volatile-ttl" 545 | ## - "--repl-backlog-size 1024mb" 546 | ## 547 | extraFlags: [] 548 | ## @param replica.extraEnvVars Array with extra environment variables to add to Redis® replicas nodes 549 | ## e.g: 550 | ## extraEnvVars: 551 | ## - name: FOO 552 | ## value: "bar" 553 | ## 554 | extraEnvVars: [] 555 | ## @param replica.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® replicas nodes 556 | ## 557 | extraEnvVarsCM: "" 558 | ## @param replica.extraEnvVarsSecret Name of existing Secret containing extra env vars for Redis® replicas nodes 559 | ## 560 | extraEnvVarsSecret: "" 561 | ## @param replica.externalMaster.enabled Use external master for bootstrapping 562 | ## @param replica.externalMaster.host External master host to bootstrap from 563 | ## @param replica.externalMaster.port Port for Redis service external master host 564 | ## 565 | externalMaster: 566 | enabled: false 567 | host: "" 568 | port: 6379 569 | ## @param replica.containerPorts.redis Container port to open on Redis® replicas nodes 570 | ## 571 | containerPorts: 572 | redis: 6379 573 | ## Configure extra options for Redis® containers' liveness and readiness probes 574 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes 575 | ## 
@param replica.startupProbe.enabled Enable startupProbe on Redis® replicas nodes 576 | ## @param replica.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe 577 | ## @param replica.startupProbe.periodSeconds Period seconds for startupProbe 578 | ## @param replica.startupProbe.timeoutSeconds Timeout seconds for startupProbe 579 | ## @param replica.startupProbe.failureThreshold Failure threshold for startupProbe 580 | ## @param replica.startupProbe.successThreshold Success threshold for startupProbe 581 | ## 582 | startupProbe: 583 | enabled: true 584 | initialDelaySeconds: 10 585 | periodSeconds: 10 586 | timeoutSeconds: 5 587 | successThreshold: 1 588 | failureThreshold: 22 589 | ## @param replica.livenessProbe.enabled Enable livenessProbe on Redis® replicas nodes 590 | ## @param replica.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe 591 | ## @param replica.livenessProbe.periodSeconds Period seconds for livenessProbe 592 | ## @param replica.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe 593 | ## @param replica.livenessProbe.failureThreshold Failure threshold for livenessProbe 594 | ## @param replica.livenessProbe.successThreshold Success threshold for livenessProbe 595 | ## 596 | livenessProbe: 597 | enabled: true 598 | initialDelaySeconds: 20 599 | periodSeconds: 5 600 | timeoutSeconds: 5 601 | successThreshold: 1 602 | failureThreshold: 5 603 | ## @param replica.readinessProbe.enabled Enable readinessProbe on Redis® replicas nodes 604 | ## @param replica.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe 605 | ## @param replica.readinessProbe.periodSeconds Period seconds for readinessProbe 606 | ## @param replica.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe 607 | ## @param replica.readinessProbe.failureThreshold Failure threshold for readinessProbe 608 | ## @param replica.readinessProbe.successThreshold Success threshold for readinessProbe 609 | ## 610 | 
readinessProbe: 611 | enabled: true 612 | initialDelaySeconds: 20 613 | periodSeconds: 5 614 | timeoutSeconds: 1 615 | successThreshold: 1 616 | failureThreshold: 5 617 | ## @param replica.customStartupProbe Custom startupProbe that overrides the default one 618 | ## 619 | customStartupProbe: {} 620 | ## @param replica.customLivenessProbe Custom livenessProbe that overrides the default one 621 | ## 622 | customLivenessProbe: {} 623 | ## @param replica.customReadinessProbe Custom readinessProbe that overrides the default one 624 | ## 625 | customReadinessProbe: {} 626 | ## Redis® replicas resource requests and limits 627 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 628 | ## @param replica.resources.limits The resources limits for the Redis® replicas containers 629 | ## @param replica.resources.requests The requested resources for the Redis® replicas containers 630 | ## 631 | resources: 632 | # We usually recommend not to specify default resources and to leave this as a conscious 633 | # choice for the user. This also increases chances charts run on environments with little 634 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 635 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
636 | limits: {} 637 | # cpu: 250m 638 | # memory: 256Mi 639 | requests: {} 640 | # cpu: 250m 641 | # memory: 256Mi 642 | ## Configure Pods Security Context 643 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 644 | ## @param replica.podSecurityContext.enabled Enabled Redis® replicas pods' Security Context 645 | ## @param replica.podSecurityContext.fsGroup Set Redis® replicas pod's Security Context fsGroup 646 | ## 647 | podSecurityContext: 648 | enabled: true 649 | fsGroup: 1001 650 | ## Configure Container Security Context 651 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 652 | ## @param replica.containerSecurityContext.enabled Enabled Redis® replicas containers' Security Context 653 | ## @param replica.containerSecurityContext.runAsUser Set Redis® replicas containers' Security Context runAsUser 654 | ## 655 | containerSecurityContext: 656 | enabled: true 657 | runAsUser: 1001 658 | ## @param replica.schedulerName Alternate scheduler for Redis® replicas pods 659 | ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ 660 | ## 661 | schedulerName: "" 662 | ## @param replica.updateStrategy.type Redis® replicas statefulset strategy type 663 | ## @skip replica.updateStrategy.rollingUpdate 664 | ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies 665 | ## 666 | updateStrategy: 667 | ## StrategyType 668 | ## Can be set to RollingUpdate or OnDelete 669 | ## 670 | type: RollingUpdate 671 | rollingUpdate: {} 672 | ## @param replica.priorityClassName Redis® replicas pods' priorityClassName 673 | ## 674 | priorityClassName: "" 675 | ## @param replica.podManagementPolicy podManagementPolicy to manage scaling operation of Redis® replicas pods 676 | ## ref: 
https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies 677 | ## 678 | podManagementPolicy: "" 679 | ## @param replica.hostAliases Redis® replicas pods host aliases 680 | ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ 681 | ## 682 | hostAliases: [] 683 | ## @param replica.podLabels Extra labels for Redis® replicas pods 684 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 685 | ## 686 | podLabels: {} 687 | ## @param replica.podAnnotations Annotations for Redis® replicas pods 688 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 689 | ## 690 | podAnnotations: {} 691 | ## @param replica.shareProcessNamespace Share a single process namespace between all of the containers in Redis® replicas pods 692 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ 693 | ## 694 | shareProcessNamespace: false 695 | ## @param replica.podAffinityPreset Pod affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` 696 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity 697 | ## 698 | podAffinityPreset: "" 699 | ## @param replica.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `replica.affinity` is set. Allowed values: `soft` or `hard` 700 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity 701 | ## 702 | podAntiAffinityPreset: soft 703 | ## Node affinity preset 704 | ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity 705 | ## 706 | nodeAffinityPreset: 707 | ## @param replica.nodeAffinityPreset.type Node affinity preset type. Ignored if `replica.affinity` is set. 
Allowed values: `soft` or `hard` 708 | ## 709 | type: "" 710 | ## @param replica.nodeAffinityPreset.key Node label key to match. Ignored if `replica.affinity` is set 711 | ## 712 | key: "" 713 | ## @param replica.nodeAffinityPreset.values Node label values to match. Ignored if `replica.affinity` is set 714 | ## E.g. 715 | ## values: 716 | ## - e2e-az1 717 | ## - e2e-az2 718 | ## 719 | values: [] 720 | ## @param replica.affinity Affinity for Redis® replicas pods assignment 721 | ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 722 | ## NOTE: `replica.podAffinityPreset`, `replica.podAntiAffinityPreset`, and `replica.nodeAffinityPreset` will be ignored when it's set 723 | ## 724 | affinity: {} 725 | ## @param replica.nodeSelector Node labels for Redis® replicas pods assignment 726 | ## ref: https://kubernetes.io/docs/user-guide/node-selection/ 727 | ## 728 | nodeSelector: {} 729 | ## @param replica.tolerations Tolerations for Redis® replicas pods assignment 730 | ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ 731 | ## 732 | tolerations: [] 733 | ## @param replica.topologySpreadConstraints Spread Constraints for Redis® replicas pod assignment 734 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 735 | ## E.g. 736 | ## topologySpreadConstraints: 737 | ## - maxSkew: 1 738 | ## topologyKey: node 739 | ## whenUnsatisfiable: DoNotSchedule 740 | ## 741 | topologySpreadConstraints: [] 742 | ## @param replica.dnsPolicy DNS Policy for Redis® replica pods 743 | ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ 744 | ## E.g. 745 | ## dnsPolicy: ClusterFirst 746 | dnsPolicy: "" 747 | ## @param replica.dnsConfig DNS Configuration for Redis® replica pods 748 | ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ 749 | ## E.g. 
750 | ## dnsConfig: 751 | ## options: 752 | ## - name: ndots 753 | ## value: "4" 754 | ## - name: single-request-reopen 755 | dnsConfig: {} 756 | ## @param replica.lifecycleHooks for the Redis® replica container(s) to automate configuration before or after startup 757 | ## 758 | lifecycleHooks: {} 759 | ## @param replica.extraVolumes Optionally specify extra list of additional volumes for the Redis® replicas pod(s) 760 | ## 761 | extraVolumes: [] 762 | ## @param replica.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® replicas container(s) 763 | ## 764 | extraVolumeMounts: [] 765 | ## @param replica.sidecars Add additional sidecar containers to the Redis® replicas pod(s) 766 | ## e.g: 767 | ## sidecars: 768 | ## - name: your-image-name 769 | ## image: your-image 770 | ## imagePullPolicy: Always 771 | ## ports: 772 | ## - name: portname 773 | ## containerPort: 1234 774 | ## 775 | sidecars: [] 776 | ## @param replica.initContainers Add additional init containers to the Redis® replicas pod(s) 777 | ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ 778 | ## e.g: 779 | ## initContainers: 780 | ## - name: your-image-name 781 | ## image: your-image 782 | ## imagePullPolicy: Always 783 | ## command: ['sh', '-c', 'echo "hello world"'] 784 | ## 785 | initContainers: [] 786 | ## Persistence Parameters 787 | ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ 788 | ## 789 | persistence: 790 | ## @param replica.persistence.enabled Enable persistence on Redis® replicas nodes using Persistent Volume Claims 791 | ## 792 | enabled: true 793 | ## @param replica.persistence.medium Provide a medium for `emptyDir` volumes. 794 | ## 795 | medium: "" 796 | ## @param replica.persistence.sizeLimit Set this to enable a size limit for `emptyDir` volumes. 
797 | ## 798 | sizeLimit: "" 799 | ## @param replica.persistence.path The path the volume will be mounted at on Redis® replicas containers 800 | ## NOTE: Useful when using different Redis® images 801 | ## 802 | path: /data 803 | ## @param replica.persistence.subPath The subdirectory of the volume to mount on Redis® replicas containers 804 | ## NOTE: Useful in dev environments 805 | ## 806 | subPath: "" 807 | ## @param replica.persistence.storageClass Persistent Volume storage class 808 | ## If defined, storageClassName: 809 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 810 | ## If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner 811 | ## 812 | storageClass: "" 813 | ## @param replica.persistence.accessModes Persistent Volume access modes 814 | ## 815 | accessModes: 816 | - ReadWriteOnce 817 | ## @param replica.persistence.size Persistent Volume size 818 | ## 819 | size: 8Gi 820 | ## @param replica.persistence.annotations Additional custom annotations for the PVC 821 | ## 822 | annotations: {} 823 | ## @param replica.persistence.selector Additional labels to match for the PVC 824 | ## e.g: 825 | ## selector: 826 | ## matchLabels: 827 | ## app: my-app 828 | ## 829 | selector: {} 830 | ## @param replica.persistence.dataSource Custom PVC data source 831 | ## 832 | dataSource: {} 833 | ## @param replica.persistence.existingClaim Use an existing PVC which must be created manually before being bound 834 | ## NOTE: requires replica.persistence.enabled: true 835 | ## 836 | existingClaim: "" 837 | ## Redis® replicas service parameters 838 | ## 839 | service: 840 | ## @param replica.service.type Redis® replicas service type 841 | ## 842 | type: ClusterIP 843 | ## @param replica.service.ports.redis Redis® replicas service port 844 | ## 845 | ports: 846 | redis: 6379 847 | ## @param replica.service.nodePorts.redis Node port for Redis® replicas 848 | ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport 849 | ## NOTE: choose port between <30000-32767> 850 | ## 851 | nodePorts: 852 | redis: "" 853 | ## @param replica.service.externalTrafficPolicy Redis® replicas service external traffic policy 854 | ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip 855 | ## 856 | externalTrafficPolicy: Cluster 857 | ## @param replica.service.internalTrafficPolicy Redis® replicas service internal traffic policy (requires Kubernetes v1.22 or greater to be usable) 858 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service-traffic-policy/ 859 | ## 860 | internalTrafficPolicy: Cluster 861 | ## @param replica.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) 862 | ## 863 | extraPorts: [] 864 | ## @param replica.service.clusterIP Redis® replicas service Cluster IP 865 | ## 866 | clusterIP: "" 867 | ## @param replica.service.loadBalancerIP Redis® replicas service Load Balancer IP 868 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer 869 | ## 870 | loadBalancerIP: "" 871 | ## @param replica.service.loadBalancerSourceRanges Redis® replicas service Load Balancer sources 872 | ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service 873 | ## e.g. 
874 | ## loadBalancerSourceRanges: 875 | ## - 10.10.10.0/24 876 | ## 877 | loadBalancerSourceRanges: [] 878 | ## @param replica.service.annotations Additional custom annotations for Redis® replicas service 879 | ## 880 | annotations: {} 881 | ## @param replica.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" 882 | ## If "ClientIP", consecutive client requests will be directed to the same Pod 883 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies 884 | ## 885 | sessionAffinity: None 886 | ## @param replica.service.sessionAffinityConfig Additional settings for the sessionAffinity 887 | ## sessionAffinityConfig: 888 | ## clientIP: 889 | ## timeoutSeconds: 300 890 | ## 891 | sessionAffinityConfig: {} 892 | ## @param replica.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-replicas pods 893 | ## 894 | terminationGracePeriodSeconds: 30 895 | ## Autoscaling configuration 896 | ## 897 | autoscaling: 898 | ## @param replica.autoscaling.enabled Enable replica autoscaling settings 899 | ## 900 | enabled: false 901 | ## @param replica.autoscaling.minReplicas Minimum replicas for the pod autoscaling 902 | ## 903 | minReplicas: 1 904 | ## @param replica.autoscaling.maxReplicas Maximum replicas for the pod autoscaling 905 | ## 906 | maxReplicas: 11 907 | ## @param replica.autoscaling.targetCPU Percentage of CPU to consider when autoscaling 908 | ## 909 | targetCPU: "" 910 | ## @param replica.autoscaling.targetMemory Percentage of Memory to consider when autoscaling 911 | ## 912 | targetMemory: "" 913 | 914 | ## @section Redis® Sentinel configuration parameters 915 | ## 916 | 917 | sentinel: 918 | ## @param sentinel.enabled Use Redis® Sentinel on Redis® pods. 
919 | ## IMPORTANT: this will disable the master and replicas services and 920 | ## create a single Redis® service exposing both the Redis and Sentinel ports 921 | ## 922 | enabled: false 923 | ## Bitnami Redis® Sentinel image version 924 | ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ 925 | ## @param sentinel.image.registry Redis® Sentinel image registry 926 | ## @param sentinel.image.repository Redis® Sentinel image repository 927 | ## @param sentinel.image.tag Redis® Sentinel image tag (immutable tags are recommended) 928 | ## @param sentinel.image.pullPolicy Redis® Sentinel image pull policy 929 | ## @param sentinel.image.pullSecrets Redis® Sentinel image pull secrets 930 | ## @param sentinel.image.debug Enable image debug mode 931 | ## 932 | image: 933 | registry: docker.io 934 | repository: bitnami/redis-sentinel 935 | tag: 7.0.4-debian-11-r0 936 | ## Specify a imagePullPolicy 937 | ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' 938 | ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images 939 | ## 940 | pullPolicy: IfNotPresent 941 | ## Optionally specify an array of imagePullSecrets. 942 | ## Secrets must be manually created in the namespace. 943 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 944 | ## e.g: 945 | ## pullSecrets: 946 | ## - myRegistryKeySecretName 947 | ## 948 | pullSecrets: [] 949 | ## Enable debug mode 950 | ## 951 | debug: false 952 | ## @param sentinel.masterSet Master set name 953 | ## 954 | masterSet: mymaster 955 | ## @param sentinel.quorum Sentinel Quorum 956 | ## 957 | quorum: 2 958 | ## @param sentinel.getMasterTimeout Amount of time to allow before get_sentinel_master_info() times out. 959 | ## NOTE: This is directly related to the startupProbes which are configured to run every 10 seconds for a total of 22 failures. If adjusting this value, also adjust the startupProbes. 
960 | getMasterTimeout: 220 961 | ## @param sentinel.automateClusterRecovery Automate cluster recovery in cases where the last replica is not considered a good replica and Sentinel won't automatically failover to it. 962 | ## This also prevents any new replica from starting until the last remaining replica is elected as master to guarantee that it is the one to be elected by Sentinel, and not a newly started replica with no data. 963 | ## NOTE: This feature requires a "downAfterMilliseconds" value less than or equal to 2000. 964 | ## 965 | automateClusterRecovery: false 966 | ## Sentinel timing restrictions 967 | ## @param sentinel.downAfterMilliseconds Timeout for detecting a Redis® node is down 968 | ## @param sentinel.failoverTimeout Timeout for performing an election failover 969 | ## 970 | downAfterMilliseconds: 60000 971 | failoverTimeout: 18000 972 | ## @param sentinel.parallelSyncs Number of replicas that can be reconfigured in parallel to use the new master after a failover 973 | ## 974 | parallelSyncs: 1 975 | ## @param sentinel.configuration Configuration for Redis® Sentinel nodes 976 | ## ref: https://redis.io/topics/sentinel 977 | ## 978 | configuration: "" 979 | ## @param sentinel.command Override default container command (useful when using custom images) 980 | ## 981 | command: [] 982 | ## @param sentinel.args Override default container args (useful when using custom images) 983 | ## 984 | args: [] 985 | ## @param sentinel.preExecCmds Additional commands to run prior to starting Redis® Sentinel 986 | ## 987 | preExecCmds: [] 988 | ## @param sentinel.extraEnvVars Array with extra environment variables to add to Redis® Sentinel nodes 989 | ## e.g: 990 | ## extraEnvVars: 991 | ## - name: FOO 992 | ## value: "bar" 993 | ## 994 | extraEnvVars: [] 995 | ## @param sentinel.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Redis® Sentinel nodes 996 | ## 997 | extraEnvVarsCM: "" 998 | ## @param sentinel.extraEnvVarsSecret Name of existing 
Secret containing extra env vars for Redis® Sentinel nodes 999 | ## 1000 | extraEnvVarsSecret: "" 1001 | ## @param sentinel.externalMaster.enabled Use external master for bootstrapping 1002 | ## @param sentinel.externalMaster.host External master host to bootstrap from 1003 | ## @param sentinel.externalMaster.port Port for Redis service external master host 1004 | ## 1005 | externalMaster: 1006 | enabled: false 1007 | host: "" 1008 | port: 6379 1009 | ## @param sentinel.containerPorts.sentinel Container port to open on Redis® Sentinel nodes 1010 | ## 1011 | containerPorts: 1012 | sentinel: 26379 1013 | ## Configure extra options for Redis® containers' liveness and readiness probes 1014 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes 1015 | ## @param sentinel.startupProbe.enabled Enable startupProbe on Redis® Sentinel nodes 1016 | ## @param sentinel.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe 1017 | ## @param sentinel.startupProbe.periodSeconds Period seconds for startupProbe 1018 | ## @param sentinel.startupProbe.timeoutSeconds Timeout seconds for startupProbe 1019 | ## @param sentinel.startupProbe.failureThreshold Failure threshold for startupProbe 1020 | ## @param sentinel.startupProbe.successThreshold Success threshold for startupProbe 1021 | ## 1022 | startupProbe: 1023 | enabled: true 1024 | initialDelaySeconds: 10 1025 | periodSeconds: 10 1026 | timeoutSeconds: 5 1027 | successThreshold: 1 1028 | failureThreshold: 22 1029 | ## @param sentinel.livenessProbe.enabled Enable livenessProbe on Redis® Sentinel nodes 1030 | ## @param sentinel.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe 1031 | ## @param sentinel.livenessProbe.periodSeconds Period seconds for livenessProbe 1032 | ## @param sentinel.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe 1033 | ## @param sentinel.livenessProbe.failureThreshold Failure threshold for 
livenessProbe 1034 | ## @param sentinel.livenessProbe.successThreshold Success threshold for livenessProbe 1035 | ## 1036 | livenessProbe: 1037 | enabled: true 1038 | initialDelaySeconds: 20 1039 | periodSeconds: 5 1040 | timeoutSeconds: 5 1041 | successThreshold: 1 1042 | failureThreshold: 5 1043 | ## @param sentinel.readinessProbe.enabled Enable readinessProbe on Redis® Sentinel nodes 1044 | ## @param sentinel.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe 1045 | ## @param sentinel.readinessProbe.periodSeconds Period seconds for readinessProbe 1046 | ## @param sentinel.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe 1047 | ## @param sentinel.readinessProbe.failureThreshold Failure threshold for readinessProbe 1048 | ## @param sentinel.readinessProbe.successThreshold Success threshold for readinessProbe 1049 | ## 1050 | readinessProbe: 1051 | enabled: true 1052 | initialDelaySeconds: 20 1053 | periodSeconds: 5 1054 | timeoutSeconds: 1 1055 | successThreshold: 1 1056 | failureThreshold: 5 1057 | ## @param sentinel.customStartupProbe Custom startupProbe that overrides the default one 1058 | ## 1059 | customStartupProbe: {} 1060 | ## @param sentinel.customLivenessProbe Custom livenessProbe that overrides the default one 1061 | ## 1062 | customLivenessProbe: {} 1063 | ## @param sentinel.customReadinessProbe Custom readinessProbe that overrides the default one 1064 | ## 1065 | customReadinessProbe: {} 1066 | ## Persistence parameters 1067 | ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ 1068 | ## 1069 | persistence: 1070 | ## @param sentinel.persistence.enabled Enable persistence on Redis® sentinel nodes using Persistent Volume Claims (Experimental) 1071 | ## 1072 | enabled: false 1073 | ## @param sentinel.persistence.storageClass Persistent Volume storage class 1074 | ## If defined, storageClassName: 1075 | ## If set to "-", storageClassName: "", which disables dynamic provisioning 1076 | ## If undefined 
(the default) or set to null, no storageClassName spec is set, choosing the default provisioner 1077 | ## 1078 | storageClass: "" 1079 | ## @param sentinel.persistence.accessModes Persistent Volume access modes 1080 | ## 1081 | accessModes: 1082 | - ReadWriteOnce 1083 | ## @param sentinel.persistence.size Persistent Volume size 1084 | ## 1085 | size: 100Mi 1086 | ## @param sentinel.persistence.annotations Additional custom annotations for the PVC 1087 | ## 1088 | annotations: {} 1089 | ## @param sentinel.persistence.selector Additional labels to match for the PVC 1090 | ## e.g: 1091 | ## selector: 1092 | ## matchLabels: 1093 | ## app: my-app 1094 | ## 1095 | selector: {} 1096 | ## @param sentinel.persistence.dataSource Custom PVC data source 1097 | ## 1098 | dataSource: {} 1099 | ## @param sentinel.persistence.medium Provide a medium for `emptyDir` volumes. 1100 | ## 1101 | medium: "" 1102 | ## Redis® Sentinel resource requests and limits 1103 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 1104 | ## @param sentinel.resources.limits The resources limits for the Redis® Sentinel containers 1105 | ## @param sentinel.resources.requests The requested resources for the Redis® Sentinel containers 1106 | ## 1107 | resources: 1108 | limits: {} 1109 | requests: {} 1110 | ## Configure Container Security Context 1111 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 1112 | ## @param sentinel.containerSecurityContext.enabled Enabled Redis® Sentinel containers' Security Context 1113 | ## @param sentinel.containerSecurityContext.runAsUser Set Redis® Sentinel containers' Security Context runAsUser 1114 | ## 1115 | containerSecurityContext: 1116 | enabled: true 1117 | runAsUser: 1001 1118 | ## @param sentinel.lifecycleHooks for the Redis® sentinel container(s) to automate configuration before or after startup 1119 | ## 1120 | lifecycleHooks: {} 1121 | ## @param sentinel.extraVolumes Optionally 
specify extra list of additional volumes for the Redis® Sentinel 1122 | ## 1123 | extraVolumes: [] 1124 | ## @param sentinel.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® Sentinel container(s) 1125 | ## 1126 | extraVolumeMounts: [] 1127 | ## Redis® Sentinel service parameters 1128 | ## 1129 | service: 1130 | ## @param sentinel.service.type Redis® Sentinel service type 1131 | ## 1132 | type: ClusterIP 1133 | ## @param sentinel.service.ports.redis Redis® service port for Redis® 1134 | ## @param sentinel.service.ports.sentinel Redis® service port for Redis® Sentinel 1135 | ## 1136 | ports: 1137 | redis: 6379 1138 | sentinel: 26379 1139 | ## @param sentinel.service.nodePorts.redis Node port for Redis® 1140 | ## @param sentinel.service.nodePorts.sentinel Node port for Sentinel 1141 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport 1142 | ## NOTE: choose port between <30000-32767> 1143 | ## NOTE: By leaving these values blank, they will be generated by ports-configmap 1144 | ## If setting manually, please leave at least replica.replicaCount + 1 in between sentinel.service.nodePorts.redis and sentinel.service.nodePorts.sentinel to take into account the ports that will be created while incrementing that base port 1145 | ## 1146 | nodePorts: 1147 | redis: "" 1148 | sentinel: "" 1149 | ## @param sentinel.service.externalTrafficPolicy Redis® Sentinel service external traffic policy 1150 | ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip 1151 | ## 1152 | externalTrafficPolicy: Cluster 1153 | ## @param sentinel.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) 1154 | ## 1155 | extraPorts: [] 1156 | ## @param sentinel.service.clusterIP Redis® Sentinel service Cluster IP 1157 | ## 1158 | clusterIP: "" 1159 | ## @param sentinel.service.loadBalancerIP Redis® Sentinel service Load Balancer 
IP 1160 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer 1161 | ## 1162 | loadBalancerIP: "" 1163 | ## @param sentinel.service.loadBalancerSourceRanges Redis® Sentinel service Load Balancer sources 1164 | ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service 1165 | ## e.g. 1166 | ## loadBalancerSourceRanges: 1167 | ## - 10.10.10.0/24 1168 | ## 1169 | loadBalancerSourceRanges: [] 1170 | ## @param sentinel.service.annotations Additional custom annotations for Redis® Sentinel service 1171 | ## 1172 | annotations: {} 1173 | ## @param sentinel.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" 1174 | ## If "ClientIP", consecutive client requests will be directed to the same Pod 1175 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies 1176 | ## 1177 | sessionAffinity: None 1178 | ## @param sentinel.service.sessionAffinityConfig Additional settings for the sessionAffinity 1179 | ## sessionAffinityConfig: 1180 | ## clientIP: 1181 | ## timeoutSeconds: 300 1182 | ## 1183 | sessionAffinityConfig: {} 1184 | ## @param sentinel.terminationGracePeriodSeconds Integer setting the termination grace period for the redis-node pods 1185 | ## 1186 | terminationGracePeriodSeconds: 30 1187 | 1188 | ## @section Other Parameters 1189 | ## 1190 | 1191 | ## Network Policy configuration 1192 | ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ 1193 | ## 1194 | networkPolicy: 1195 | ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources 1196 | ## 1197 | enabled: false 1198 | ## @param networkPolicy.allowExternal Don't require client label for connections 1199 | ## When set to false, only pods with the correct client label will have network access to the ports 1200 | ## Redis® is listening on. 
When true, Redis® will accept connections from any source 1201 | ## (with the correct destination port). 1202 | ## 1203 | allowExternal: true 1204 | ## @param networkPolicy.extraIngress Add extra ingress rules to the NetworkPolicy 1205 | ## e.g: 1206 | ## extraIngress: 1207 | ## - ports: 1208 | ## - port: 1234 1209 | ## from: 1210 | ## - podSelector: 1211 | ## - matchLabels: 1212 | ## - role: frontend 1213 | ## - podSelector: 1214 | ## - matchExpressions: 1215 | ## - key: role 1216 | ## operator: In 1217 | ## values: 1218 | ## - frontend 1219 | ## 1220 | extraIngress: [] 1221 | ## @param networkPolicy.extraEgress Add extra egress rules to the NetworkPolicy 1222 | ## e.g: 1223 | ## extraEgress: 1224 | ## - ports: 1225 | ## - port: 1234 1226 | ## to: 1227 | ## - podSelector: 1228 | ## - matchLabels: 1229 | ## - role: frontend 1230 | ## - podSelector: 1231 | ## - matchExpressions: 1232 | ## - key: role 1233 | ## operator: In 1234 | ## values: 1235 | ## - frontend 1236 | ## 1237 | extraEgress: [] 1238 | ## @param networkPolicy.ingressNSMatchLabels Labels to match to allow traffic from other namespaces 1239 | ## @param networkPolicy.ingressNSPodMatchLabels Pod labels to match to allow traffic from other namespaces 1240 | ## 1241 | ingressNSMatchLabels: {} 1242 | ingressNSPodMatchLabels: {} 1243 | ## PodSecurityPolicy configuration 1244 | ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ 1245 | ## 1246 | podSecurityPolicy: 1247 | ## @param podSecurityPolicy.create Whether to create a PodSecurityPolicy. 
WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later 1248 | ## 1249 | create: false 1250 | ## @param podSecurityPolicy.enabled Enable PodSecurityPolicy's RBAC rules 1251 | ## 1252 | enabled: false 1253 | ## RBAC configuration 1254 | ## 1255 | rbac: 1256 | ## @param rbac.create Specifies whether RBAC resources should be created 1257 | ## 1258 | create: false 1259 | ## @param rbac.rules Custom RBAC rules to set 1260 | ## e.g: 1261 | ## rules: 1262 | ## - apiGroups: 1263 | ## - "" 1264 | ## resources: 1265 | ## - pods 1266 | ## verbs: 1267 | ## - get 1268 | ## - list 1269 | ## 1270 | rules: [] 1271 | ## ServiceAccount configuration 1272 | ## 1273 | serviceAccount: 1274 | ## @param serviceAccount.create Specifies whether a ServiceAccount should be created 1275 | ## 1276 | create: true 1277 | ## @param serviceAccount.name The name of the ServiceAccount to use. 1278 | ## If not set and create is true, a name is generated using the common.names.fullname template 1279 | ## 1280 | name: "" 1281 | ## @param serviceAccount.automountServiceAccountToken Whether to auto mount the service account token 1282 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#use-the-default-service-account-to-access-the-api-server 1283 | ## 1284 | automountServiceAccountToken: true 1285 | ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount 1286 | ## 1287 | annotations: {} 1288 | ## Redis® Pod Disruption Budget configuration 1289 | ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ 1290 | ## 1291 | pdb: 1292 | ## @param pdb.create Specifies whether a PodDisruptionBudget should be created 1293 | ## 1294 | create: false 1295 | ## @param pdb.minAvailable Min number of pods that must still be available after the eviction 1296 | ## 1297 | minAvailable: 1 1298 | ## @param pdb.maxUnavailable Max number of pods that can be unavailable after the eviction 1299 
| ## 1300 | maxUnavailable: "" 1301 | ## TLS configuration 1302 | ## 1303 | tls: 1304 | ## @param tls.enabled Enable TLS traffic 1305 | ## 1306 | enabled: false 1307 | ## @param tls.authClients Require clients to authenticate 1308 | ## 1309 | authClients: true 1310 | ## @param tls.autoGenerated Enable autogenerated certificates 1311 | ## 1312 | autoGenerated: false 1313 | ## @param tls.existingSecret The name of the existing secret that contains the TLS certificates 1314 | ## 1315 | existingSecret: "" 1316 | ## @param tls.certificatesSecret DEPRECATED. Use existingSecret instead. 1317 | ## 1318 | certificatesSecret: "" 1319 | ## @param tls.certFilename Certificate filename 1320 | ## 1321 | certFilename: "" 1322 | ## @param tls.certKeyFilename Certificate Key filename 1323 | ## 1324 | certKeyFilename: "" 1325 | ## @param tls.certCAFilename CA Certificate filename 1326 | ## 1327 | certCAFilename: "" 1328 | ## @param tls.dhParamsFilename File containing DH params (in order to support DH based ciphers) 1329 | ## 1330 | dhParamsFilename: "" 1331 | 1332 | ## @section Metrics Parameters 1333 | ## 1334 | 1335 | metrics: 1336 | ## @param metrics.enabled Start a sidecar prometheus exporter to expose Redis® metrics 1337 | ## 1338 | enabled: false 1339 | ## Bitnami Redis® Exporter image 1340 | ## ref: https://hub.docker.com/r/bitnami/redis-exporter/tags/ 1341 | ## @param metrics.image.registry Redis® Exporter image registry 1342 | ## @param metrics.image.repository Redis® Exporter image repository 1343 | ## @param metrics.image.tag Redis® Exporter image tag (immutable tags are recommended) 1344 | ## @param metrics.image.pullPolicy Redis® Exporter image pull policy 1345 | ## @param metrics.image.pullSecrets Redis® Exporter image pull secrets 1346 | ## 1347 | image: 1348 | registry: docker.io 1349 | repository: bitnami/redis-exporter 1350 | tag: 1.43.0-debian-11-r10 1351 | pullPolicy: IfNotPresent 1352 | ## Optionally specify an array of imagePullSecrets.
1353 | ## Secrets must be manually created in the namespace. 1354 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 1355 | ## e.g: 1356 | ## pullSecrets: 1357 | ## - myRegistryKeySecretName 1358 | ## 1359 | pullSecrets: [] 1360 | ## @param metrics.command Override default metrics container init command (useful when using custom images) 1361 | ## 1362 | command: [] 1363 | ## @param metrics.redisTargetHost A way to specify an alternative Redis® hostname 1364 | ## Useful for certificate CN/SAN matching 1365 | ## 1366 | redisTargetHost: "localhost" 1367 | ## @param metrics.extraArgs Extra arguments for Redis® exporter, for example: 1368 | ## e.g.: 1369 | ## extraArgs: 1370 | ## check-keys: myKey,myOtherKey 1371 | ## 1372 | extraArgs: {} 1373 | ## @param metrics.extraEnvVars Array with extra environment variables to add to Redis® exporter 1374 | ## e.g: 1375 | ## extraEnvVars: 1376 | ## - name: FOO 1377 | ## value: "bar" 1378 | ## 1379 | extraEnvVars: [] 1380 | ## Configure Container Security Context 1381 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod 1382 | ## @param metrics.containerSecurityContext.enabled Enabled Redis® exporter containers' Security Context 1383 | ## @param metrics.containerSecurityContext.runAsUser Set Redis® exporter containers' Security Context runAsUser 1384 | ## 1385 | containerSecurityContext: 1386 | enabled: true 1387 | runAsUser: 1001 1388 | ## @param metrics.extraVolumes Optionally specify extra list of additional volumes for the Redis® metrics sidecar 1389 | ## 1390 | extraVolumes: [] 1391 | ## @param metrics.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Redis® metrics sidecar 1392 | ## 1393 | extraVolumeMounts: [] 1394 | ## Redis® exporter resource requests and limits 1395 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 1396 | ## @param metrics.resources.limits The 
resources limits for the Redis® exporter container 1397 | ## @param metrics.resources.requests The requested resources for the Redis® exporter container 1398 | ## 1399 | resources: 1400 | limits: {} 1401 | requests: {} 1402 | ## @param metrics.podLabels Extra labels for Redis® exporter pods 1403 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ 1404 | ## 1405 | podLabels: {} 1406 | ## @param metrics.podAnnotations [object] Annotations for Redis® exporter pods 1407 | ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ 1408 | ## 1409 | podAnnotations: 1410 | prometheus.io/scrape: "true" 1411 | prometheus.io/port: "9121" 1412 | ## Redis® exporter service parameters 1413 | ## 1414 | service: 1415 | ## @param metrics.service.type Redis® exporter service type 1416 | ## 1417 | type: ClusterIP 1418 | ## @param metrics.service.port Redis® exporter service port 1419 | ## 1420 | port: 9121 1421 | ## @param metrics.service.externalTrafficPolicy Redis® exporter service external traffic policy 1422 | ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip 1423 | ## 1424 | externalTrafficPolicy: Cluster 1425 | ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) 1426 | ## 1427 | extraPorts: [] 1428 | ## @param metrics.service.loadBalancerIP Redis® exporter service Load Balancer IP 1429 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer 1430 | ## 1431 | loadBalancerIP: "" 1432 | ## @param metrics.service.loadBalancerSourceRanges Redis® exporter service Load Balancer sources 1433 | ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service 1434 | ## e.g. 
1435 | ## loadBalancerSourceRanges: 1436 | ## - 10.10.10.0/24 1437 | ## 1438 | loadBalancerSourceRanges: [] 1439 | ## @param metrics.service.annotations Additional custom annotations for Redis® exporter service 1440 | ## 1441 | annotations: {} 1442 | ## Prometheus Service Monitor 1443 | ## ref: https://github.com/coreos/prometheus-operator 1444 | ## https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint 1445 | ## 1446 | serviceMonitor: 1447 | ## @param metrics.serviceMonitor.enabled Create ServiceMonitor resource(s) for scraping metrics using PrometheusOperator 1448 | ## 1449 | enabled: false 1450 | ## @param metrics.serviceMonitor.namespace The namespace in which the ServiceMonitor will be created 1451 | ## 1452 | namespace: "" 1453 | ## @param metrics.serviceMonitor.interval The interval at which metrics should be scraped 1454 | ## 1455 | interval: 30s 1456 | ## @param metrics.serviceMonitor.scrapeTimeout The timeout after which the scrape is ended 1457 | ## 1458 | scrapeTimeout: "" 1459 | ## @param metrics.serviceMonitor.relabellings Metrics RelabelConfigs to apply to samples before scraping. 1460 | ## 1461 | relabellings: [] 1462 | ## @param metrics.serviceMonitor.metricRelabelings Metrics RelabelConfigs to apply to samples before ingestion. 
1463 | ## 1464 | metricRelabelings: [] 1465 | ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint 1466 | ## 1467 | honorLabels: false 1468 | ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor resource(s) can be discovered by Prometheus 1469 | ## 1470 | additionalLabels: {} 1471 | ## Custom PrometheusRule to be defined 1472 | ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions 1473 | ## 1474 | prometheusRule: 1475 | ## @param metrics.prometheusRule.enabled Create a custom prometheusRule Resource for scraping metrics using PrometheusOperator 1476 | ## 1477 | enabled: false 1478 | ## @param metrics.prometheusRule.namespace The namespace in which the prometheusRule will be created 1479 | ## 1480 | namespace: "" 1481 | ## @param metrics.prometheusRule.additionalLabels Additional labels for the prometheusRule 1482 | ## 1483 | additionalLabels: {} 1484 | ## @param metrics.prometheusRule.rules Custom Prometheus rules 1485 | ## e.g: 1486 | ## rules: 1487 | ## - alert: RedisDown 1488 | ## expr: redis_up{service="{{ template "common.names.fullname" . }}-metrics"} == 0 1489 | ## for: 2m 1490 | ## labels: 1491 | ## severity: error 1492 | ## annotations: 1493 | ## summary: Redis® instance {{ "{{ $labels.instance }}" }} down 1494 | ## description: Redis® instance {{ "{{ $labels.instance }}" }} is down 1495 | ## - alert: RedisMemoryHigh 1496 | ## expr: > 1497 | ## redis_memory_used_bytes{service="{{ template "common.names.fullname" . }}-metrics"} * 100 1498 | ## / 1499 | ## redis_memory_max_bytes{service="{{ template "common.names.fullname" . 
}}-metrics"} 1500 | ## > 90 1501 | ## for: 2m 1502 | ## labels: 1503 | ## severity: error 1504 | ## annotations: 1505 | ## summary: Redis® instance {{ "{{ $labels.instance }}" }} is using too much memory 1506 | ## description: | 1507 | ## Redis® instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. 1508 | ## - alert: RedisKeyEviction 1509 | ## expr: | 1510 | ## increase(redis_evicted_keys_total{service="{{ template "common.names.fullname" . }}-metrics"}[5m]) > 0 1511 | ## for: 1s 1512 | ## labels: 1513 | ## severity: error 1514 | ## annotations: 1515 | ## summary: Redis® instance {{ "{{ $labels.instance }}" }} has evicted keys 1516 | ## description: | 1517 | ## Redis® instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 1518 | ## 1519 | rules: [] 1520 | 1521 | ## @section Init Container Parameters 1522 | ## 1523 | 1524 | ## 'volumePermissions' init container parameters 1525 | ## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values 1526 | ## based on the *podSecurityContext/*containerSecurityContext parameters 1527 | ## 1528 | volumePermissions: 1529 | ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` 1530 | ## 1531 | enabled: false 1532 | ## Bitnami Shell image 1533 | ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ 1534 | ## @param volumePermissions.image.registry Bitnami Shell image registry 1535 | ## @param volumePermissions.image.repository Bitnami Shell image repository 1536 | ## @param volumePermissions.image.tag Bitnami Shell image tag (immutable tags are recommended) 1537 | ## @param volumePermissions.image.pullPolicy Bitnami Shell image pull policy 1538 | ## @param volumePermissions.image.pullSecrets Bitnami Shell image pull secrets 1539 | ## 1540 | image: 1541 | registry: docker.io 1542 | repository: bitnami/bitnami-shell 1543 | tag: 
11-debian-11-r17 1544 | pullPolicy: IfNotPresent 1545 | ## Optionally specify an array of imagePullSecrets. 1546 | ## Secrets must be manually created in the namespace. 1547 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 1548 | ## e.g: 1549 | ## pullSecrets: 1550 | ## - myRegistryKeySecretName 1551 | ## 1552 | pullSecrets: [] 1553 | ## Init container's resource requests and limits 1554 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 1555 | ## @param volumePermissions.resources.limits The resources limits for the init container 1556 | ## @param volumePermissions.resources.requests The requested resources for the init container 1557 | ## 1558 | resources: 1559 | limits: {} 1560 | requests: {} 1561 | ## Init container Container Security Context 1562 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container 1563 | ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser 1564 | ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the 1565 | ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` 1566 | ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) 1567 | ## 1568 | containerSecurityContext: 1569 | runAsUser: 0 1570 | 1571 | ## init-sysctl container parameters 1572 | ## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) 1573 | ## 1574 | sysctl: 1575 | ## @param sysctl.enabled Enable init container to modify Kernel settings 1576 | ## 1577 | enabled: false 1578 | ## Bitnami Shell image 1579 | ## ref: https://hub.docker.com/r/bitnami/bitnami-shell/tags/ 1580 | ## @param sysctl.image.registry Bitnami Shell image registry 1581 | ## @param sysctl.image.repository Bitnami Shell image repository 1582 | ## @param 
sysctl.image.tag Bitnami Shell image tag (immutable tags are recommended) 1583 | ## @param sysctl.image.pullPolicy Bitnami Shell image pull policy 1584 | ## @param sysctl.image.pullSecrets Bitnami Shell image pull secrets 1585 | ## 1586 | image: 1587 | registry: docker.io 1588 | repository: bitnami/bitnami-shell 1589 | tag: 11-debian-11-r17 1590 | pullPolicy: IfNotPresent 1591 | ## Optionally specify an array of imagePullSecrets. 1592 | ## Secrets must be manually created in the namespace. 1593 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 1594 | ## e.g: 1595 | ## pullSecrets: 1596 | ## - myRegistryKeySecretName 1597 | ## 1598 | pullSecrets: [] 1599 | ## @param sysctl.command Override default init-sysctl container command (useful when using custom images) 1600 | ## 1601 | command: [] 1602 | ## @param sysctl.mountHostSys Mount the host `/sys` folder to `/host-sys` 1603 | ## 1604 | mountHostSys: false 1605 | ## Init container's resource requests and limits 1606 | ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ 1607 | ## @param sysctl.resources.limits The resources limits for the init container 1608 | ## @param sysctl.resources.requests The requested resources for the init container 1609 | ## 1610 | resources: 1611 | limits: {} 1612 | requests: {} 1613 | 1614 | ## @section useExternalDNS Parameters 1615 | ## 1616 | ## @param useExternalDNS.enabled Enable various syntax that would enable external-dns to work. Note this requires a working installation of `external-dns` to be usable. 1617 | ## @param useExternalDNS.additionalAnnotations Extra annotations to be utilized when `external-dns` is enabled. 1618 | ## @param useExternalDNS.annotationKey The annotation key utilized when `external-dns` is enabled. 1619 | ## @param useExternalDNS.suffix The DNS suffix utilized when `external-dns` is enabled. Note that we prepend the suffix with the full name of the release. 
1620 | ## 1621 | useExternalDNS: 1622 | enabled: false 1623 | suffix: "" 1624 | annotationKey: external-dns.alpha.kubernetes.io/ 1625 | additionalAnnotations: {} --------------------------------------------------------------------------------