├── .github ├── release.yml └── workflows │ ├── cluster.yml │ ├── tagpr.yml │ └── test.yml ├── cluster-ci.sh ├── .gitignore ├── go.mod ├── LICENSE ├── redis.go ├── CHANGELOG.md ├── go.sum ├── .tagpr ├── compose.yml ├── slog_test.go ├── raus_test.go ├── raus.go ├── CLAUDE.md └── README.md /.github/release.yml: -------------------------------------------------------------------------------- 1 | changelog: 2 | exclude: 3 | labels: 4 | - tagpr 5 | -------------------------------------------------------------------------------- /cluster-ci.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | grep image: compose.yml | sort | uniq | awk '{print $2}' | xargs -I {} docker pull {} 5 | docker compose up --exit-code-from app 6 | -------------------------------------------------------------------------------- /.github/workflows/cluster.yml: -------------------------------------------------------------------------------- 1 | name: cluster test 2 | on: [push] 3 | jobs: 4 | test: 5 | name: Build 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Check out code into the Go module directory 9 | uses: actions/checkout@v4 10 | - name: cluster test 11 | run: | 12 | bash cluster-ci.sh 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Compiled Object files, Static and Dynamic libs (Shared Objects) 2 | *.o 3 | *.a 4 | *.so 5 | 6 | # Folders 7 | _obj 8 | _test 9 | 10 | # Architecture specific extensions/prefixes 11 | *.[568vq] 12 | [568vq].out 13 | 14 | *.cgo1.go 15 | *.cgo2.c 16 | _cgo_defun.c 17 | _cgo_gotypes.go 18 | _cgo_export.* 19 | 20 | _testmain.go 21 | 22 | *.exe 23 | *.test 24 | *.prof 25 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/fujiwara/raus 2 | 3 | go 1.23 4 | 5 | require ( 6 | github.com/google/go-cmp v0.5.8 7 | github.com/google/uuid v1.6.0 8 | github.com/redis/go-redis/v9 v9.11.0 9 | github.com/soh335/go-test-redisserver v0.1.0 10 | ) 11 | 12 | require ( 13 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 14 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /.github/workflows/tagpr.yml: -------------------------------------------------------------------------------- 1 | name: tagpr 2 | on: 3 | push: 4 | branches: ["main"] 5 | 6 | permissions: 7 | pull-requests: write 8 | packages: write 9 | contents: write 10 | actions: write 11 | issues: write 12 | 13 | jobs: 14 | deploy: 15 | runs-on: ubuntu-latest 16 | steps: 17 | - uses: actions/checkout@v4 18 | with: 19 | ref: ${{ github.ref }} 20 | - uses: Songmu/tagpr@v1 21 | id: tagpr 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: test 2 | on: [push] 3 | jobs: 4 | test: 5 | strategy: 6 | matrix: 7 | go: 8 | - "1.23" 9 | - "1.24" 10 | name: Build 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Set up Go 14 | uses: actions/setup-go@v5 15 | with: 16 | go-version: ${{ matrix.go }} 17 | id: go 18 | 19 | - name: Check out code into the Go module directory 20 | uses: 
actions/checkout@v4 21 | 22 | - name: setup redis 23 | uses: shogo82148/actions-setup-redis@v1 24 | with: 25 | redis-version: "7.x" 26 | auto-start: "false" 27 | 28 | - name: Build & Test 29 | run: | 30 | go test -v ./... 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 FUJIWARA Shunichiro 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /redis.go: -------------------------------------------------------------------------------- 1 | package raus 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/redis/go-redis/v9" 8 | ) 9 | 10 | type RedisClient interface { 11 | Close() error 12 | Subscribe(ctx context.Context, channels ...string) *redis.PubSub 13 | SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.BoolCmd 14 | Get(ctx context.Context, key string) *redis.StringCmd 15 | Del(ctx context.Context, keys ...string) *redis.IntCmd 16 | Publish(ctx context.Context, channel string, message interface{}) *redis.IntCmd 17 | TxPipeline() redis.Pipeliner 18 | } 19 | 20 | type RedisOptions struct { 21 | Cluster bool 22 | Addrs []string 23 | Username string 24 | Password string 25 | DB int 26 | } 27 | 28 | func (o *RedisOptions) NewClient() RedisClient { 29 | if o.Cluster { 30 | return redis.NewClusterClient(&redis.ClusterOptions{ 31 | Addrs: o.Addrs, 32 | Username: o.Username, 33 | Password: o.Password, 34 | }) 35 | } else { 36 | return redis.NewClient(&redis.Options{ 37 | Network: "tcp", 38 | Addr: o.Addrs[0], 39 | Username: o.Username, 40 | Password: o.Password, 41 | DB: o.DB, 42 | }) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## [v0.3.0](https://github.com/fujiwara/raus/compare/v0.2.0...v0.3.0) - 2025-08-02 4 | - Migrate to log/slog with structured logging by @fujiwara in https://github.com/fujiwara/raus/pull/9 5 | 6 | ## [v0.2.0](https://github.com/fujiwara/raus/compare/v0.1.0...v0.2.0) - 2025-08-01 7 | - Modernize: Update Go 1.23 and replace pkg/errors with fmt.Errorf by @fujiwara in https://github.com/fujiwara/raus/pull/5 8 | - Update Redis client from v8 to v9 by @fujiwara in 
https://github.com/fujiwara/raus/pull/6 9 | - Update dependencies to latest versions by @fujiwara in https://github.com/fujiwara/raus/pull/7 10 | - Update documentation for Snowflake machine ID purpose by @fujiwara in https://github.com/fujiwara/raus/pull/8 11 | 12 | ## [v0.1.0](https://github.com/fujiwara/raus/compare/v0.0.3...v0.1.0) - 2022-08-16 13 | - Bump versions by @fujiwara in https://github.com/fujiwara/raus/pull/3 14 | - RedisCluster support by @fujiwara in https://github.com/fujiwara/raus/pull/4 15 | 16 | ## [v0.0.3](https://github.com/fujiwara/raus/compare/v0.0.2...v0.0.3) - 2020-05-07 17 | - test on actions by @fujiwara in https://github.com/fujiwara/raus/pull/2 18 | 19 | ## [v0.0.2](https://github.com/fujiwara/raus/commits/v0.0.2) - 2017-12-15 20 | - use multi/exec to holdLock() by @fujiwara in https://github.com/fujiwara/raus/pull/1 21 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= 2 | github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= 3 | github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= 4 | github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= 5 | github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 6 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 7 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= 8 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 9 | github.com/gomodule/redigo v1.7.0 h1:ZKld1VOtsGhAe37E7wMxEDgAlGM5dvFY+DiOhSkhP9Y= 10 | github.com/gomodule/redigo v1.7.0/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= 11 | github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= 12 | github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 13 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 14 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 15 | github.com/redis/go-redis/v9 v9.11.0 h1:E3S08Gl/nJNn5vkxd2i78wZxWAPNZgUNTp8WIJUAiIs= 16 | github.com/redis/go-redis/v9 v9.11.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= 17 | github.com/soh335/go-test-redisserver v0.1.0 h1:FZYs/CVmUFP1uHVq7avxU+HpRoFIv2JWzhdV/g2Hyk4= 18 | github.com/soh335/go-test-redisserver v0.1.0/go.mod h1:vofbm8mr+As7DkCPPNA9Jy/KOA6bBl50xd0VUkisMzY= 19 | -------------------------------------------------------------------------------- /.tagpr: -------------------------------------------------------------------------------- 1 | # config file for the tagpr in git config format 2 | # The tagpr generates the initial configuration, which you can rewrite to suit your environment. 3 | # CONFIGURATIONS: 4 | # tagpr.releaseBranch 5 | # Generally, it is "main." It is the branch for releases. The tagpr tracks this branch, 6 | # creates or updates a pull request as a release candidate, or tags when they are merged. 7 | # 8 | # tagpr.versionFile 9 | # Versioning file containing the semantic version needed to be updated at release. 10 | # It will be synchronized with the "git tag". 
11 | # Often this is a meta-information file such as gemspec, setup.cfg, package.json, etc. 12 | # Sometimes the source code file, such as version.go or Bar.pm, is used. 13 | # If you do not want to use versioning files but only git tags, specify the "-" string here. 14 | # You can specify multiple version files by comma separated strings. 15 | # 16 | # tagpr.vPrefix 17 | # Flag whether or not v-prefix is added to semver when git tagging. (e.g. v1.2.3 if true) 18 | # This is only a tagging convention, not how it is described in the version file. 19 | # 20 | # tagpr.changelog (Optional) 21 | # Flag whether or not changelog is added or changed during the release. 22 | # 23 | # tagpr.command (Optional) 24 | # Command to change files just before release and versioning. 25 | # 26 | # tagpr.postVersionCommand (Optional) 27 | # Command to change files just after versioning. 28 | # 29 | # tagpr.template (Optional) 30 | # Pull request template file in go template format 31 | # 32 | # tagpr.templateText (Optional) 33 | # Pull request template text in go template format 34 | # 35 | # tagpr.release (Optional) 36 | # GitHub Release creation behavior after tagging [true, draft, false] 37 | # If this value is not set, the release is to be created. 38 | # 39 | # tagpr.majorLabels (Optional) 40 | # Label of major update targets. Default is [major] 41 | # 42 | # tagpr.minorLabels (Optional) 43 | # Label of minor update targets. Default is [minor] 44 | # 45 | # tagpr.commitPrefix (Optional) 46 | # Prefix of commit message. Default is "[tagpr]" 47 | # 48 | [tagpr] 49 | vPrefix = true 50 | releaseBranch = main 51 | versionFile = - 52 | -------------------------------------------------------------------------------- /compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | app: 4 | image: docker.io/golang:1.24-bookworm 5 | volumes: 6 | - .:/go/src/github.com/fujiwara/raus 7 | working_dir: /go/src/github.com/fujiwara/raus 8 | environment: 9 | - 'REDIS_URL=rediscluster://:bitnami@redis-node-0:6379' 10 | command: 11 | - /bin/sh 12 | - -c 13 | - | 14 | sleep 10 15 | go test -v ./... 
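      # The app container sleeps 10 seconds so the Redis cluster has time to form, then runs the test suite against REDIS_URL.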
16 | 17 | redis-node-0: 18 | depends_on: 19 | - app 20 | image: docker.io/bitnami/redis-cluster:7.4 21 | volumes: 22 | - redis-cluster_data-0:/bitnami/redis/data 23 | environment: 24 | - 'REDIS_PASSWORD=bitnami' 25 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 26 | 27 | redis-node-1: 28 | depends_on: 29 | - app 30 | image: docker.io/bitnami/redis-cluster:7.4 31 | volumes: 32 | - redis-cluster_data-1:/bitnami/redis/data 33 | environment: 34 | - 'REDIS_PASSWORD=bitnami' 35 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 36 | 37 | redis-node-2: 38 | depends_on: 39 | - app 40 | image: docker.io/bitnami/redis-cluster:7.4 41 | volumes: 42 | - redis-cluster_data-2:/bitnami/redis/data 43 | environment: 44 | - 'REDIS_PASSWORD=bitnami' 45 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 46 | 47 | redis-node-3: 48 | depends_on: 49 | - app 50 | image: docker.io/bitnami/redis-cluster:7.4 51 | volumes: 52 | - redis-cluster_data-3:/bitnami/redis/data 53 | environment: 54 | - 'REDIS_PASSWORD=bitnami' 55 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 56 | 57 | redis-node-4: 58 | depends_on: 59 | - app 60 | image: docker.io/bitnami/redis-cluster:7.4 61 | volumes: 62 | - redis-cluster_data-4:/bitnami/redis/data 63 | environment: 64 | - 'REDIS_PASSWORD=bitnami' 65 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 66 | 67 | redis-node-5: 68 | image: docker.io/bitnami/redis-cluster:7.4 69 | volumes: 70 | - redis-cluster_data-5:/bitnami/redis/data 71 | depends_on: 72 | - app 73 | - redis-node-0 74 | - redis-node-1 75 | - redis-node-2 76 | - redis-node-3 77 | - redis-node-4 78 | environment: 79 | - 'REDIS_PASSWORD=bitnami' 80 | - 'REDISCLI_AUTH=bitnami' 81 | - 'REDIS_CLUSTER_REPLICAS=1' 82 | - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2 redis-node-3 redis-node-4 redis-node-5' 83 | - 'REDIS_CLUSTER_CREATOR=yes' 84 | 85 | volumes: 86 | redis-cluster_data-0: 87 | driver: local 88 | redis-cluster_data-1: 89 | driver: local 90 | redis-cluster_data-2: 91 | driver: local 92 | redis-cluster_data-3: 93 | driver: local 94 | redis-cluster_data-4: 95 | driver: local 96 | redis-cluster_data-5: 97 | driver: local 98 | -------------------------------------------------------------------------------- /slog_test.go: -------------------------------------------------------------------------------- 1 | package raus_test 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "log/slog" 8 | "testing" 9 | "time" 10 | 11 | "github.com/fujiwara/raus" 12 | ) 13 | 14 | func TestSlogIntegration(t *testing.T) { 15 | // Create a buffer to capture log output 16 | var buf bytes.Buffer 17 | 18 | // Create JSON handler for structured logging 19 | handler := slog.NewJSONHandler(&buf, &slog.HandlerOptions{ 20 | Level: slog.LevelDebug, 21 | }) 22 | logger := slog.New(handler) 23 | 24 | // Set default logger 25 | raus.SetDefaultSlogLogger(logger) 26 | 27 | // Create raus instance 28 | r, err := raus.New(redisURL, 0, 3) 29 | if err != nil { 30 | t.Fatal(err) 31 | } 32 | 33 | // Test instance-specific logger 34 | var instanceBuf bytes.Buffer 35 | instanceHandler := slog.NewJSONHandler(&instanceBuf, &slog.HandlerOptions{ 36 | Level: slog.LevelInfo, 37 | }) 38 | instanceLogger := slog.New(instanceHandler) 39 | r.SetSlogLogger(instanceLogger) 40 | 41 | // Get machine ID 42 | ctx, cancel := 
context.WithTimeout(context.Background(), 10*time.Second) 43 | defer cancel() 44 | 45 | machineID, errCh, err := r.Get(ctx) 46 | if err != nil { 47 | t.Fatal(err) 48 | } 49 | 50 | // Verify machine ID is valid 51 | if machineID > 3 { 52 | t.Errorf("Invalid machine ID: %d", machineID) 53 | } 54 | 55 | // Let it run for a moment to generate logs 56 | time.Sleep(100 * time.Millisecond) 57 | cancel() 58 | 59 | // Wait for error channel to close 60 | select { 61 | case <-errCh: 62 | case <-time.After(5 * time.Second): 63 | } 64 | 65 | // Verify structured logs were generated 66 | output := instanceBuf.String() 67 | if output == "" { 68 | t.Error("No log output generated") 69 | } 70 | 71 | // Parse JSON logs to verify structure 72 | lines := bytes.Split(instanceBuf.Bytes(), []byte("\n")) 73 | for _, line := range lines { 74 | if len(line) == 0 { 75 | continue 76 | } 77 | 78 | var logEntry map[string]interface{} 79 | if err := json.Unmarshal(line, &logEntry); err != nil { 80 | t.Errorf("Failed to parse JSON log: %v", err) 81 | continue 82 | } 83 | 84 | // Verify required fields exist 85 | if _, ok := logEntry["time"]; !ok { 86 | t.Error("Missing 'time' field in log entry") 87 | } 88 | if _, ok := logEntry["level"]; !ok { 89 | t.Error("Missing 'level' field in log entry") 90 | } 91 | if _, ok := logEntry["msg"]; !ok { 92 | t.Error("Missing 'msg' field in log entry") 93 | } 94 | } 95 | 96 | t.Logf("Generated %d log entries with structured logging", len(lines)-1) 97 | } 98 | 99 | func TestDefaultSlogLogger(t *testing.T) { 100 | // Test that default slog logger works 101 | var buf bytes.Buffer 102 | handler := slog.NewJSONHandler(&buf, &slog.HandlerOptions{ 103 | Level: slog.LevelInfo, 104 | }) 105 | customLogger := slog.New(handler) 106 | 107 | // Set global default 108 | raus.SetDefaultSlogLogger(customLogger) 109 | 110 | r, err := raus.New(redisURL, 0, 3) 111 | if err != nil { 112 | t.Fatal(err) 113 | } 114 | 115 | ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) 116 | defer cancel() 117 | 118 | machineID, errCh, err := r.Get(ctx) 119 | if err != nil { 120 | t.Fatal(err) 121 | } 122 | 123 | if machineID > 3 { 124 | t.Errorf("Invalid machine ID: %d", machineID) 125 | } 126 | 127 | cancel() 128 | select { 129 | case <-errCh: 130 | case <-time.After(2 * time.Second): 131 | } 132 | 133 | // Verify logs were written to our custom logger 134 | output := buf.String() 135 | if output == "" { 136 | t.Error("No log output generated with custom default logger") 137 | } 138 | } 139 | -------------------------------------------------------------------------------- /raus_test.go: -------------------------------------------------------------------------------- 1 | package raus_test 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "os" 7 | "sync" 8 | "testing" 9 | "time" 10 | 11 | "github.com/fujiwara/raus" 12 | "github.com/google/go-cmp/cmp" 13 | redistest "github.com/soh335/go-test-redisserver" 14 | ) 15 | 16 | var invalidMinMaxSet = [][]uint{ 17 | {0, 0}, 18 | {2, 1}, 19 | } 20 | 21 | var redisURL = "redis://localhost:26379" 22 | 23 | func init() { 24 | if u := os.Getenv("REDIS_URL"); u != "" { 25 | log.Println("REDIS_URL=", u) 26 | redisURL = u 27 | } 28 | } 29 | 30 | type parseTest struct { 31 | URI string 32 | Opt *raus.RedisOptions 33 | Namespace string 34 | } 35 | 36 | var parseTestErrorSet = []string{ 37 | "http://example.com", 38 | "redis:///var/tmp/test.sock", 39 | "localhost:6379", 40 | "localhost", 41 | "rediscluster://127.0.0.1/3", 42 | } 43 | 44 | var parseTestSet = []parseTest{ 
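	// each test case: input URI, the expected parsed *raus.RedisOptions, and the expected namespace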
45 | { 46 | "redis://localhost:6379", 47 | &raus.RedisOptions{ 48 | Addrs: []string{"localhost:6379"}, 49 | DB: 0, 50 | }, 51 | raus.DefaultNamespace, 52 | }, 53 | { 54 | "redis://127.0.0.1/2?ns=foo", 55 | &raus.RedisOptions{ 56 | Addrs: []string{"127.0.0.1:6379"}, 57 | DB: 2, 58 | }, 59 | "foo", 60 | }, 61 | { 62 | "rediscluster://127.0.0.1/?ns=foo", 63 | &raus.RedisOptions{ 64 | Cluster: true, 65 | Addrs: []string{"127.0.0.1:6379"}, 66 | }, 67 | "foo", 68 | }, 69 | { 70 | "rediscluster://127.0.0.1:6380/?ns=foo", 71 | &raus.RedisOptions{ 72 | Cluster: true, 73 | Addrs: []string{"127.0.0.1:6380"}, 74 | }, 75 | "foo", 76 | }, 77 | } 78 | 79 | func TestMain(m *testing.M) { 80 | if os.Getenv("REDIS_URL") == "" { 81 | conf := redistest.Config{"port": "26379", "save": ""} 82 | s, err := redistest.NewServer(true, conf) 83 | if err != nil { 84 | panic(err) 85 | } 86 | code := m.Run() 87 | s.Stop() 88 | os.Exit(code) 89 | } else { 90 | os.Exit(m.Run()) 91 | } 92 | } 93 | 94 | func TestParseRedisURI(t *testing.T) { 95 | for _, ts := range parseTestSet { 96 | opt, ns, err := raus.ParseRedisURI(ts.URI) 97 | t.Logf("uri %s parsed to %#v %s", ts.URI, opt, ns) 98 | if err != nil { 99 | t.Error(err) 100 | } 101 | if diff := cmp.Diff(opt, ts.Opt); diff != "" { 102 | t.Error("unexpected options", diff) 103 | } 104 | if ns != ts.Namespace { 105 | t.Errorf("invalid Namespace %s expected %s", ns, ts.Namespace) 106 | } 107 | } 108 | 109 | for _, ts := range parseTestErrorSet { 110 | opt, ns, err := raus.ParseRedisURI(ts) 111 | t.Logf("uri %s parsed to %#v %s", ts, opt, ns) 112 | if err == nil { 113 | t.Errorf("invalid uri %s should be parse error.", ts) 114 | } 115 | // t.Logf("uri %s parse error: %s", s, err) 116 | } 117 | } 118 | 119 | func TestNew(t *testing.T) { 120 | for _, s := range invalidMinMaxSet { 121 | _, err := raus.New("redis://localhost:6379", s[0], s[1]) 122 | if err != nil { 123 | t.Logf("[min max]=%v returns error:%s", s, err) 124 | } else { 125 | t.Errorf("[min max]=%v must return error", s) 126 | } 127 | } 128 | } 129 | 130 | func ExampleNew() { 131 | // prepare context 132 | ctx, cancel := context.WithCancel(context.Background()) 133 | 134 | r, _ := raus.New(redisURL, 0, 3) 135 | id, ch, _ := r.Get(ctx) 136 | log.Printf("Got id %d", id) 137 | 138 | // watch error 139 | var wg sync.WaitGroup 140 | wg.Add(1) 141 | go func() { 142 | defer wg.Done() 143 | err, more := <-ch 144 | if !more { 145 | // raus shut down successfully 146 | return 147 | } else { 148 | // fatal error 149 | panic(err) 150 | } 151 | }() 152 | 153 | // Run your application code 154 | 155 | // notify shutdown 156 | cancel() 157 | 158 | wg.Wait() 159 | } 160 | 161 | func TestGet(t *testing.T) { 162 | ctx, cancel := context.WithCancel(context.Background()) 163 | defer cancel() 164 | r, err := raus.New(redisURL, 0, 3) 165 | if err != nil { 166 | t.Error(err) 167 | } 168 | id, ch, err := r.Get(ctx) 169 | if err != nil { 170 | t.Error(err) 171 | t.Fail() 172 | return 173 | } 174 | log.Printf("Got id %d", id) 175 | var wg sync.WaitGroup 176 | wg.Add(1) 177 | go func() { 178 | defer wg.Done() 179 | err, more := <-ch 180 | if !more { 181 | return 182 | } 183 | t.Error(err) 184 | }() 185 | time.Sleep(time.Second) 186 | cancel() 187 | wg.Wait() 188 | } 189 | 190 | func TestGetRace(t *testing.T) { 191 | var wg sync.WaitGroup 192 | for i := 0; i <= 5; i++ { 193 | wg.Add(1) 194 | time.Sleep(500 * time.Millisecond) 195 | go func(i int) { 196 | defer wg.Done() 197 | ctx, cancel := context.WithCancel(context.Background()) 198 | r, err := 
raus.New(redisURL, 0, 5) 199 | if err != nil { 200 | t.Error(err) 201 | } 202 | id, ch, err := r.Get(ctx) 203 | if err != nil { 204 | t.Error(err) 205 | } 206 | log.Printf("Got id %d", id) 207 | wg.Add(1) 208 | go func() { 209 | defer wg.Done() 210 | err, more := <-ch 211 | if !more { 212 | return 213 | } 214 | t.Error(err) 215 | }() 216 | time.Sleep(5 * time.Second) 217 | cancel() 218 | }(i) 219 | } 220 | wg.Wait() 221 | } 222 | -------------------------------------------------------------------------------- /raus.go: -------------------------------------------------------------------------------- 1 | package raus 2 | 3 | import ( 4 | "context" 5 | crand "crypto/rand" 6 | "encoding/binary" 7 | "fmt" 8 | "log/slog" 9 | "math/rand" 10 | "net" 11 | "net/url" 12 | "os" 13 | "strconv" 14 | "strings" 15 | "time" 16 | 17 | "github.com/google/uuid" 18 | "github.com/redis/go-redis/v9" 19 | ) 20 | 21 | type Raus struct { 22 | rand *rand.Rand 23 | uuid string 24 | id uint 25 | min uint 26 | max uint 27 | redisOptions *RedisOptions 28 | namespace string 29 | pubSubChannel string 30 | channel chan error 31 | logger *slog.Logger 32 | } 33 | 34 | const ( 35 | DefaultNamespace = "raus" 36 | pubSubChannelSuffix = ":broadcast" 37 | ) 38 | 39 | var ( 40 | MaxCandidate = 10 41 | LockExpires = 60 * time.Second 42 | SubscribeTimeout = time.Second * 3 43 | CleanupTimeout = time.Second * 30 44 | defaultLogger = slog.New(slog.NewTextHandler(os.Stderr, nil)) 45 | ) 46 | 47 | type fatal interface { 48 | isFatal() bool 49 | } 50 | 51 | func isFatal(err error) bool { 52 | fe, ok := err.(fatal) 53 | return ok && fe.isFatal() 54 | } 55 | 56 | type fatalError struct { 57 | error 58 | } 59 | 60 | func (e fatalError) isFatal() bool { 61 | return true 62 | } 63 | 64 | // SetDefaultSlogLogger sets the default slog.Logger for new Raus instances. 65 | func SetDefaultSlogLogger(l *slog.Logger) { 66 | defaultLogger = l 67 | } 68 | 69 | // SetSlogLogger sets the slog.Logger for this Raus instance. 70 | func (r *Raus) SetSlogLogger(l *slog.Logger) { 71 | r.logger = l 72 | } 73 | 74 | // New creates *Raus object. 
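// The machine id is allocated from the inclusive range [min, max]; max must be greater than min.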
75 | func New(redisURI string, min, max uint) (*Raus, error) { 76 | var s int64 77 | if err := binary.Read(crand.Reader, binary.LittleEndian, &s); err != nil { 78 | s = time.Now().UnixNano() 79 | } 80 | if min >= max { 81 | return nil, fmt.Errorf("max should be greater than min") 82 | } 83 | op, ns, err := ParseRedisURI(redisURI) 84 | if err != nil { 85 | return nil, err 86 | } 87 | u, err := uuid.NewRandom() 88 | if err != nil { 89 | return nil, err 90 | } 91 | return &Raus{ 92 | rand: rand.New(rand.NewSource(s)), 93 | uuid: u.String(), 94 | min: min, 95 | max: max, 96 | redisOptions: op, 97 | namespace: ns, 98 | pubSubChannel: ns + pubSubChannelSuffix, 99 | channel: make(chan error), 100 | logger: defaultLogger, 101 | }, nil 102 | } 103 | 104 | // ParseRedisURI parses uri for redis (redis://host:port/db?ns=namespace) 105 | func ParseRedisURI(s string) (*RedisOptions, string, error) { 106 | u, err := url.Parse(s) 107 | if err != nil { 108 | return nil, "", err 109 | } 110 | op := &RedisOptions{} 111 | switch u.Scheme { 112 | case "redis": 113 | op.Cluster = false 114 | case "rediscluster": 115 | op.Cluster = true 116 | default: 117 | return nil, "", fmt.Errorf("invalid scheme %s", u.Scheme) 118 | } 119 | h, p, err := net.SplitHostPort(u.Host) 120 | if err != nil { 121 | h = u.Host 122 | p = "6379" 123 | } 124 | op.Addrs = []string{h + ":" + p} 125 | 126 | if u.User != nil { 127 | if uname := u.User.Username(); uname != "" { 128 | op.Username = uname 129 | } 130 | if pass, ok := u.User.Password(); ok { 131 | op.Password = pass 132 | } 133 | } 134 | 135 | if u.Path == "" || u.Path == "/" { 136 | op.DB = 0 137 | } else if op.Cluster { 138 | return nil, "", fmt.Errorf("database is not supported for redis cluster") 139 | } else { 140 | ps := strings.Split(u.Path, "/") 141 | if len(ps) > 1 { 142 | i, err := strconv.Atoi(ps[1]) 143 | if err != nil { 144 | return nil, "", fmt.Errorf("invalid database %s", ps[1]) 145 | } 146 | op.DB = i 147 | } else { 148 | op.DB = 0 149 | } 150 | } 151 | ns := u.Query()["ns"] 152 | if len(ns) > 0 { 153 | return op, ns[0], nil 154 | } else { 155 | return op, DefaultNamespace, nil 156 | } 157 | } 158 | 159 | func (r *Raus) size() uint { 160 | return r.max - r.min 161 | } 162 | 163 | // Get gets unique id ranged between min and max. 
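// The returned channel reports fatal coordination errors; it is closed when the background coordination goroutine exits (after ctx cancellation and lock release, or after a fatal error).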
164 | func (r *Raus) Get(ctx context.Context) (uint, chan error, error) { 165 | if err := r.subscribe(ctx); err != nil { 166 | return 0, r.channel, err 167 | } 168 | go r.publish(ctx) 169 | return r.id, r.channel, nil 170 | } 171 | 172 | func (r *Raus) subscribe(ctx context.Context) error { 173 | // table for looking up unused id 174 | usedIds := make(map[uint]bool, r.size()) 175 | 176 | c := r.redisOptions.NewClient() 177 | defer c.Close() 178 | 179 | // subscribe to channel, and reading other's id (3 sec) 180 | pubsub := c.Subscribe(ctx, r.pubSubChannel) 181 | start := time.Now() 182 | LISTING: 183 | for time.Since(start) < SubscribeTimeout { 184 | select { 185 | case <-ctx.Done(): 186 | return ctx.Err() 187 | default: 188 | } 189 | _msg, err := pubsub.ReceiveTimeout(ctx, SubscribeTimeout) 190 | if err != nil { 191 | break LISTING 192 | } 193 | switch msg := _msg.(type) { 194 | case *redis.Message: 195 | xuuid, xid, err := parsePayload(msg.Payload) 196 | if err != nil { 197 | r.logger.Warn("failed to parse payload", "error", err) 198 | break 199 | } 200 | if xuuid == r.uuid { 201 | // other's uuid is same to myself (X_X) 202 | return fmt.Errorf("duplicate uuid") 203 | } 204 | r.logger.Debug("discovered other instance", "uuid", xuuid, "machine_id", xid) 205 | usedIds[xid] = true 206 | case *redis.Subscription: 207 | default: 208 | return fmt.Errorf("unknown redis message: %#v", _msg) 209 | } 210 | } 211 | 212 | pubsub.Unsubscribe(ctx) 213 | 214 | LOCKING: 215 | for { 216 | select { 217 | case <-ctx.Done(): 218 | return ctx.Err() 219 | default: 220 | } 221 | candidate := make([]uint, 0, MaxCandidate) 222 | for i := r.min; i <= r.max; i++ { 223 | if usedIds[i] { 224 | continue 225 | } 226 | candidate = append(candidate, i) 227 | if len(candidate) >= MaxCandidate { 228 | break 229 | } 230 | } 231 | if len(candidate) == 0 { 232 | return fmt.Errorf("no more available id") 233 | } 234 | r.logger.Debug("selecting candidate machine ids", "candidates", candidate, "available_count", len(candidate)) 235 | // pick up randomly 236 | id := candidate[uint(r.rand.Intn(len(candidate)))] 237 | 238 | // try to lock by SET NX 239 | r.logger.Debug("attempting to acquire machine id lock", "machine_id", id, "lock_key", r.candidateLockKey(id)) 240 | res := c.SetNX( 241 | ctx, 242 | r.candidateLockKey(id), // key 243 | r.uuid, // value 244 | LockExpires, // expiration 245 | ) 246 | if err := res.Err(); err != nil { 247 | return fmt.Errorf("failed to get lock by SET NX: %w", err) 248 | } 249 | if res.Val() { 250 | r.logger.Info("machine id allocated successfully", "machine_id", id, "uuid", r.uuid, "namespace", r.namespace) 251 | r.id = id 252 | break LOCKING 253 | } else { 254 | r.logger.Debug("machine id already in use", "machine_id", id) 255 | usedIds[id] = true 256 | } 257 | } 258 | return nil 259 | } 260 | 261 | func parsePayload(payload string) (string, uint, error) { 262 | s := strings.Split(payload, ":") 263 | if len(s) != 2 { 264 | return "", 0, fmt.Errorf("unexpected data %s", payload) 265 | } 266 | id, err := strconv.ParseUint(s[1], 10, 64) 267 | if err != nil { 268 | return "", 0, fmt.Errorf("unexpected data %s", payload) 269 | } 270 | return s[0], uint(id), nil 271 | } 272 | 273 | func newPayload(uuid string, id uint) string { 274 | return fmt.Sprintf("%s:%d", uuid, id) 275 | } 276 | 277 | func (r *Raus) publish(ctx context.Context) { 278 | c := r.redisOptions.NewClient() 279 | defer close(r.channel) 280 | defer func() { 281 | c.Close() 282 | }() 283 | 284 | ticker := time.NewTicker(1 * time.Second) 
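	// Heartbeat loop: each tick re-publishes this instance's machine id on the Pub/Sub channel and refreshes the lock TTL; on ctx cancellation the lock key is deleted and the loop returns.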
285 | for { 286 | select { 287 | case <-ctx.Done(): 288 | r.logger.Info("shutting down machine id coordination", "machine_id", r.id, "namespace", r.namespace) 289 | // returns after releasing a held lock 290 | ctx2, cancel := context.WithTimeout(context.Background(), CleanupTimeout) 291 | defer cancel() 292 | err := c.Del(ctx2, r.lockKey()).Err() 293 | if err != nil { 294 | r.logger.Error("failed to release machine id lock", "error", err, "lock_key", r.lockKey()) 295 | } else { 296 | r.logger.Info("machine id lock released successfully", "machine_id", r.id, "lock_key", r.lockKey()) 297 | } 298 | return 299 | case <-ticker.C: 300 | err := r.holdLock(ctx, c) 301 | if err != nil { 302 | r.logger.Error("machine id coordination error", "error", err, "machine_id", r.id) 303 | if isFatal(err) { 304 | r.channel <- err 305 | return 306 | } 307 | c.Close() 308 | c = r.redisOptions.NewClient() 309 | } 310 | } 311 | } 312 | } 313 | 314 | func (r *Raus) holdLock(ctx context.Context, c RedisClient) error { 315 | if err := c.Publish(ctx, r.pubSubChannel, newPayload(r.uuid, r.id)).Err(); err != nil { 316 | return fmt.Errorf("PUBLISH failed: %w", err) 317 | } 318 | 319 | pipe := c.TxPipeline() 320 | getset := pipe.GetSet(ctx, r.lockKey(), r.uuid) 321 | pipe.Expire(ctx, r.lockKey(), LockExpires) 322 | _, err := pipe.Exec(ctx) 323 | if err != nil { 324 | return fmt.Errorf("GETSET or EXPIRE failed: %w", err) 325 | } 326 | if v := getset.Val(); v != r.uuid { 327 | return fatalError{fmt.Errorf("unexpected uuid got: %s", v)} 328 | } 329 | return nil 330 | } 331 | 332 | func (r *Raus) lockKey() string { 333 | return fmt.Sprintf("%s:id:%d", r.namespace, r.id) 334 | } 335 | 336 | func (r *Raus) candidateLockKey(id uint) string { 337 | return fmt.Sprintf("%s:id:%d", r.namespace, id) 338 | } 339 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # raus - Ranged unique ID supplier for Snowflake Algorithm 2 | 3 | ## Overview 4 | 5 | **raus** is a specialized Go library designed for **Snowflake algorithm implementations** that provides distributed machine ID allocation using Redis as coordination backend. Its primary purpose is to ensure unique machine ID assignment across multiple service instances during startup, which is essential for Snowflake-based distributed ID generation systems. 6 | 7 | ## Architecture 8 | 9 | ### Core Components 10 | 11 | - **raus.go**: Main library logic for machine ID allocation 12 | - `Raus` struct: Core machine ID allocator 13 | - `Get()`: Acquires unique machine ID within specified range 14 | - Redis Pub/Sub for inter-instance coordination 15 | - Atomic locking mechanism to prevent machine ID conflicts 16 | 17 | - **redis.go**: Redis connection and client management 18 | - Supports both regular Redis and Redis Cluster 19 | - URI-based configuration (`redis://`, `rediscluster://`) 20 | 21 | ### Key Features 22 | 23 | 1. **Machine ID Allocation**: Safely allocates non-overlapping machine IDs for Snowflake algorithm 24 | 2. **Redis Coordination**: Uses Redis as distributed coordination backend 25 | 3. **Startup-time Assignment**: Optimized for one-time allocation during service startup 26 | 4. **Range-based Configuration**: Flexible machine ID ranges (e.g., 0-1023 for 10-bit) 27 | 5. **Pub/Sub Communication**: Real-time coordination between service instances 28 | 6. 
**Graceful Cleanup**: Automatic lock release on service shutdown 29 | 30 | ## Primary Use Case: Snowflake Algorithm Support 31 | 32 | ### Snowflake Algorithm Requirements 33 | 34 | The Snowflake algorithm generates unique 64-bit IDs with the following structure: 35 | ``` 36 | ┌─────────────────────────────────────────────────────────────────────┐ 37 | │ Timestamp (41 bits) │ Machine ID (10 bits) │ Sequence (12 bits) │ 38 | └─────────────────────────────────────────────────────────────────────┘ 39 | ``` 40 | 41 | **Machine ID Requirements:** 42 | - Must be unique across all instances 43 | - Typically 0-1023 range for 10-bit machine ID 44 | - Assigned once during service startup 45 | - Consistent throughout service lifetime 46 | 47 | ### How raus Solves This 48 | 49 | raus provides the **Machine ID component** by: 50 | 1. **Coordinated Assignment**: Ensures no two instances get the same machine ID 51 | 2. **Range Management**: Supports various bit-width configurations (8-bit, 10-bit, 12-bit) 52 | 3. **Startup Coordination**: Handles concurrent service startup scenarios 53 | 4. **Environment Separation**: Namespace support for dev/staging/prod environments 54 | 55 | ## Usage Patterns 56 | 57 | ### Basic Snowflake Integration 58 | 59 | ```go 60 | // 1. Allocate machine ID during service startup 61 | r, err := raus.New("redis://localhost:6379", 0, 1023) 62 | machineID, errCh, err := r.Get(ctx) 63 | 64 | // 2. Initialize Snowflake generator with allocated machine ID 65 | snowflake := NewSnowflakeGenerator(machineID) 66 | 67 | // 3. Generate unique IDs throughout service lifetime 68 | id1 := snowflake.NextID() // e.g., 123456789012345678 69 | id2 := snowflake.NextID() // e.g., 123456789012345679 70 | ``` 71 | 72 | ### Machine ID Range Configuration 73 | 74 | ```go 75 | // Standard 10-bit machine ID (supports up to 1024 instances) 76 | raus.New(redisURI, 0, 1023) // 2^10 - 1 77 | 78 | // 8-bit machine ID (smaller deployments, up to 256 instances) 79 | raus.New(redisURI, 0, 255) // 2^8 - 1 80 | 81 | // 12-bit machine ID (larger deployments, up to 4096 instances) 82 | raus.New(redisURI, 0, 4095) // 2^12 - 1 83 | 84 | // Environment-specific ranges 85 | raus.New("redis://localhost:6379?ns=prod", 0, 511) // Production 86 | raus.New("redis://localhost:6379?ns=staging", 512, 767) // Staging 87 | raus.New("redis://localhost:6379?ns=dev", 768, 1023) // Development 88 | ``` 89 | 90 | ## Implementation Details 91 | 92 | ### Machine ID Allocation Process 93 | 94 | 1. **Discovery Phase** (3-second timeout): 95 | - Subscribe to Redis Pub/Sub channel 96 | - Listen for other instances' machine ID announcements 97 | - Build map of currently used machine IDs 98 | 99 | 2. **Candidate Selection**: 100 | - Generate list of available machine IDs from range 101 | - Exclude IDs already claimed by other instances 102 | - Randomly select from available candidates (up to MaxCandidate) 103 | 104 | 3. **Atomic Assignment**: 105 | - Use Redis `SET NX` (set if not exists) to claim machine ID 106 | - Lock expires after LockExpires duration (default: 60 seconds) 107 | - Retry with different candidate if lock acquisition fails 108 | 109 | 4. **Heartbeat Maintenance**: 110 | - Continuously publish machine ID every second 111 | - Refresh lock expiration with `GETSET` + `EXPIRE` 112 | - Monitor for fatal errors (e.g., UUID collisions) 113 | 114 | 5. 
**Graceful Cleanup**: 115 | - Release lock on context cancellation 116 | - Clean shutdown removes machine ID from coordination 117 | 118 | ### Error Handling 119 | 120 | - **Recoverable Errors**: Network issues, temporary Redis unavailability 121 | - **Fatal Errors**: UUID collisions (extremely rare), lock acquisition failures 122 | - **Context Cancellation**: Triggers cleanup and lock release 123 | 124 | ### Configuration Parameters 125 | 126 | ```go 127 | var ( 128 | MaxCandidate = 10 // Maximum candidate IDs to consider 129 | LockExpires = 60 * time.Second // Lock expiration time 130 | SubscribeTimeout = 3 * time.Second // Pub/Sub listening timeout 131 | CleanupTimeout = 30 * time.Second // Cleanup operation timeout 132 | ) 133 | ``` 134 | 135 | ## Deployment Scenarios 136 | 137 | ### Container Orchestration (Kubernetes) 138 | 139 | ```yaml 140 | apiVersion: apps/v1 141 | kind: Deployment 142 | metadata: 143 | name: snowflake-service 144 | spec: 145 | replicas: 10 # Each pod gets unique machine ID from 0-1023 range 146 | template: 147 | spec: 148 | containers: 149 | - name: app 150 | env: 151 | - name: REDIS_URL 152 | value: "redis://redis-cluster:6379?ns=production" 153 | - name: MACHINE_ID_MIN 154 | value: "0" 155 | - name: MACHINE_ID_MAX 156 | value: "1023" 157 | ``` 158 | 159 | ### Multi-Environment Setup 160 | 161 | - **Development**: `redis://dev-redis:6379?ns=dev` (range: 0-63) 162 | - **Staging**: `redis://staging-redis:6379?ns=staging` (range: 64-127) 163 | - **Production**: `redis://prod-redis:6379?ns=prod` (range: 128-1023) 164 | 165 | ### High Availability 166 | 167 | - **Redis Cluster**: Use `rediscluster://` for Redis coordination HA 168 | - **Redis Sentinel**: Automatic failover for Redis coordination 169 | - **Startup Timeouts**: Configure appropriate timeouts for deployment SLA 170 | 171 | ## Performance Characteristics 172 | 173 | ### Startup Performance 174 | - **Allocation Time**: Typically 3-5 seconds (discovery + assignment) 175 | - **Concurrent Startup**: Handles multiple instances starting simultaneously 176 | - **Retry Logic**: Automatic retry on temporary failures 177 | 178 | ### Runtime Performance 179 | - **Memory Usage**: Minimal (single goroutine + small state) 180 | - **Network Traffic**: Heartbeat every 1 second + Pub/Sub messages 181 | - **CPU Usage**: Very low (periodic Redis operations only) 182 | 183 | ### Scaling Limits 184 | - **Instance Count**: Limited by machine ID range (e.g., 1024 for 10-bit) 185 | - **Redis Load**: Scales with number of instances (O(n) pub/sub messages) 186 | - **Startup Contention**: May increase with concurrent startups 187 | 188 | ## Dependencies 189 | 190 | ```go require 191 | github.com/redis/go-redis/v9 v9.11.0 // Redis client 192 | github.com/google/uuid v1.6.0 // UUID generation for instance identification 193 | ``` 194 | 195 | ## Use Cases Beyond Snowflake 196 | 197 | While primarily designed for Snowflake algorithm, raus can be used for: 198 | 199 | - **Database Sharding**: Shard ID assignment for distributed databases 200 | - **Load Balancer Backends**: Unique backend server identification 201 | - **Message Queue Partitioning**: Consumer group coordination 202 | - **Distributed Cache**: Cache partition assignment 203 | - **Container Orchestration**: Pod/container unique identification 204 | 205 | ## Comparison with Alternatives 206 | 207 | | Approach | Pros | Cons | Best For | 208 | |----------|------|------|----------| 209 | | **raus** | Guaranteed uniqueness, dynamic allocation | Redis dependency, startup latency 
| Dynamic deployments, cloud environments | 210 | | **Static Config** | Simple, no dependencies | Manual management, scaling limitations | Fixed deployments, small scale | 211 | | **MAC Address** | Hardware-based uniqueness | Privacy issues, virtual environments | Physical servers, legacy systems | 212 | | **IP-based** | Network-derived | IP changes break consistency | Stable network environments | 213 | | **Hostname Hash** | Simple derivation | Hash collisions possible | Development, non-critical systems | 214 | 215 | ## Security Considerations 216 | 217 | - **Redis Access Control**: Use authentication and network isolation 218 | - **Namespace Isolation**: Separate environments with different namespaces 219 | - **Network Security**: Secure Redis communication with TLS 220 | - **Machine ID Exposure**: Consider if machine IDs need to be kept private 221 | 222 | ## Monitoring and Observability 223 | 224 | ### Key Metrics 225 | - Machine ID allocation success rate 226 | - Machine ID allocation duration 227 | - Redis connectivity status 228 | - Active machine ID count per namespace 229 | 230 | ### Logging 231 | - Machine ID allocation events 232 | - Redis connectivity issues 233 | - Lock acquisition failures 234 | - Graceful shutdown events 235 | 236 | ### Alerting 237 | - Machine ID allocation failures 238 | - Redis coordination failures 239 | - Machine ID range exhaustion 240 | 241 | ## Go Version Compatibility 242 | 243 | Requires Go 1.23 or later for: 244 | - Latest error handling with `fmt.Errorf` and `%w` verb 245 | - Context handling improvements 246 | - Performance optimizations 247 | - Security updates 248 | 249 | ## Redis Compatibility 250 | 251 | - **Redis 7.x**: Recommended (full RESP3 support) 252 | - **Redis 6.x**: Supported (RESP3 experimental) 253 | - **Redis Cluster**: Full support for high availability 254 | - **Redis Sentinel**: Supported for automatic failover 255 | 256 | raus uses RESP3 protocol by default but can fall back to RESP2 for compatibility. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # raus 2 | 3 | [![GoDoc](https://godoc.org/github.com/fujiwara/raus?status.svg)](https://godoc.org/github.com/fujiwara/raus) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/fujiwara/raus)](https://goreportcard.com/report/github.com/fujiwara/raus) 5 | [![MIT License](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE) 6 | 7 | **Ranged unique ID supplier** - A distributed machine ID generator for Snowflake algorithm implementations using Redis as coordination backend. 8 | 9 | ## Overview 10 | 11 | raus provides unique machine ID allocation for distributed ID generators like the Snowflake algorithm. It ensures no machine ID conflicts across multiple instances during startup, making it essential for implementing distributed unique ID generation systems in microservices and containerized environments. 12 | 13 | The primary use case is allocating unique machine IDs (typically 0-1023 for 10-bit machine ID) that Snowflake algorithm implementations require to generate globally unique IDs across distributed systems. 
14 | 15 | ## Features 16 | 17 | - **Machine ID Allocation**: Safe unique machine ID assignment for Snowflake implementations 18 | - **Redis Integration**: Works with both Redis and Redis Cluster for coordination 19 | - **Range-based Assignment**: Allocate machine IDs within specified ranges (e.g., 0-1023) 20 | - **Real-time Coordination**: Uses Redis Pub/Sub for instance communication 21 | - **Startup-time Assignment**: Optimized for one-time ID allocation during service startup 22 | - **Automatic Cleanup**: Graceful lock release on shutdown 23 | - **RESP3 Support**: Compatible with latest Redis protocol 24 | 25 | ## Installation 26 | 27 | ```bash 28 | go get github.com/fujiwara/raus 29 | ``` 30 | 31 | **Requirements:** 32 | - Go 1.23 or later 33 | - Redis 7.x or later (Redis 6.x also supported) 34 | 35 | ## Quick Start 36 | 37 | ### Basic Machine ID Allocation 38 | 39 | ```go 40 | package main 41 | 42 | import ( 43 | "context" 44 | "log" 45 | "time" 46 | 47 | "github.com/fujiwara/raus" 48 | ) 49 | 50 | func main() { 51 | // Allocate machine ID for Snowflake (0-1023 range for 10-bit machine ID) 52 | r, err := raus.New("redis://localhost:6379", 0, 1023) 53 | if err != nil { 54 | log.Fatal(err) 55 | } 56 | 57 | // Get unique machine ID during startup 58 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 59 | defer cancel() 60 | 61 | machineID, errCh, err := r.Get(ctx) 62 | if err != nil { 63 | log.Fatal(err) 64 | } 65 | 66 | log.Printf("Allocated machine ID: %d", machineID) 67 | 68 | // Monitor for errors in background 69 | go func() { 70 | for err := range errCh { 71 | log.Printf("Runtime error: %v", err) 72 | // Handle Redis connectivity issues, etc. 73 | } 74 | }() 75 | 76 | // Use machine ID in your Snowflake implementation 77 | snowflake := NewSnowflake(machineID) 78 | 79 | // Run your application... 80 | runApplication(snowflake) 81 | 82 | // Cleanup on shutdown 83 | cancel() 84 | } 85 | ``` 86 | 87 | ### Integration with Snowflake Algorithm 88 | 89 | ```go 90 | type SnowflakeGenerator struct { 91 | machineID uint64 92 | sequence uint64 93 | lastTime int64 94 | } 95 | 96 | func NewSnowflake(machineID uint) *SnowflakeGenerator { 97 | return &SnowflakeGenerator{ 98 | machineID: uint64(machineID), 99 | } 100 | } 101 | 102 | func (s *SnowflakeGenerator) NextID() uint64 { 103 | // Implement Snowflake algorithm using s.machineID 104 | // Format: timestamp(41) + machineID(10) + sequence(12) 105 | // ... 
106 | } 107 | 108 | func main() { 109 | // Get machine ID from raus 110 | r, _ := raus.New("redis://localhost:6379", 0, 1023) 111 | machineID, _, _ := r.Get(context.Background()) 112 | 113 | // Initialize Snowflake with allocated machine ID 114 | snowflake := NewSnowflake(machineID) 115 | 116 | // Generate unique IDs 117 | id1 := snowflake.NextID() 118 | id2 := snowflake.NextID() 119 | } 120 | ``` 121 | 122 | ## Configuration 123 | 124 | ### Redis URI Format 125 | 126 | raus supports flexible Redis URI formats for coordination: 127 | 128 | ```go 129 | // Standard Redis 130 | raus.New("redis://localhost:6379", 0, 1023) 131 | 132 | // Redis with authentication 133 | raus.New("redis://user:password@redis.example.com:6379", 0, 1023) 134 | 135 | // Redis with custom namespace (for multi-environment) 136 | raus.New("redis://localhost:6379?ns=production", 0, 1023) 137 | 138 | // Redis Cluster for high availability 139 | raus.New("rediscluster://cluster.example.com:6379", 0, 1023) 140 | 141 | // Redis Cluster with authentication and namespace 142 | raus.New("rediscluster://user:pass@cluster.example.com:6379?ns=prod", 0, 1023) 143 | ``` 144 | 145 | ### Machine ID Ranges 146 | 147 | Common Snowflake configurations: 148 | 149 | ```go 150 | // Standard 10-bit machine ID (Twitter Snowflake) 151 | raus.New(redisURI, 0, 1023) // 2^10 - 1 = 1023 152 | 153 | // 8-bit machine ID for smaller deployments 154 | raus.New(redisURI, 0, 255) // 2^8 - 1 = 255 155 | 156 | // 12-bit machine ID for larger deployments 157 | raus.New(redisURI, 0, 4095) // 2^12 - 1 = 4095 158 | 159 | // Custom range for specific deployment 160 | raus.New(redisURI, 100, 199) // Reserved range for specific service 161 | ``` 162 | 163 | ## How It Works 164 | 165 | The machine ID allocation process follows these steps: 166 | 167 | 1. **Discovery Phase**: Listen to Redis Pub/Sub channel to discover other instances' machine IDs 168 | 2. **Candidate Selection**: Choose from available machine IDs not used by other instances 169 | 3. **Atomic Assignment**: Use Redis `SET NX` to atomically claim a machine ID 170 | 4. **Heartbeat Broadcasting**: Continuously publish assigned machine ID to coordinate with other instances 171 | 5. 
**Graceful Release**: Release machine ID lock on service shutdown 172 | 173 | ## Architecture 174 | 175 | ``` 176 | ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ 177 | │ Service A │ │ Service B │ │ Service C │ 178 | │ MachineID: 0 │ │ MachineID: 1 │ │ MachineID: 2 │ 179 | │ │ │ │ │ │ 180 | │ Snowflake IDs: │ │ Snowflake IDs: │ │ Snowflake IDs: │ 181 | │ 12345...000 │ │ 12345...001 │ │ 12345...002 │ 182 | │ 12345...000 │ │ 12345...001 │ │ 12345...002 │ 183 | └─────────┬───────┘ └─────────┬───────┘ └─────────┬───────┘ 184 | │ │ │ 185 | └──────────────────────┼──────────────────────┘ 186 | │ 187 | ┌────────▼────────┐ 188 | │ Redis │ 189 | │ │ 190 | │ • Machine ID │ 191 | │ Coordination │ 192 | │ • Pub/Sub │ 193 | │ • Atomic Locks │ 194 | └─────────────────┘ 195 | ``` 196 | 197 | ## Advanced Usage 198 | 199 | ### Startup Coordination in Container Environments 200 | 201 | ```go 202 | func initializeMachineID() uint { 203 | // Wait for Redis availability 204 | r, err := raus.New(os.Getenv("REDIS_URL"), 0, 1023) 205 | if err != nil { 206 | log.Fatal("Failed to connect to Redis:", err) 207 | } 208 | 209 | // Use longer timeout for container startup scenarios 210 | ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) 211 | defer cancel() 212 | 213 | machineID, errCh, err := r.Get(ctx) 214 | if err != nil { 215 | log.Fatal("Failed to allocate machine ID:", err) 216 | } 217 | 218 | // Background monitoring for Redis connectivity 219 | go func() { 220 | for err := range errCh { 221 | log.Printf("Machine ID coordination error: %v", err) 222 | // Consider alerting or graceful shutdown on persistent errors 223 | } 224 | }() 225 | 226 | return machineID 227 | } 228 | ``` 229 | 230 | ### Multi-Environment Support 231 | 232 | ```go 233 | // Development environment (smaller range) 234 | devRaus, _ := raus.New("redis://dev-redis:6379?ns=dev", 0, 63) 235 | 236 | // Staging environment 237 | stagingRaus, _ := raus.New("redis://staging-redis:6379?ns=staging", 64, 127) 238 | 239 | // Production environment (larger range) 240 | prodRaus, _ := raus.New("redis://prod-redis:6379?ns=prod", 128, 1023) 241 | ``` 242 | 243 | ### Configuration Tuning 244 | 245 | ```go 246 | // Adjust timing parameters before calling New() 247 | raus.MaxCandidate = 50 // More candidates for large deployments 248 | raus.LockExpires = 60 * time.Second // Longer locks for slow startups 249 | raus.SubscribeTimeout = 10 * time.Second // Longer discovery phase 250 | raus.CleanupTimeout = 30 * time.Second // Cleanup timeout 251 | ``` 252 | 253 | ## Use Cases 254 | 255 | ### Primary Use Cases (Snowflake Algorithm) 256 | - **Distributed ID Generation**: Machine ID allocation for Snowflake implementations 257 | - **Microservices ID Systems**: Unique identifier generation across services 258 | - **Event Sourcing**: Ordered ID generation with embedded machine identification 259 | - **Database Sharding**: Shard-aware ID generation with machine component 260 | 261 | ### Secondary Use Cases 262 | - **Load Balancer Configuration**: Backend server identification 263 | - **Container Orchestration**: Pod/container unique identification 264 | - **Multi-tenant Systems**: Tenant-specific ID generation coordination 265 | - **Distributed Caching**: Cache partition assignment 266 | 267 | ## Comparison with Alternatives 268 | 269 | ### When to Use raus 270 | 271 | **Choose raus when you need:** 272 | - Machine ID coordination for Snowflake algorithm 273 | - Guaranteed non-overlapping ID ranges across instances 274 | - Redis 
infrastructure already available 275 | - Startup-time ID allocation (not runtime generation) 276 | 277 | ### Alternative Approaches 278 | 279 | | Approach | Use Case | Pros | Cons | 280 | |----------|----------|------|------| 281 | | **raus** | Snowflake machine ID | Guaranteed uniqueness, Redis coordination | Redis dependency, startup complexity | 282 | | **Static Configuration** | Fixed deployments | Simple, no dependencies | Manual management, scaling issues | 283 | | **UUID v1 MAC** | MAC-based machine ID | Hardware-based uniqueness | Privacy concerns, virtual environments | 284 | | **IP-based** | Network-based ID | Automatic from infrastructure | Network changes break IDs | 285 | 286 | ## Testing 287 | 288 | ```bash 289 | # Run unit tests 290 | go test -v 291 | 292 | # Run integration tests with Redis Cluster 293 | docker compose up --exit-code-from app 294 | 295 | # Or use the cluster CI script 296 | ./cluster-ci.sh 297 | ``` 298 | 299 | ## Production Considerations 300 | 301 | ### High Availability 302 | - Use Redis Cluster or Redis Sentinel for Redis HA 303 | - Configure appropriate timeouts for your startup SLA 304 | - Monitor machine ID allocation success rates 305 | 306 | ### Monitoring 307 | - Track machine ID allocation time during startup 308 | - Monitor Redis connectivity from application instances 309 | - Alert on machine ID allocation failures 310 | 311 | ### Scaling 312 | - Plan machine ID ranges based on maximum expected instances 313 | - Consider namespace separation for different environments 314 | - Reserve ranges for different services or deployment patterns 315 | 316 | ## Compatibility 317 | 318 | - **Go**: 1.23+ 319 | - **Redis**: 7.x (recommended), 6.x supported 320 | - **Protocol**: RESP3 (default), RESP2 compatible 321 | - **Clustering**: Redis Cluster supported 322 | - **Snowflake**: Compatible with standard Snowflake algorithm implementations 323 | 324 | ## Contributing 325 | 326 | 1. Fork the repository 327 | 2. Create a feature branch 328 | 3. Add tests for your changes 329 | 4. Ensure all tests pass 330 | 5. Submit a pull request 331 | 332 | ## License 333 | 334 | MIT License 335 | 336 | Copyright (c) 2017 FUJIWARA Shunichiro 337 | 338 | Permission is hereby granted, free of charge, to any person obtaining a copy 339 | of this software and associated documentation files (the "Software"), to deal 340 | in the Software without restriction, including without limitation the rights 341 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 342 | copies of the Software, and to permit persons to whom the Software is 343 | furnished to do so, subject to the following conditions: 344 | 345 | The above copyright notice and this permission notice shall be included in all 346 | copies or substantial portions of the Software. 347 | 348 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 349 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 350 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 351 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 352 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 353 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 354 | SOFTWARE. --------------------------------------------------------------------------------