├── .gitignore ├── LICENSE ├── README.md ├── _examples └── main.go ├── consistent_hashing.go ├── consistent_hashing_benchmark_test.go ├── consistent_hashing_test.go ├── go.mod └── go.sum /.gitignore: -------------------------------------------------------------------------------- 1 | # If you prefer the allow list template instead of the deny list, see community template: 2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore 3 | # 4 | # Binaries for programs and plugins 5 | *.exe 6 | *.exe~ 7 | *.dll 8 | *.so 9 | *.dylib 10 | 11 | # Test binary, built with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Dependency directories (remove the comment below to include it) 18 | # vendor/ 19 | 20 | # Go workspace file 21 | go.work 22 | go.work.sum 23 | 24 | .idea/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Archishman Sengupta 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Consistent-Hashing 4 | 5 | Consistent-Hashing is a Go library designed for distributed load balancing using consistent hashing, enhanced with bounded loads. It provides efficient key distribution across a set of hosts while ensuring that no single host becomes overloaded beyond a specified limit. 
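The bound works like this: with a configured `LoadFactor` of `f`, a host keeps accepting keys only while its current load is below `ceil((totalLoad / numberOfHosts) * f)`; once a host reaches that cap, `GetLeast` passes over it and assigns the key to an acceptable host further along the ring. As a rough worked example (mirroring the `MaxLoad` calculation in `consistent_hashing.go`), distributing 100 keys across 4 hosts with the default `LoadFactor` of 1.25 caps every host at `ceil(25 * 1.25) = 32` keys.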
6 | 7 | ## Installation 8 | 9 | To install the package, execute the following command: 10 | 11 | ```bash 12 | go get github.com/ArchishmanSengupta/consistent-hashing 13 | ``` 14 | 15 | ## Usage 16 | 17 | Here's a straightforward example of how to utilize the `consistent_hashing` package: 18 | 19 | ```go 20 | package main 21 | 22 | import ( 23 | "context" 24 | "fmt" 25 | "log" 26 | 27 | "github.com/ArchishmanSengupta/consistent-hashing" 28 | ) 29 | 30 | func main() { 31 | // Create a new ConsistentHashing instance with default configuration 32 | ch, err := consistent_hashing.NewWithConfig(consistent_hashing.Config{}) 33 | if err != nil { 34 | log.Fatal(err) 35 | } 36 | 37 | // Add hosts to the consistent hash ring 38 | hosts := []string{"host1", "host2", "host3", "host4"} 39 | ctx := context.Background() 40 | for _, host := range hosts { 41 | err := ch.Add(ctx, host) 42 | if err != nil { 43 | log.Printf("Error adding host %s: %v", host, err) 44 | } 45 | } 46 | 47 | // Distribute some keys 48 | keys := []string{"key1", "key2", "key3", "key4", "key5"} 49 | for _, key := range keys { 50 | host, err := ch.GetLeast(ctx, key) 51 | if err != nil { 52 | log.Printf("Error getting host for key %s: %v", key, err) 53 | continue 54 | } 55 | fmt.Printf("Key %s assigned to host %s\n", key, host) 56 | 57 | // Increase the load for the assigned host 58 | err = ch.IncreaseLoad(ctx, host) 59 | if err != nil { 60 | log.Printf("Error increasing load for host %s: %v", host, err) 61 | } 62 | } 63 | 64 | // Print current loads 65 | loads := ch.GetLoads() 66 | fmt.Println("Current loads:") 67 | for host, load := range loads { 68 | fmt.Printf("%s: %d\n", host, load) 69 | } 70 | 71 | // Remove a host from the ring 72 | hostToRemove := "host2" 73 | err = ch.Remove(ctx, hostToRemove) 74 | if err != nil { 75 | log.Printf("Error removing host %s: %v", hostToRemove, err) 76 | } 77 | 78 | // Print updated list of hosts 79 | fmt.Println("Remaining hosts after removal:", ch.Hosts()) 80 | } 81 | ``` 82 | ## Output 83 | ``` 84 | Key key1 assigned to host host4 85 | Key key2 assigned to host host3 86 | Key key3 assigned to host host2 87 | Key key4 assigned to host host1 88 | Key key5 assigned to host host4 89 | Current loads: 90 | host2: 1 91 | host3: 1 92 | host4: 2 93 | host1: 1 94 | Remaining hosts after removal: [host1 host3 host4] 95 | ``` 96 | 97 | ## Features 98 | 99 | - **Consistent Hashing with Bounded Loads**: Distributes load evenly across hosts while limiting maximum host load. 100 | - **Customizable Configuration**: Adjust replication factor, load factor, and hash function to suit specific requirements. 101 | - **Thread-Safe Operations**: Ensures safe concurrent access for adding hosts, distributing keys, and managing loads. 102 | - **Efficient Key Distribution**: Uses consistent hashing principles for efficient key assignment and lookup. 103 | 104 | ## Configuration 105 | 106 | Customize consistent hashing behavior by providing a `Config` struct during instance creation: 107 | 108 | ```go 109 | cfg := consistent_hashing.Config{ 110 | ReplicationFactor: 20, // Number of virtual nodes per host 111 | LoadFactor: 1.25, // Maximum load factor before redistribution 112 | HashFunction: fnv.New64a, // Custom hash function (optional) 113 | } 114 | 115 | ch, err := consistent_hashing.NewWithConfig(cfg) 116 | ``` 117 | 118 | ## Benchmarking 119 | 120 | Use the following command to run benchmarks: 121 | 122 | ```bash 123 | go test -bench=. 
-benchmem 124 | ``` 125 | 126 | Example benchmark results: 127 | 128 | ``` 129 | goos: darwin 130 | goarch: arm64 131 | pkg: github.com/ArchishmanSengupta/consistent-hashing 132 | BenchmarkAdd-10                     4626    19088168 ns/op    23748 B/op    895 allocs/op 133 | BenchmarkGet-10                  6359968       186.5 ns/op       47 B/op      4 allocs/op 134 | BenchmarkGetLeast-10                 180     6606643 ns/op       24 B/op      3 allocs/op 135 | BenchmarkIncreaseLoad-10        13727469       83.96 ns/op       13 B/op      1 allocs/op 136 | BenchmarkRemove-10                   139     8671612 ns/op    17696 B/op   1306 allocs/op 137 | BenchmarkParallelOperations-10       884     1297025 ns/op      123 B/op      9 allocs/op 138 | PASS 139 | ok      github.com/ArchishmanSengupta/consistent-hashing    160.723s 140 | ``` 141 | 142 | ## API Reference 143 | 144 | ### Methods 145 | 146 | - `NewWithConfig(cfg Config) (*ConsistentHashing, error)`: Creates a new ConsistentHashing instance with the specified configuration. 147 | - `Add(ctx context.Context, host string) error`: Adds a new host to the consistent hash ring. 148 | - `Get(ctx context.Context, key string) (string, error)`: Retrieves the host responsible for a given key. 149 | - `GetLeast(ctx context.Context, key string) (string, error)`: Retrieves the least loaded host for a given key. 150 | - `IncreaseLoad(ctx context.Context, host string) error`: Increases the load for a specified host. 151 | - `DecreaseLoad(ctx context.Context, host string) error`: Decreases the load for a specified host. - `UpdateLoad(ctx context.Context, host string, load int64) error`: Sets the load for a specified host to an absolute value. 152 | - `GetLoads() map[string]int64`: Retrieves the current load for all hosts. 153 | - `Hosts() []string`: Retrieves the list of all hosts in the ring. 154 | - `Remove(ctx context.Context, host string) error`: Removes a host from the ring. 155 | 156 | ## Examples 157 | 158 | ### Adding and Removing Hosts 159 | 160 | ```go 161 | ch, _ := consistent_hashing.NewWithConfig(consistent_hashing.Config{}) 162 | ctx := context.Background() 163 | 164 | // Adding hosts 165 | ch.Add(ctx, "host1") 166 | ch.Add(ctx, "host2") 167 | ch.Add(ctx, "host3") 168 | 169 | // Removing a host 170 | err := ch.Remove(ctx, "host2") 171 | if err != nil { 172 | log.Printf("Error removing host: %v", err) 173 | } 174 | 175 | fmt.Println("Current hosts:", ch.Hosts()) 176 | ``` 177 | 178 | ### Distributing Keys with Load Balancing 179 | 180 | ```go 181 | ch, _ := consistent_hashing.NewWithConfig(consistent_hashing.Config{}) 182 | ctx := context.Background() 183 | 184 | // Add hosts 185 | for i := 1; i <= 5; i++ { 186 | ch.Add(ctx, fmt.Sprintf("host%d", i)) 187 | } 188 | 189 | // Distribute keys 190 | keys := []string{"user1", "user2", "user3", "user4", "user5", "user6", "user7", "user8", "user9", "user10"} 191 | for _, key := range keys { 192 | host, _ := ch.GetLeast(ctx, key) 193 | fmt.Printf("Key %s assigned to %s\n", key, host) 194 | ch.IncreaseLoad(ctx, host) 195 | } 196 | 197 | // Print final loads 198 | loads := ch.GetLoads() 199 | for host, load := range loads { 200 | fmt.Printf("%s load: %d\n", host, load) 201 | } 202 | ``` 203 | 204 | ## Contributing 205 | 206 | Contributions are welcome! Feel free to submit a Pull Request with your enhancements or bug fixes. 207 | 208 | --- 209 | 210 | For a fuller, runnable walkthrough covering a custom hash function, direct load updates with `UpdateLoad`, and host removal, see `_examples/main.go` or the condensed sketch below.
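The sketch below is a condensed variant of `_examples/main.go`, showing a non-default hash function (murmur3) wired through `Config.HashFunction`, plus setting a host's load to an absolute value with `UpdateLoad`. The host address `10.0.0.1` and the load value `42` are illustrative placeholders; the `murmur3` dependency is the one already declared in `go.mod`.

```go
package main

import (
	"context"
	"hash"
	"log"

	ch "github.com/ArchishmanSengupta/consistent-hashing"
	"github.com/spaolacci/murmur3"
)

func main() {
	ctx := context.Background()

	// Swap the default FNV-1a hash for murmur3 via the HashFunction hook.
	cfg := ch.Config{
		ReplicationFactor: 20,
		LoadFactor:        1.25,
		HashFunction:      func() hash.Hash64 { return murmur3.New64() },
	}

	ring, err := ch.NewWithConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Add a host, then report an externally measured load for it in one call
	// instead of stepping it with IncreaseLoad/DecreaseLoad.
	if err := ring.Add(ctx, "10.0.0.1"); err != nil {
		log.Fatal(err)
	}
	if err := ring.UpdateLoad(ctx, "10.0.0.1", 42); err != nil {
		log.Fatal(err)
	}

	log.Printf("loads: %v", ring.GetLoads())
}
```

`UpdateLoad` overwrites the stored load and adjusts the ring's total load in one step, and that total is what the bounded-load check used by `GetLeast` works from.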
-------------------------------------------------------------------------------- /_examples/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "hash" 7 | "log" 8 | 9 | ch "github.com/ArchishmanSengupta/consistent-hashing" 10 | "github.com/spaolacci/murmur3" 11 | ) 12 | 13 | // customMurmurHash adapts murmur3 to the hash.Hash64 constructor expected by Config.HashFunction 14 | func customMurmurHash() hash.Hash64 { 15 | return murmur3.New64() 16 | } 17 | 18 | // printLoads prints the current load of all hosts 19 | func printLoads(c *ch.ConsistentHashing) { 20 | fmt.Println("Current loads:") 21 | for host, load := range c.GetLoads() { 22 | fmt.Printf("Host: %s -> Load: %d\n", host, load) 23 | } 24 | } 25 | 26 | func main() { 27 | // create a context for managing request-scoped values, cancellation, and deadlines; 28 | // it controls the lifecycle of the calls below. 29 | ctx := context.Background() 30 | 31 | // create a new consistent hashing instance with a custom config 32 | cfg := ch.Config{ 33 | ReplicationFactor: 3, 34 | LoadFactor: 1.5, 35 | HashFunction: customMurmurHash, 36 | } 37 | 38 | hashRing, err := ch.NewWithConfig(cfg) 39 | if err != nil { 40 | fmt.Printf("Error creating hash ring: %v\n", err) 41 | return 42 | } 43 | 44 | // add hosts to the hash ring 45 | hosts := []string{"127.0.0.1", "127.0.0.2", "127.0.0.3", "127.0.0.4", "127.0.0.5", "127.0.0.6"} 46 | for _, host := range hosts { 47 | err := hashRing.Add(ctx, host) 48 | if err != nil { 49 | log.Fatalf("Failed to add host %s: %v", host, err) 50 | } 51 | } 52 | 53 | // show current hosts 54 | fmt.Println("Hosts added to the hash ring: ", hashRing.Hosts()) 55 | 56 | // map keys to hosts and display the mapping 57 | users := []string{"striver", "arpitbhayani", "piyushgarg", "hkiratsingh", "archie", "sergeybin"} 58 | fmt.Println("User to Host Mapping: ") 59 | for _, user := range users { 60 | host, err := hashRing.Get(ctx, user) 61 | if err != nil { 62 | log.Fatalf("Failed to get host for user %s: %v", user, err) 63 | } 64 | fmt.Printf("User: %s -> Host: %s\n", user, host) 65 | } 66 | 67 | // increment load on a particular host 68 | fmt.Println("Incrementing load on 127.0.0.2") 69 | err = hashRing.IncreaseLoad(ctx, "127.0.0.2") 70 | if err != nil { 71 | log.Fatalf("Failed to increment load: %v", err) 72 | } 73 | printLoads(hashRing) 74 | 75 | // update load on a host and display loads 76 | fmt.Println("\nUpdate load on 127.0.0.2 to 4") 77 | err = hashRing.UpdateLoad(ctx, "127.0.0.2", 4) 78 | if err != nil { 79 | log.Fatalf("Failed to update load: %v", err) 80 | } 81 | printLoads(hashRing) 82 | 83 | // decrease load on a particular host 84 | fmt.Println("\nDecrease load on 127.0.0.2") 85 | err = hashRing.DecreaseLoad(ctx, "127.0.0.2") 86 | if err != nil { 87 | log.Fatalf("Failed to decrease load: %v", err) 88 | } 89 | printLoads(hashRing) 90 | 91 | // get least loaded host for a user 92 | fmt.Println("\nGetting least loaded host for user 'archie'") 93 | leastLoadedHost, err := hashRing.GetLeast(ctx, "archie") 94 | if err != nil { 95 | log.Fatalf("Failed to get least loaded host for user 'archie': %v", err) 96 | } 97 | fmt.Printf("Least loaded host for user 'archie': %s\n", leastLoadedHost) 98 | 99 | // remove a host and display hosts and loads 100 | fmt.Println("\nRemoving 127.0.0.4") 101 | err = hashRing.Remove(ctx, "127.0.0.4") 102 | if err != nil { 103 | log.Fatalf("Failed to remove host: %v", err) 104 | } 105 | printLoads(hashRing) 106 | } 107 |
-------------------------------------------------------------------------------- /consistent_hashing.go: -------------------------------------------------------------------------------- 1 | package consistent_hashing 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "hash" 8 | "hash/fnv" 9 | "log" 10 | "math" 11 | "sort" 12 | "sync" 13 | "sync/atomic" 14 | ) 15 | 16 | // Custom errors 17 | var ( 18 | ErrNoHost = errors.New("no host added") 19 | ErrHostNotFound = errors.New("host not found") 20 | ) 21 | 22 | // Consistent Hashing config parameters 23 | type Config struct { 24 | ReplicationFactor int // no of virtual_nodes per host 25 | LoadFactor float64 // max load factor before redistribution 26 | HashFunction func() hash.Hash64 // for the time being lets keep the hash function simple 27 | } 28 | 29 | // Host is a physical node in the CH hashing ring 30 | type Host struct { 31 | Name string // HostName or identifier 32 | Load int64 // current load on the host 33 | } 34 | 35 | // CH with bounded loads 36 | type ConsistentHashing struct { 37 | config Config 38 | hosts sync.Map // Map of hash value to host 39 | sortedSet []uint64 // sorted slice of hash values 40 | loadMap sync.Map // map of host to Host struct 41 | totalLoad int64 // total load across all hosts 42 | hostList []string // list of all hosts ['uat-server.something.com', 'be-server.something.com'] 43 | mu sync.RWMutex // Mutex for synchronizing access 44 | } 45 | 46 | // New CH instance 47 | func NewWithConfig(cfg Config) (*ConsistentHashing, error) { 48 | if cfg.ReplicationFactor <= 0 { 49 | cfg.ReplicationFactor = 10 50 | } 51 | 52 | if cfg.LoadFactor <= 1 { 53 | cfg.LoadFactor = 1.25 54 | } 55 | 56 | if cfg.HashFunction == nil { 57 | cfg.HashFunction = fnv.New64a 58 | } 59 | 60 | return &ConsistentHashing{ 61 | config: cfg, 62 | sortedSet: make([]uint64, 0), 63 | }, nil 64 | } 65 | 66 | // Add adds a new host to the consistent hashing ring, including its virtual nodes, 67 | // and updates the internal data structures accordingly. It returns an error if the operation fails. 68 | func (c *ConsistentHashing) Add(ctx context.Context, host string) error { 69 | // Acquire a lock to ensure thread safety during the update. 70 | c.mu.Lock() 71 | defer c.mu.Unlock() 72 | 73 | // Check if the host already exists in the loadMap. 74 | if _, ok := c.loadMap.Load(host); ok { 75 | return nil // Host already exists, no further action needed. 76 | } 77 | 78 | // Add the new host with an initial load of 0. 79 | c.loadMap.Store(host, &Host{Name: host, Load: 0}) 80 | c.hostList = append(c.hostList, host) 81 | 82 | // Add virtual nodes for the host based on the replication factor. 83 | for i := 0; i < c.config.ReplicationFactor; i++ { 84 | // Generate a hash value for the virtual node. 85 | h, err := c.Hash(fmt.Sprintf("%s%d", host, i)) 86 | if err != nil { 87 | log.Fatal("key hashing failed", err) // Log fatal error and exit if hashing fails. 88 | } 89 | // Store the virtual node hash and map it to the host. 90 | c.hosts.Store(h, host) 91 | // Append the virtual node hash to the sorted set. 92 | c.sortedSet = append(c.sortedSet, h) 93 | } 94 | 95 | // Sort the hash values in the sorted set. 96 | // This allows efficient key lookups using binary search. 97 | sort.Slice(c.sortedSet, func(i, j int) bool { return c.sortedSet[i] < c.sortedSet[j] }) 98 | 99 | // Return nil to indicate the host was added successfully. 100 | return nil 101 | } 102 | 103 | // Get retrieves the host that should handle the given key in the consistent hashing ring. 
104 | // It returns the host name and nil error if successful. If no hosts are added, it returns ErrNoHost. 105 | // If there's an error generating the hash value or searching for it, it returns an appropriate error. 106 | // If the host associated with the hash value is not found, it returns ErrHostNotFound. 107 | func (c *ConsistentHashing) Get(ctx context.Context, key string) (string, error) { 108 | // Acquire a read lock to ensure thread safety during read operations. 109 | c.mu.RLock() 110 | defer c.mu.RUnlock() 111 | 112 | // Return error if no hosts are added 113 | if len(c.hostList) == 0 { 114 | return "", ErrNoHost 115 | } 116 | 117 | // Generate hash value for the given key using the configured hash function. 118 | h, err := c.Hash(key) 119 | if err != nil { 120 | return "", err 121 | } 122 | 123 | // Find the closest index in the sorted set for the generated hash value. 124 | index, err := c.Search(h) 125 | if err != nil { 126 | return "", err 127 | } 128 | 129 | // Retrieve the host associated with the hash value from the hosts map. 130 | if host, ok := c.hosts.Load(c.sortedSet[index]); ok { 131 | return host.(string), nil 132 | } 133 | 134 | // Return an error if the host associated with the hash value is not found. 135 | return "", ErrHostNotFound 136 | } 137 | 138 | // GetLeast retrieves the host that should handle the given key in the consistent hashing ring 139 | // with the least current load. It returns the host name and nil error if successful. 140 | // If no hosts are added, it returns ErrNoHost. If there's an error generating the hash value 141 | // or searching for it, it returns an appropriate error. If no host with acceptable load is found, 142 | // it falls back to returning the initially found host. If no suitable host is found at all, 143 | // it returns ErrHostNotFound. 144 | // Bounded Loads: Research Paper: https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html 145 | func (c *ConsistentHashing) GetLeast(ctx context.Context, key string) (string, error) { 146 | // Acquire a read lock to ensure thread safety during read operations. 147 | c.mu.RLock() 148 | defer c.mu.RUnlock() 149 | 150 | // Return error if no hosts are added 151 | if len(c.hostList) == 0 { 152 | return "", ErrNoHost 153 | } 154 | 155 | // Generate hash value for the given key using the configured hash function. 156 | h, err := c.Hash(key) 157 | if err != nil { 158 | return "", err 159 | } 160 | 161 | // Find the closest index in the sorted set for the generated hash value. 162 | index, err := c.Search(h) 163 | if err != nil { 164 | return "", err 165 | } 166 | 167 | // Initialize variables to track the host with the least load. 168 | var leastLoadedHost string 169 | var minLoad int64 = math.MaxInt64 170 | 171 | // Iterate through the sorted set to find the host with the least load. 172 | for i := 0; i < len(c.sortedSet); i++ { 173 | nextIndex := (index + i) % len(c.sortedSet) 174 | if host, ok := c.hosts.Load(c.sortedSet[nextIndex]); ok { 175 | // Check if the host's load is acceptable. 176 | if c.LoadOk(host.(string)) { 177 | // Retrieve the load for the host. 178 | if h, ok := c.loadMap.Load(host.(string)); ok { 179 | load := h.(*Host).Load 180 | // Update the least loaded host if found. 181 | if load < minLoad { 182 | minLoad = load 183 | leastLoadedHost = host.(string) 184 | } 185 | } 186 | } 187 | } 188 | } 189 | 190 | // If no suitable host with acceptable load is found, return the initially found host. 
191 | if leastLoadedHost == "" { 192 | if host, ok := c.hosts.Load(c.sortedSet[index]); ok { 193 | return host.(string), nil 194 | } 195 | } 196 | 197 | // Return an error if no suitable host is found. 198 | if leastLoadedHost == "" { 199 | return "", ErrHostNotFound 200 | } 201 | 202 | return leastLoadedHost, nil 203 | } 204 | 205 | // IncreaseLoad increments the load for a specific host. 206 | func (c *ConsistentHashing) IncreaseLoad(ctx context.Context, host string) error { 207 | // Check if the host exists in the loadMap. 208 | if h, ok := c.loadMap.Load(host); ok { 209 | // Retrieve the host data from the loaded value. 210 | hostData := h.(*Host) 211 | 212 | // Atomically increment the load for the host by 1. 213 | atomic.AddInt64(&hostData.Load, 1) 214 | 215 | // Atomically increment the total load across all hosts by 1. 216 | atomic.AddInt64(&c.totalLoad, 1) 217 | 218 | // Return nil to indicate successful load increment. 219 | return nil 220 | } 221 | 222 | // Return an error indicating the host was not found. 223 | return ErrHostNotFound 224 | } 225 | 226 | // DecreaseLoad decreases the Load for a specific host. 227 | func (c *ConsistentHashing) DecreaseLoad(ctx context.Context, host string) error { 228 | // Check if the host exists in the loadMap. 229 | if h, ok := c.loadMap.Load(host); ok { 230 | // Retrieve the host data from the loaded value. 231 | hostData := h.(*Host) 232 | 233 | // Atomically decrement the Load for the host by 1. 234 | atomic.AddInt64(&hostData.Load, -1) 235 | 236 | // Atomically decrement the total load across all hosts by 1. 237 | atomic.AddInt64(&c.totalLoad, -1) 238 | 239 | // Return nil to indicate successful load decrement. 240 | return nil 241 | } 242 | 243 | // Return an error indicating the host was not found. 
244 | return ErrHostNotFound 245 | } 246 | 247 | // UpdateLoad updates the load for a specific host 248 | func (c *ConsistentHashing) UpdateLoad(ctx context.Context, host string, load int64) error { 249 | // Check if the host exists in the load map 250 | if h, ok := c.loadMap.Load(host); ok { 251 | // Type assert the retrieved value to *Host 252 | hostData := h.(*Host) 253 | 254 | // Update the total load atomically 255 | atomic.AddInt64(&c.totalLoad, -hostData.Load+load) 256 | 257 | // Store the new load value for the host atomically 258 | atomic.StoreInt64(&hostData.Load, load) 259 | 260 | // Successfully updated the load, return nil error 261 | return nil 262 | } 263 | 264 | // If the host is not found, return an error 265 | return ErrHostNotFound 266 | } 267 | 268 | // Remove removes a host from the hash ring 269 | func (c *ConsistentHashing) Remove(ctx context.Context, host string) error { 270 | // Acquire the mutex lock to ensure thread-safety 271 | c.mu.Lock() 272 | // Ensure the mutex is unlocked at the end of the function 273 | defer c.mu.Unlock() 274 | 275 | // Check if the host exists in the load map 276 | if _, ok := c.loadMap.Load(host); !ok { 277 | // If the host is not found, return an error 278 | return ErrHostNotFound 279 | } 280 | 281 | // Remove the virtual nodes associated with the host 282 | for i := 0; i < c.config.ReplicationFactor; i++ { 283 | // Generate a hash for the virtual node 284 | h, err := c.Hash(fmt.Sprintf("%s%d", host, i)) 285 | if err != nil { 286 | // Log an error and exit if hashing fails 287 | log.Fatal("key hashing failed", err) 288 | } 289 | // Delete the virtual node from the hosts map 290 | c.hosts.Delete(h) 291 | // Remove the virtual node from the sorted set 292 | c.removeFromSortedSet(h) 293 | } 294 | // Delete the host from the load map 295 | c.loadMap.Delete(host) 296 | 297 | // Remove the host from the host list 298 | for i, h := range c.hostList { 299 | if h == host { 300 | // Remove the host from the list by creating a new slice without the host 301 | c.hostList = append(c.hostList[:i], c.hostList[i+1:]...) 302 | break 303 | } 304 | } 305 | // Return nil indicating successful removal 306 | return nil 307 | } 308 | 309 | // --------------------------------- Helper Functions --------------------------------- 310 | 311 | // hash generates a 64-bit hash value for a given key using the configured hash function. 312 | // It returns the computed hash value and an error, if any occurred during the hashing process. 313 | func (c *ConsistentHashing) Hash(key string) (uint64, error) { 314 | // Create a new hash object using the configured hash function. 315 | h := c.config.HashFunction() 316 | 317 | // Write the key to the hash object. If an error occurs, panic. 318 | if _, err := h.Write([]byte(key)); err != nil { 319 | panic(err) 320 | } 321 | 322 | // Compute and return the hash value as a 64-bit unsigned integer. 323 | return uint64(h.Sum64()), nil 324 | } 325 | 326 | // Search finds the closest index in the sorted set where the given hash key should be placed. 327 | // It uses binary search to efficiently locate the index. 328 | // For example, if c.sortedSet = [10, 20, 30, 40, 50] and key = 25, 329 | // sort.Search determines that key should be inserted after 20 and before 30, returning index 2. 
330 | // The modulo operation (index % len(c.sortedSet)) ensures correct placement within the ring structure 331 | func (c *ConsistentHashing) Search(key uint64) (int, error) { 332 | // Perform a binary search on the sorted set to find the index where key should be inserted. 333 | index := sort.Search(len(c.sortedSet), func(i int) bool { 334 | return c.sortedSet[i] >= key 335 | }) 336 | 337 | // Wrap around the index using modulo operation to ensure it stays within bounds. 338 | // This is necessary for consistent hashing to handle the circular nature of the ring. 339 | index = index % len(c.sortedSet) 340 | 341 | // Return the calculated index where the key should be placed. 342 | return index, nil 343 | } 344 | 345 | // LoadOk checks if the host's current load is below the maximum allowed load. 346 | // It returns true if the host's load is acceptable, otherwise false. 347 | func (c *ConsistentHashing) LoadOk(host string) bool { 348 | // Retrieve the host's load data from the loadMap. 349 | if h, ok := c.loadMap.Load(host); ok { 350 | hostData := h.(*Host) 351 | // Compare the host's current load with the maximum allowed load. 352 | return hostData.Load < c.MaxLoad() 353 | } 354 | // Return false if host data is not found. 355 | return false 356 | } 357 | 358 | // MaxLoad calculates and returns the maximum allowed load per host based on the current 359 | // total load across all hosts and the configured load factor. 360 | func (c *ConsistentHashing) MaxLoad() int64 { 361 | // Retrieve the current total load across all hosts. 362 | totalLoad := atomic.LoadInt64(&c.totalLoad) 363 | 364 | // Ensure totalLoad is at least 1 to avoid division by zero. 365 | if totalLoad == 0 { 366 | totalLoad = 1 367 | } 368 | 369 | // Calculate the average load per host. 370 | avgLoadPerNode := float64(totalLoad) / float64(len(c.hostList)) 371 | 372 | // Ensure avgLoadPerNode is at least 1 to avoid division by zero. 373 | if avgLoadPerNode == 0 { 374 | avgLoadPerNode = 1 375 | } 376 | 377 | // Calculate and return the maximum allowed load per host based on the load factor. 378 | return int64(math.Ceil(avgLoadPerNode * c.config.LoadFactor)) 379 | } 380 | 381 | // GetLoads returns the current load for all hosts 382 | func (c *ConsistentHashing) GetLoads() map[string]int64 { 383 | loads := make(map[string]int64) 384 | c.loadMap.Range(func(key, value interface{}) bool { 385 | loads[key.(string)] = value.(*Host).Load 386 | return true 387 | }) 388 | return loads 389 | } 390 | 391 | func (c *ConsistentHashing) removeFromSortedSet(val uint64) { 392 | // Use binary search to find the index of the value 393 | index := sort.Search(len(c.sortedSet), func(i int) bool { 394 | return c.sortedSet[i] >= val 395 | }) 396 | 397 | // If the value is found, remove it 398 | if index < len(c.sortedSet) && c.sortedSet[index] == val { 399 | // Remove the element by slicing 400 | c.sortedSet = append(c.sortedSet[:index], c.sortedSet[index+1:]...) 401 | } 402 | } 403 | 404 | // Hosts returns the list of current hosts 405 | func (c *ConsistentHashing) Hosts() []string { 406 | c.mu.RLock() 407 | defer c.mu.RUnlock() 408 | return append([]string(nil), c.hostList...) 
409 | } 410 | -------------------------------------------------------------------------------- /consistent_hashing_benchmark_test.go: -------------------------------------------------------------------------------- 1 | package consistent_hashing 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "hash/fnv" 7 | "testing" 8 | ) 9 | 10 | func BenchmarkAdd(b *testing.B) { 11 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 12 | ctx := context.Background() 13 | 14 | b.ResetTimer() 15 | for i := 0; i < b.N; i++ { 16 | host := fmt.Sprintf("host-%d", i) 17 | _ = ch.Add(ctx, host) 18 | } 19 | } 20 | 21 | func BenchmarkGet(b *testing.B) { 22 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 23 | ctx := context.Background() 24 | 25 | // Add some hosts 26 | for i := 0; i < 1000; i++ { 27 | host := fmt.Sprintf("host-%d", i) 28 | _ = ch.Add(ctx, host) 29 | } 30 | 31 | b.ResetTimer() 32 | for i := 0; i < b.N; i++ { 33 | key := fmt.Sprintf("key-%d", i) 34 | _, _ = ch.Get(ctx, key) 35 | } 36 | } 37 | 38 | func BenchmarkGetLeast(b *testing.B) { 39 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 40 | ctx := context.Background() 41 | 42 | // Add some hosts 43 | for i := 0; i < 1000; i++ { 44 | host := fmt.Sprintf("host-%d", i) 45 | _ = ch.Add(ctx, host) 46 | } 47 | 48 | b.ResetTimer() 49 | for i := 0; i < b.N; i++ { 50 | key := fmt.Sprintf("key-%d", i) 51 | _, _ = ch.GetLeast(ctx, key) 52 | } 53 | } 54 | 55 | func BenchmarkIncreaseLoad(b *testing.B) { 56 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 57 | ctx := context.Background() 58 | 59 | // Add some hosts 60 | for i := 0; i < 1000; i++ { 61 | host := fmt.Sprintf("host-%d", i) 62 | _ = ch.Add(ctx, host) 63 | } 64 | 65 | b.ResetTimer() 66 | for i := 0; i < b.N; i++ { 67 | host := fmt.Sprintf("host-%d", i%1000) 68 | _ = ch.IncreaseLoad(ctx, host) 69 | } 70 | } 71 | 72 | func BenchmarkRemove(b *testing.B) { 73 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 74 | ctx := context.Background() 75 | 76 | // Add a fixed number of hosts 77 | numHosts := 1000 78 | for i := 0; i < numHosts; i++ { 79 | host := fmt.Sprintf("host-%d", i) 80 | _ = ch.Add(ctx, host) 81 | } 82 | 83 | b.ResetTimer() 84 | for i := 0; i < b.N; i++ { 85 | host := fmt.Sprintf("host-%d", i%numHosts) 86 | _ = ch.Remove(ctx, host) 87 | // Add the host back to maintain the number of hosts 88 | _ = ch.Add(ctx, host) 89 | } 90 | } 91 | 92 | func BenchmarkParallelOperations(b *testing.B) { 93 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 94 | ctx := context.Background() 95 | 96 | // Add initial hosts 97 | for i := 0; i < 1000; i++ { 98 | host := fmt.Sprintf("host-%d", i) 99 | _ = ch.Add(ctx, host) 100 | } 101 | 102 | b.ResetTimer() 103 | b.RunParallel(func(pb *testing.PB) { 104 | i := 0 105 | for pb.Next() { 106 | switch i % 5 { 107 | case 0: 108 | host := fmt.Sprintf("host-%d", i) 109 | _ = ch.Add(ctx, host) 110 | case 1: 111 | key := fmt.Sprintf("key-%d", i) 112 | _, _ = ch.Get(ctx, key) 113 | case 2: 114 | key := fmt.Sprintf("key-%d", i) 115 | _, _ = ch.GetLeast(ctx, key) 116 | case 3: 117 | host := fmt.Sprintf("host-%d", i%1000) 118 | _ = ch.IncreaseLoad(ctx, host) 119 | case 4: 120 | host := fmt.Sprintf("host-%d", i%1000) 121 | _ = ch.Remove(ctx, host) 122 | } 123 | i++ 124 | } 125 | }) 126 
| } 127 | -------------------------------------------------------------------------------- /consistent_hashing_test.go: -------------------------------------------------------------------------------- 1 | package consistent_hashing 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "hash/fnv" 7 | "math" 8 | "sync" 9 | "testing" 10 | ) 11 | 12 | func TestNewWithConfig(t *testing.T) { 13 | cfg := Config{ 14 | ReplicationFactor: 10, 15 | LoadFactor: 1.25, 16 | HashFunction: fnv.New64a, 17 | } 18 | ch, err := NewWithConfig(cfg) 19 | if err != nil { 20 | t.Errorf("NewWithConfig failed: %v", err) 21 | } 22 | if ch.config.ReplicationFactor != 10 { 23 | t.Errorf("Expected ReplicationFactor 10, got %d", ch.config.ReplicationFactor) 24 | } 25 | if ch.config.LoadFactor != 1.25 { 26 | t.Errorf("Expected LoadFactor 1.25, got %f", ch.config.LoadFactor) 27 | } 28 | } 29 | 30 | func TestAdd(t *testing.T) { 31 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 32 | ctx := context.Background() 33 | err := ch.Add(ctx, "host1") 34 | if err != nil { 35 | t.Errorf("Add failed: %v", err) 36 | } 37 | if len(ch.hostList) != 1 { 38 | t.Errorf("Expected 1 host, got %d", len(ch.hostList)) 39 | } 40 | if len(ch.sortedSet) != 3 { 41 | t.Errorf("Expected 3 virtual nodes, got %d", len(ch.sortedSet)) 42 | } 43 | } 44 | 45 | func TestGet(t *testing.T) { 46 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 47 | ctx := context.Background() 48 | ch.Add(ctx, "host1") 49 | ch.Add(ctx, "host2") 50 | host, err := ch.Get(ctx, "key1") 51 | if err != nil { 52 | t.Errorf("Get failed: %v", err) 53 | } 54 | if host != "host1" && host != "host2" { 55 | t.Errorf("Expected host1 or host2, got %s", host) 56 | } 57 | } 58 | 59 | func TestGetLeast(t *testing.T) { 60 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 61 | ctx := context.Background() 62 | ch.Add(ctx, "host1") 63 | ch.Add(ctx, "host2") 64 | ch.IncreaseLoad(ctx, "host1") 65 | host, err := ch.GetLeast(ctx, "key1") 66 | if err != nil { 67 | t.Errorf("GetLeast failed: %v", err) 68 | } 69 | if host != "host2" { 70 | t.Errorf("Expected host2, got %s", host) 71 | } 72 | } 73 | 74 | func TestIncreaseLoad(t *testing.T) { 75 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 76 | ctx := context.Background() 77 | ch.Add(ctx, "host1") 78 | err := ch.IncreaseLoad(ctx, "host1") 79 | if err != nil { 80 | t.Errorf("IncreaseLoad failed: %v", err) 81 | } 82 | loads := ch.GetLoads() 83 | if loads["host1"] != 1 { 84 | t.Errorf("Expected load 1, got %d", loads["host1"]) 85 | } 86 | } 87 | 88 | func TestDecreaseLoad(t *testing.T) { 89 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 90 | ctx := context.Background() 91 | ch.Add(ctx, "host1") 92 | ch.IncreaseLoad(ctx, "host1") 93 | ch.DecreaseLoad(ctx, "host1") 94 | loads := ch.GetLoads() 95 | if loads["host1"] != 0 { 96 | t.Errorf("Expected load 0, got %d", loads["host1"]) 97 | } 98 | } 99 | 100 | func TestUpdateLoad(t *testing.T) { 101 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 102 | ctx := context.Background() 103 | ch.Add(ctx, "host1") 104 | err := ch.UpdateLoad(ctx, "host1", 5) 105 | if err != nil { 106 | t.Errorf("UpdateLoad failed: %v", err) 107 | } 108 | loads := ch.GetLoads() 109 | if loads["host1"] != 5 { 110 | t.Errorf("Expected load 5, got %d", 
loads["host1"]) 111 | } 112 | } 113 | 114 | func TestRemove(t *testing.T) { 115 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 116 | ctx := context.Background() 117 | ch.Add(ctx, "host1") 118 | ch.Add(ctx, "host2") 119 | err := ch.Remove(ctx, "host1") 120 | if err != nil { 121 | t.Errorf("Remove failed: %v", err) 122 | } 123 | if len(ch.hostList) != 1 { 124 | t.Errorf("Expected 1 host, got %d", len(ch.hostList)) 125 | } 126 | if ch.hostList[0] != "host2" { 127 | t.Errorf("Expected host2, got %s", ch.hostList[0]) 128 | } 129 | } 130 | 131 | func TestConcurrency(t *testing.T) { 132 | ch, _ := NewWithConfig(Config{ReplicationFactor: 3, LoadFactor: 1.25, HashFunction: fnv.New64a}) 133 | ctx := context.Background() 134 | var wg sync.WaitGroup 135 | for i := 0; i < 1000; i++ { 136 | wg.Add(1) 137 | go func(i int) { 138 | defer wg.Done() 139 | host := fmt.Sprintf("host%d", i) 140 | ch.Add(ctx, host) 141 | ch.Get(ctx, host) 142 | ch.GetLeast(ctx, host) 143 | ch.IncreaseLoad(ctx, host) 144 | ch.DecreaseLoad(ctx, host) 145 | ch.UpdateLoad(ctx, host, int64(i)) 146 | if i%2 == 0 { 147 | ch.Remove(ctx, host) 148 | } 149 | }(i) 150 | } 151 | wg.Wait() 152 | } 153 | 154 | func TestLoadBalancing(t *testing.T) { 155 | ch, _ := NewWithConfig(Config{ReplicationFactor: 100, LoadFactor: 1.25, HashFunction: fnv.New64a}) 156 | ctx := context.Background() 157 | hosts := []string{"host1", "host2", "host3", "host4", "host5"} 158 | 159 | // Add hosts to the consistent hashing instance 160 | for _, host := range hosts { 161 | if err := ch.Add(ctx, host); err != nil { 162 | t.Fatalf("Error adding host %s: %v", host, err) 163 | } 164 | } 165 | 166 | keyCount := 10000 167 | hostCounts := make(map[string]int) 168 | 169 | // Generate keys and assign them to hosts 170 | for i := 0; i < keyCount; i++ { 171 | key := fmt.Sprintf("key%d", i) 172 | host, err := ch.GetLeast(ctx, key) 173 | if err != nil { 174 | t.Fatalf("Error getting host for key %s: %v", key, err) 175 | } 176 | hostCounts[host]++ 177 | if err := ch.IncreaseLoad(ctx, host); err != nil { 178 | t.Fatalf("Error increasing load for host %s: %v", host, err) 179 | } 180 | } 181 | 182 | // Check if the load is reasonably balanced 183 | expectedCount := keyCount / len(hosts) 184 | tolerance := float64(expectedCount) * 0.1 // 10% tolerance 185 | 186 | for host, count := range hostCounts { 187 | if math.Abs(float64(count-expectedCount)) > tolerance { 188 | t.Errorf("Load for %s is not balanced. Expected around %d, got %d", host, expectedCount, count) 189 | } 190 | } 191 | } 192 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ArchishmanSengupta/consistent-hashing 2 | 3 | go 1.20 4 | 5 | require github.com/spaolacci/murmur3 v1.1.0 6 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= 2 | github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= 3 | --------------------------------------------------------------------------------