├── .gitignore ├── .idea └── modules.xml ├── LICENSE ├── README.md ├── byteview.go ├── cache.go ├── client.go ├── consistenthash ├── con_hash.go └── config.go ├── example └── test.go ├── go.mod ├── go.sum ├── group.go ├── pb ├── kama.pb.go ├── kama.proto └── kama_grpc.pb.go ├── peers.go ├── registry └── register.go ├── server.go ├── singleflight └── singleflight.go ├── store ├── lru.go ├── lru2.go ├── lru2_test.go └── store.go └── utils.go /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ -------------------------------------------------------------------------------- /.idea/modules.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 程序员Carl 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 【代码随想录知识星球】项目分享-缓存系统(Go) 2 | 3 | # KamaCache 4 | 5 | ## 核心特性 6 | 7 | ### 1. 分布式架构 8 | - 基于 etcd 的服务注册与发现 9 | - 一致性哈希实现负载均衡 10 | - 节点自动发现和同步 11 | - 支持动态扩缩容 12 | 13 | ### 2. 缓存功能 14 | - 支持 LRU 缓存策略 15 | - 可配置过期时间 16 | - 支持批量操作 17 | - 防止缓存击穿 18 | - 支持按组划分缓存空间 19 | 20 | ### 3. 高性能设计 21 | - 并发安全 22 | - 异步数据同步 23 | - 单飞机制避免缓存击穿 24 | - gRPC 通信 25 | 26 | ## 快速开始 27 | 28 | ### 1. 安装 29 | ```bash 30 | go get github.com/youngyangyang04/KamaCache-Go 31 | ``` 32 | 33 | ### 2. 启动 etcd 34 | ```bash 35 | # 使用 Docker 启动 etcd 36 | docker run -d --name etcd \ 37 | -p 2379:2379 \ 38 | quay.io/coreos/etcd:v3.5.0 \ 39 | etcd --advertise-client-urls http://0.0.0.0:2379 \ 40 | --listen-client-urls http://0.0.0.0:2379 41 | ``` 42 | 43 | ### 3. 运行实例 44 | 45 | 详情见测试 demo:[example/test.go](example/test.go) 46 | 47 | 48 | ### 4. 多节点部署 49 | ```bash 50 | # 启动节点 A 51 | go run example/test.go -port 8001 -node A 52 | 53 | # 启动节点 B 54 | go run example/test.go -port 8002 -node B 55 | 56 | # 启动节点 C 57 | go run example/test.go -port 8003 -node C 58 | ``` 59 | 60 | ### 5. 测试结果 61 | 62 | A 进程: 63 | 64 | ```bash 65 | go run example/test.go -port 8001 -node A 66 | 2025/04/08 10:16:36 [节点A] 启动,地址: :8001 67 | INFO[0000] Created cache group [test] with cacheBytes=2097152, expiration=0s 68 | INFO[0000] [KamaCache] registered peers for group [test] 69 | 2025/04/08 10:16:36 [节点A] 等待节点注册... 70 | 2025/04/08 10:16:36 [节点A] 开始启动服务... 
71 | INFO[0000] Server starting at :8001 72 | INFO[0000] Service registered: kama-cache at 172.22.152.216:8001 73 | INFO[0000] Successfully created client for 172.22.152.216:8001 74 | INFO[0000] New service discovered at 172.22.152.216:8001 75 | INFO[0002] Successfully created client for 172.22.152.216:8002 76 | INFO[0002] New service discovered at 172.22.152.216:8002 77 | 78 | === 节点A:设置本地数据 === 79 | INFO[0005] Cache initialized with type lru2, max bytes: 2097152 80 | 节点A: 设置键 key_A 成功 81 | 2025/04/08 10:16:41 [节点A] 等待其他节点准备就绪... 82 | INFO[0005] grpc set request resp: value:"这是节点A的数据" 83 | INFO[0006] Successfully created client for 172.22.152.216:8003 84 | INFO[0006] New service discovered at 172.22.152.216:8003 85 | 2025/04/08 10:17:11 当前已发现的节点: 86 | 2025/04/08 10:17:11 - 172.22.152.216:8002 87 | 2025/04/08 10:17:11 - 172.22.152.216:8003 88 | 2025/04/08 10:17:11 - 172.22.152.216:8001 89 | 90 | === 节点A:获取本地数据 === 91 | 直接查询本地缓存... 92 | 缓存统计: map[cache_closed:false cache_hit_rate:0 cache_hits:0 cache_initialized:true cache_misses:0 cache_size:2 closed:false expiration:0s loader_errors:0 loader_hits:0 loads:0 local_hits:0 local_misses:0 name:test peets:0 peer_misses:0] 93 | 项目有效,将其移至二级缓存 94 | 节点A: 获取本地键 key_A 成功: 这是节点A的数据 95 | 96 | === 节点A:尝试获取远程数据 key_B === 97 | 2025/04/08 10:17:11 [节点A] 开始查找键 key_B 的远程节点 98 | 项目有效,将其移至二级缓存 99 | 节点A: 获取远程键 key_B 成功: 这是节点B的数据 100 | 101 | === 节点A:尝试获取远程数据 key_C === 102 | 2025/04/08 10:17:11 [节点A] 开始查找键 key_C 的远程节点 103 | 节点A: 获取远程键 key_C 成功: 这是节点C的数据 104 | ``` 105 | 106 | B 进程: 107 | 108 | ```bash 109 | go run example/test.go -port 8002 -node B 110 | 2025/04/08 10:16:39 [节点B] 启动,地址: :8002 111 | INFO[0000] Successfully created client for 172.22.152.216:8001 112 | INFO[0000] Discovered service at 172.22.152.216:8001 113 | INFO[0000] Created cache group [test] with cacheBytes=2097152, expiration=0s 114 | INFO[0000] [KamaCache] registered peers for group [test] 115 | 2025/04/08 10:16:39 [节点B] 等待节点注册... 
116 | 2025/04/08 10:16:39 [节点B] 开始启动服务... 117 | INFO[0000] Server starting at :8002 118 | INFO[0000] Service registered: kama-cache at 172.22.152.216:8002 119 | INFO[0000] Successfully created client for 172.22.152.216:8002 120 | INFO[0000] New service discovered at 172.22.152.216:8002 121 | INFO[0003] Successfully created client for 172.22.152.216:8003 122 | INFO[0003] New service discovered at 172.22.152.216:8003 123 | 124 | === 节点B:设置本地数据 === 125 | INFO[0005] Cache initialized with type lru2, max bytes: 2097152 126 | 节点B: 设置键 key_B 成功 127 | 2025/04/08 10:16:44 [节点B] 等待其他节点准备就绪... 128 | INFO[0005] grpc set request resp: value:"这是节点B的数据" 129 | 项目有效,将其移至二级缓存 130 | 2025/04/08 10:17:14 当前已发现的节点: 131 | 2025/04/08 10:17:14 - 172.22.152.216:8001 132 | 2025/04/08 10:17:14 - 172.22.152.216:8002 133 | 2025/04/08 10:17:14 - 172.22.152.216:8003 134 | 135 | === 节点B:获取本地数据 === 136 | 直接查询本地缓存... 137 | 缓存统计: map[cache_closed:false cache_hit_rate:1 cache_hits:1 cache_initialized:true cache_misses:0 cache_size:2 closed:false expiration:0s hit_rate:1 loader_errors:0 loader_hits:0 loads:0 local_hits:1 local_misses:0 naest peer_hits:0 peer_misses:0] 138 | 项目有效,将其移至二级缓存 139 | 节点B: 获取本地键 key_B 成功: 这是节点B的数据 140 | 141 | === 节点B:尝试获取远程数据 key_A === 142 | 2025/04/08 10:17:14 [节点B] 开始查找键 key_A 的远程节点 143 | 节点B: 获取远程键 key_A 成功: 这是节点A的数据 144 | 145 | === 节点B:尝试获取远程数据 key_C === 146 | 2025/04/08 10:17:14 [节点B] 开始查找键 key_C 的远程节点 147 | 节点B: 获取远程键 key_C 成功: 这是节点C的数据 148 | ``` 149 | 150 | C 进程: 151 | 152 | ```bash 153 | go run example/test.go -port 8003 -node C 154 | 2025/04/08 10:16:42 [节点C] 启动,地址: :8003 155 | INFO[0000] Successfully created client for 172.22.152.216:8001 156 | INFO[0000] Discovered service at 172.22.152.216:8001 157 | INFO[0000] Successfully created client for 172.22.152.216:8002 158 | INFO[0000] Discovered service at 172.22.152.216:8002 159 | INFO[0000] Created cache group [test] with cacheBytes=2097152, expiration=0s 160 | INFO[0000] [KamaCache] registered peers for group [test] 
161 | 2025/04/08 10:16:42 [节点C] 等待节点注册... 162 | 2025/04/08 10:16:42 [节点C] 开始启动服务... 163 | INFO[0000] Server starting at :8003 164 | INFO[0000] Service registered: kama-cache at 172.22.152.216:8003 165 | INFO[0000] Successfully created client for 172.22.152.216:8003 166 | INFO[0000] New service discovered at 172.22.152.216:8003 167 | 168 | === 节点C:设置本地数据 === 169 | INFO[0005] Cache initialized with type lru2, max bytes: 2097152 170 | 节点C: 设置键 key_C 成功 171 | 2025/04/08 10:16:47 [节点C] 等待其他节点准备就绪... 172 | INFO[0005] grpc set request resp: value:"这是节点C的数据" 173 | 2025/04/08 10:17:17 当前已发现的节点: 174 | 2025/04/08 10:17:17 - 172.22.152.216:8001 175 | 2025/04/08 10:17:17 - 172.22.152.216:8002 176 | 2025/04/08 10:17:17 - 172.22.152.216:8003 177 | 178 | === 节点C:获取本地数据 === 179 | 直接查询本地缓存... 180 | 缓存统计: map[cache_closed:false cache_hit_rate:0 cache_hits:0 cache_initialized:true cache_misses:0 cache_size:1 closed:false expiration:0s loader_errors:0 loader_hits:0 loads:0 local_hits:0 local_misses:0 name:test peets:0 peer_misses:0] 181 | 项目有效,将其移至二级缓存 182 | 节点C: 获取本地键 key_C 成功: 这是节点C的数据 183 | 184 | === 节点C:尝试获取远程数据 key_A === 185 | 2025/04/08 10:17:17 [节点C] 开始查找键 key_A 的远程节点 186 | 节点C: 获取远程键 key_A 成功: 这是节点A的数据 187 | 188 | === 节点C:尝试获取远程数据 key_B === 189 | 2025/04/08 10:17:17 [节点C] 开始查找键 key_B 的远程节点 190 | 节点C: 获取远程键 key_B 成功: 这是节点B的数据 191 | ``` 192 | 193 | 194 | ## 配置说明 195 | 196 | ### 服务器配置 197 | ```go 198 | type ServerOptions struct { 199 | EtcdEndpoints []string // etcd 端点 200 | DialTimeout time.Duration // 连接超时 201 | MaxMsgSize int // 最大消息大小 202 | } 203 | ``` 204 | 205 | ### 缓存组配置 206 | ```go 207 | group := kamacache.NewGroup("users", 2<<20, getter, 208 | kamacache.WithExpiration(time.Hour), // 设置过期时间 209 | ) 210 | ``` 211 | 212 | ## 使用示例 213 | 214 | ### 1. 设置缓存 215 | ```go 216 | err := group.Set(ctx, "key", []byte("value")) 217 | ``` 218 | 219 | ### 2. 获取缓存 220 | ```go 221 | value, err := group.Get(ctx, "key") 222 | ``` 223 | 224 | ### 3. 
删除缓存 225 | ```go 226 | err := group.Delete(ctx, "key") 227 | ``` 228 | 229 | ## 注意事项 230 | 231 | 1. 确保 etcd 服务可用 232 | 2. 合理配置缓存容量和过期时间 233 | 3. 节点地址不要重复 234 | 4. 建议在生产环境配置 TLS 235 | 236 | ## 性能优化 237 | 238 | 1. 使用一致性哈希实现负载均衡 239 | 2. 异步数据同步减少延迟 240 | 3. 单飞机制避免缓存击穿 241 | 4. 支持批量操作提高吞吐量 242 | 243 | ## 贡献指南 244 | 245 | 欢迎提交 Issue 和 Pull Request。 246 | 247 | ## 许可证 248 | 249 | MIT License 250 | 251 | -------------------------------------------------------------------------------- /byteview.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | // ByteView 只读的字节视图,用于缓存数据 4 | type ByteView struct { 5 | b []byte 6 | } 7 | 8 | func (b ByteView) Len() int { 9 | return len(b.b) 10 | } 11 | 12 | func (b ByteView) ByteSLice() []byte { 13 | return cloneBytes(b.b) 14 | } 15 | 16 | func (b ByteView) String() string { 17 | return string(b.b) 18 | } 19 | 20 | func cloneBytes(b []byte) []byte { 21 | c := make([]byte, len(b)) 22 | copy(c, b) 23 | return c 24 | } 25 | -------------------------------------------------------------------------------- /cache.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import ( 4 | "context" 5 | "github.com/youngyangyang04/KamaCache-Go/store" 6 | "sync" 7 | "sync/atomic" 8 | "time" 9 | 10 | "github.com/sirupsen/logrus" 11 | ) 12 | 13 | // Cache 是对底层缓存存储的封装 14 | type Cache struct { 15 | mu sync.RWMutex 16 | store store.Store // 底层存储实现 17 | opts CacheOptions // 缓存配置选项 18 | hits int64 // 缓存命中次数 19 | misses int64 // 缓存未命中次数 20 | initialized int32 // 原子变量,标记缓存是否已初始化 21 | closed int32 // 原子变量,标记缓存是否已关闭 22 | } 23 | 24 | // CacheOptions 缓存配置选项 25 | type CacheOptions struct { 26 | CacheType store.CacheType // 缓存类型: LRU, LRU2 等 27 | MaxBytes int64 // 最大内存使用量 28 | BucketCount uint16 // 缓存桶数量 (用于 LRU2) 29 | CapPerBucket uint16 // 每个缓存桶的容量 (用于 LRU2) 30 | Level2Cap uint16 // 二级缓存桶的容量 (用于 LRU2) 31 | CleanupTime time.Duration // 清理间隔 32 | 
OnEvicted func(key string, value store.Value) // 驱逐回调 33 | } 34 | 35 | // DefaultCacheOptions 返回默认的缓存配置 36 | func DefaultCacheOptions() CacheOptions { 37 | return CacheOptions{ 38 | CacheType: store.LRU2, 39 | MaxBytes: 8 * 1024 * 1024, // 8MB 40 | BucketCount: 16, 41 | CapPerBucket: 512, 42 | Level2Cap: 256, 43 | CleanupTime: time.Minute, 44 | OnEvicted: nil, 45 | } 46 | } 47 | 48 | // NewCache 创建一个新的缓存实例 49 | func NewCache(opts CacheOptions) *Cache { 50 | return &Cache{ 51 | opts: opts, 52 | } 53 | } 54 | 55 | // ensureInitialized 确保缓存已初始化 56 | func (c *Cache) ensureInitialized() { 57 | // 快速检查缓存是否已初始化,避免不必要的锁争用 58 | if atomic.LoadInt32(&c.initialized) == 1 { 59 | return 60 | } 61 | 62 | // 双重检查锁定模式 63 | c.mu.Lock() 64 | defer c.mu.Unlock() 65 | 66 | if c.initialized == 0 { 67 | // 创建存储选项 68 | storeOpts := store.Options{ 69 | MaxBytes: c.opts.MaxBytes, 70 | BucketCount: c.opts.BucketCount, 71 | CapPerBucket: c.opts.CapPerBucket, 72 | Level2Cap: c.opts.Level2Cap, 73 | CleanupInterval: c.opts.CleanupTime, 74 | OnEvicted: c.opts.OnEvicted, 75 | } 76 | 77 | // 创建存储实例 78 | c.store = store.NewStore(c.opts.CacheType, storeOpts) 79 | 80 | // 标记为已初始化 81 | atomic.StoreInt32(&c.initialized, 1) 82 | 83 | logrus.Infof("Cache initialized with type %s, max bytes: %d", c.opts.CacheType, c.opts.MaxBytes) 84 | } 85 | } 86 | 87 | // Add 向缓存中添加一个 key-value 对 88 | func (c *Cache) Add(key string, value ByteView) { 89 | if atomic.LoadInt32(&c.closed) == 1 { 90 | logrus.Warnf("Attempted to add to a closed cache: %s", key) 91 | return 92 | } 93 | 94 | c.ensureInitialized() 95 | 96 | if err := c.store.Set(key, value); err != nil { 97 | logrus.Warnf("Failed to add key %s to cache: %v", key, err) 98 | } 99 | } 100 | 101 | // Get 从缓存中获取值 102 | func (c *Cache) Get(ctx context.Context, key string) (value ByteView, ok bool) { 103 | if atomic.LoadInt32(&c.closed) == 1 { 104 | return ByteView{}, false 105 | } 106 | 107 | // 如果缓存未初始化,直接返回未命中 108 | if atomic.LoadInt32(&c.initialized) == 0 { 109 | 
atomic.AddInt64(&c.misses, 1) 110 | return ByteView{}, false 111 | } 112 | 113 | c.mu.RLock() 114 | defer c.mu.RUnlock() 115 | 116 | // 从底层存储获取 117 | val, found := c.store.Get(key) 118 | if !found { 119 | atomic.AddInt64(&c.misses, 1) 120 | return ByteView{}, false 121 | } 122 | 123 | // 更新命中计数 124 | atomic.AddInt64(&c.hits, 1) 125 | 126 | // 转换并返回 127 | if bv, ok := val.(ByteView); ok { 128 | return bv, true 129 | } 130 | 131 | // 类型断言失败 132 | logrus.Warnf("Type assertion failed for key %s, expected ByteView", key) 133 | atomic.AddInt64(&c.misses, 1) 134 | return ByteView{}, false 135 | } 136 | 137 | // AddWithExpiration 向缓存中添加一个带过期时间的 key-value 对 138 | func (c *Cache) AddWithExpiration(key string, value ByteView, expirationTime time.Time) { 139 | if atomic.LoadInt32(&c.closed) == 1 { 140 | logrus.Warnf("Attempted to add to a closed cache: %s", key) 141 | return 142 | } 143 | 144 | c.ensureInitialized() 145 | 146 | // 计算过期时间 147 | expiration := time.Until(expirationTime) 148 | if expiration <= 0 { 149 | logrus.Debugf("Key %s already expired, not adding to cache", key) 150 | return 151 | } 152 | 153 | // 设置到底层存储 154 | if err := c.store.SetWithExpiration(key, value, expiration); err != nil { 155 | logrus.Warnf("Failed to add key %s to cache with expiration: %v", key, err) 156 | } 157 | } 158 | 159 | // Delete 从缓存中删除一个 key 160 | func (c *Cache) Delete(key string) bool { 161 | if atomic.LoadInt32(&c.closed) == 1 || atomic.LoadInt32(&c.initialized) == 0 { 162 | return false 163 | } 164 | 165 | c.mu.RLock() 166 | defer c.mu.RUnlock() 167 | 168 | return c.store.Delete(key) 169 | } 170 | 171 | // Clear 清空缓存 172 | func (c *Cache) Clear() { 173 | if atomic.LoadInt32(&c.closed) == 1 || atomic.LoadInt32(&c.initialized) == 0 { 174 | return 175 | } 176 | 177 | c.mu.Lock() 178 | defer c.mu.Unlock() 179 | 180 | c.store.Clear() 181 | 182 | // 重置统计信息 183 | atomic.StoreInt64(&c.hits, 0) 184 | atomic.StoreInt64(&c.misses, 0) 185 | } 186 | 187 | // Len 返回缓存的当前存储项数量 188 | func (c 
*Cache) Len() int { 189 | if atomic.LoadInt32(&c.closed) == 1 || atomic.LoadInt32(&c.initialized) == 0 { 190 | return 0 191 | } 192 | 193 | c.mu.RLock() 194 | defer c.mu.RUnlock() 195 | 196 | return c.store.Len() 197 | } 198 | 199 | // Close 关闭缓存,释放资源 200 | func (c *Cache) Close() { 201 | // 如果已经关闭,直接返回 202 | if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) { 203 | return 204 | } 205 | 206 | c.mu.Lock() 207 | defer c.mu.Unlock() 208 | 209 | // 关闭底层存储 210 | if c.store != nil { 211 | if closer, ok := c.store.(interface{ Close() }); ok { 212 | closer.Close() 213 | } 214 | c.store = nil 215 | } 216 | 217 | // 重置缓存状态 218 | atomic.StoreInt32(&c.initialized, 0) 219 | 220 | logrus.Debugf("Cache closed, hits: %d, misses: %d", atomic.LoadInt64(&c.hits), atomic.LoadInt64(&c.misses)) 221 | } 222 | 223 | // Stats 返回缓存统计信息 224 | func (c *Cache) Stats() map[string]interface{} { 225 | stats := map[string]interface{}{ 226 | "initialized": atomic.LoadInt32(&c.initialized) == 1, 227 | "closed": atomic.LoadInt32(&c.closed) == 1, 228 | "hits": atomic.LoadInt64(&c.hits), 229 | "misses": atomic.LoadInt64(&c.misses), 230 | } 231 | 232 | if atomic.LoadInt32(&c.initialized) == 1 { 233 | stats["size"] = c.Len() 234 | 235 | // 计算命中率 236 | totalRequests := stats["hits"].(int64) + stats["misses"].(int64) 237 | if totalRequests > 0 { 238 | stats["hit_rate"] = float64(stats["hits"].(int64)) / float64(totalRequests) 239 | } else { 240 | stats["hit_rate"] = 0.0 241 | } 242 | } 243 | 244 | return stats 245 | } 246 | -------------------------------------------------------------------------------- /client.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/sirupsen/logrus" 9 | pb "github.com/youngyangyang04/KamaCache-Go/pb" 10 | clientv3 "go.etcd.io/etcd/client/v3" 11 | "google.golang.org/grpc" 12 | "google.golang.org/grpc/credentials/insecure" 13 | ) 14 | 15 | type Client 
struct { 16 | addr string 17 | svcName string 18 | etcdCli *clientv3.Client 19 | conn *grpc.ClientConn 20 | grpcCli pb.KamaCacheClient 21 | } 22 | 23 | var _ Peer = (*Client)(nil) 24 | 25 | func NewClient(addr string, svcName string, etcdCli *clientv3.Client) (*Client, error) { 26 | var err error 27 | if etcdCli == nil { 28 | etcdCli, err = clientv3.New(clientv3.Config{ 29 | Endpoints: []string{"localhost:2379"}, 30 | DialTimeout: 5 * time.Second, 31 | }) 32 | if err != nil { 33 | return nil, fmt.Errorf("failed to create etcd client: %v", err) 34 | } 35 | } 36 | 37 | conn, err := grpc.Dial(addr, 38 | grpc.WithTransportCredentials(insecure.NewCredentials()), 39 | grpc.WithBlock(), 40 | grpc.WithTimeout(10*time.Second), 41 | grpc.WithDefaultCallOptions(grpc.WaitForReady(true)), 42 | ) 43 | if err != nil { 44 | return nil, fmt.Errorf("failed to dial server: %v", err) 45 | } 46 | 47 | grpcClient := pb.NewKamaCacheClient(conn) 48 | 49 | client := &Client{ 50 | addr: addr, 51 | svcName: svcName, 52 | etcdCli: etcdCli, 53 | conn: conn, 54 | grpcCli: grpcClient, 55 | } 56 | 57 | return client, nil 58 | } 59 | 60 | func (c *Client) Get(group, key string) ([]byte, error) { 61 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) 62 | defer cancel() 63 | 64 | resp, err := c.grpcCli.Get(ctx, &pb.Request{ 65 | Group: group, 66 | Key: key, 67 | }) 68 | if err != nil { 69 | return nil, fmt.Errorf("failed to get value from kamacache: %v", err) 70 | } 71 | 72 | return resp.GetValue(), nil 73 | } 74 | 75 | func (c *Client) Delete(group, key string) (bool, error) { 76 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) 77 | defer cancel() 78 | 79 | resp, err := c.grpcCli.Delete(ctx, &pb.Request{ 80 | Group: group, 81 | Key: key, 82 | }) 83 | if err != nil { 84 | return false, fmt.Errorf("failed to delete value from kamacache: %v", err) 85 | } 86 | 87 | return resp.GetValue(), nil 88 | } 89 | 90 | func (c *Client) Set(ctx context.Context, 
group, key string, value []byte) error { 91 | resp, err := c.grpcCli.Set(ctx, &pb.Request{ 92 | Group: group, 93 | Key: key, 94 | Value: value, 95 | }) 96 | if err != nil { 97 | return fmt.Errorf("failed to set value to kamacache: %v", err) 98 | } 99 | logrus.Infof("grpc set request resp: %+v", resp) 100 | 101 | return nil 102 | } 103 | 104 | func (c *Client) Close() error { 105 | if c.conn != nil { 106 | return c.conn.Close() 107 | } 108 | return nil 109 | } 110 | -------------------------------------------------------------------------------- /consistenthash/con_hash.go: -------------------------------------------------------------------------------- 1 | package consistenthash 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "math" 7 | "sort" 8 | "sync" 9 | "sync/atomic" 10 | "time" 11 | ) 12 | 13 | // Map 一致性哈希实现 14 | type Map struct { 15 | mu sync.RWMutex 16 | // 配置信息 17 | config *Config 18 | // 哈希环 19 | keys []int 20 | // 哈希环到节点的映射 21 | hashMap map[int]string 22 | // 节点到虚拟节点数量的映射 23 | nodeReplicas map[string]int 24 | // 节点负载统计 25 | nodeCounts map[string]int64 26 | // 总请求数 27 | totalRequests int64 28 | } 29 | 30 | // New 创建一致性哈希实例 31 | func New(opts ...Option) *Map { 32 | m := &Map{ 33 | config: DefaultConfig, 34 | hashMap: make(map[int]string), 35 | nodeReplicas: make(map[string]int), 36 | nodeCounts: make(map[string]int64), 37 | } 38 | 39 | for _, opt := range opts { 40 | opt(m) 41 | } 42 | 43 | m.startBalancer() // 启动负载均衡器 44 | return m 45 | } 46 | 47 | // Option 配置选项 48 | type Option func(*Map) 49 | 50 | // WithConfig 设置配置 51 | func WithConfig(config *Config) Option { 52 | return func(m *Map) { 53 | m.config = config 54 | } 55 | } 56 | 57 | // Add 添加节点 58 | func (m *Map) Add(nodes ...string) error { 59 | if len(nodes) == 0 { 60 | return errors.New("no nodes provided") 61 | } 62 | 63 | m.mu.Lock() 64 | defer m.mu.Unlock() 65 | 66 | for _, node := range nodes { 67 | if node == "" { 68 | continue 69 | } 70 | 71 | // 为节点添加虚拟节点 72 | m.addNode(node, 
m.config.DefaultReplicas) 73 | } 74 | 75 | // 重新排序 76 | sort.Ints(m.keys) 77 | return nil 78 | } 79 | 80 | // Remove 移除节点 81 | func (m *Map) Remove(node string) error { 82 | if node == "" { 83 | return errors.New("invalid node") 84 | } 85 | 86 | m.mu.Lock() 87 | defer m.mu.Unlock() 88 | 89 | replicas := m.nodeReplicas[node] 90 | if replicas == 0 { 91 | return fmt.Errorf("node %s not found", node) 92 | } 93 | 94 | // 移除节点的所有虚拟节点 95 | for i := 0; i < replicas; i++ { 96 | hash := int(m.config.HashFunc([]byte(fmt.Sprintf("%s-%d", node, i)))) 97 | delete(m.hashMap, hash) 98 | for j := 0; j < len(m.keys); j++ { 99 | if m.keys[j] == hash { 100 | m.keys = append(m.keys[:j], m.keys[j+1:]...) 101 | break 102 | } 103 | } 104 | } 105 | 106 | delete(m.nodeReplicas, node) 107 | delete(m.nodeCounts, node) 108 | return nil 109 | } 110 | 111 | // Get 获取节点 112 | func (m *Map) Get(key string) string { 113 | if key == "" { 114 | return "" 115 | } 116 | 117 | m.mu.RLock() 118 | defer m.mu.RUnlock() 119 | 120 | if len(m.keys) == 0 { 121 | return "" 122 | } 123 | 124 | hash := int(m.config.HashFunc([]byte(key))) 125 | // 二分查找 126 | idx := sort.Search(len(m.keys), func(i int) bool { 127 | return m.keys[i] >= hash 128 | }) 129 | 130 | // 处理边界情况 131 | if idx == len(m.keys) { 132 | idx = 0 133 | } 134 | 135 | node := m.hashMap[m.keys[idx]] 136 | count := m.nodeCounts[node] 137 | m.nodeCounts[node] = count + 1 138 | atomic.AddInt64(&m.totalRequests, 1) 139 | 140 | return node 141 | } 142 | 143 | // addNode 添加节点的虚拟节点 144 | func (m *Map) addNode(node string, replicas int) { 145 | for i := 0; i < replicas; i++ { 146 | hash := int(m.config.HashFunc([]byte(fmt.Sprintf("%s-%d", node, i)))) 147 | m.keys = append(m.keys, hash) 148 | m.hashMap[hash] = node 149 | } 150 | m.nodeReplicas[node] = replicas 151 | } 152 | 153 | // checkAndRebalance 检查并重新平衡虚拟节点 154 | func (m *Map) checkAndRebalance() { 155 | if atomic.LoadInt64(&m.totalRequests) < 1000 { 156 | return // 样本太少,不进行调整 157 | } 158 | 159 | // 
计算负载情况 160 | avgLoad := float64(m.totalRequests) / float64(len(m.nodeReplicas)) 161 | var maxDiff float64 162 | 163 | for _, count := range m.nodeCounts { 164 | diff := math.Abs(float64(count) - avgLoad) 165 | if diff/avgLoad > maxDiff { 166 | maxDiff = diff / avgLoad 167 | } 168 | } 169 | 170 | // 如果负载不均衡度超过阈值,调整虚拟节点 171 | if maxDiff > m.config.LoadBalanceThreshold { 172 | m.rebalanceNodes() 173 | } 174 | } 175 | 176 | // rebalanceNodes 重新平衡节点 177 | func (m *Map) rebalanceNodes() { 178 | m.mu.Lock() 179 | defer m.mu.Unlock() 180 | 181 | avgLoad := float64(m.totalRequests) / float64(len(m.nodeReplicas)) 182 | 183 | // 调整每个节点的虚拟节点数量 184 | for node, count := range m.nodeCounts { 185 | currentReplicas := m.nodeReplicas[node] 186 | loadRatio := float64(count) / avgLoad 187 | 188 | var newReplicas int 189 | if loadRatio > 1 { 190 | // 负载过高,减少虚拟节点 191 | newReplicas = int(float64(currentReplicas) / loadRatio) 192 | } else { 193 | // 负载过低,增加虚拟节点 194 | newReplicas = int(float64(currentReplicas) * (2 - loadRatio)) 195 | } 196 | 197 | // 确保在限制范围内 198 | if newReplicas < m.config.MinReplicas { 199 | newReplicas = m.config.MinReplicas 200 | } 201 | if newReplicas > m.config.MaxReplicas { 202 | newReplicas = m.config.MaxReplicas 203 | } 204 | 205 | if newReplicas != currentReplicas { 206 | // 重新添加节点的虚拟节点 207 | if err := m.Remove(node); err != nil { 208 | continue // 如果移除失败,跳过这个节点 209 | } 210 | m.addNode(node, newReplicas) 211 | } 212 | } 213 | 214 | // 重置计数器 215 | for node := range m.nodeCounts { 216 | m.nodeCounts[node] = 0 217 | } 218 | atomic.StoreInt64(&m.totalRequests, 0) 219 | 220 | // 重新排序 221 | sort.Ints(m.keys) 222 | } 223 | 224 | // GetStats 获取负载统计信息 225 | func (m *Map) GetStats() map[string]float64 { 226 | m.mu.RLock() 227 | defer m.mu.RUnlock() 228 | 229 | stats := make(map[string]float64) 230 | total := atomic.LoadInt64(&m.totalRequests) 231 | if total == 0 { 232 | return stats 233 | } 234 | 235 | for node, count := range m.nodeCounts { 236 | stats[node] = 
float64(count) / float64(total) 237 | } 238 | return stats 239 | } 240 | 241 | // 将checkAndRebalance移到单独的goroutine中 242 | func (m *Map) startBalancer() { 243 | go func() { 244 | ticker := time.NewTicker(time.Second) 245 | defer ticker.Stop() 246 | 247 | for range ticker.C { 248 | m.checkAndRebalance() 249 | } 250 | }() 251 | } 252 | -------------------------------------------------------------------------------- /consistenthash/config.go: -------------------------------------------------------------------------------- 1 | package consistenthash 2 | 3 | import "hash/crc32" 4 | 5 | // Config 一致性哈希配置 6 | type Config struct { 7 | // 每个真实节点对应的虚拟节点数 8 | DefaultReplicas int 9 | // 最小虚拟节点数 10 | MinReplicas int 11 | // 最大虚拟节点数 12 | MaxReplicas int 13 | // 哈希函数 14 | HashFunc func(data []byte) uint32 15 | // 负载均衡阈值,超过此值触发虚拟节点调整 16 | LoadBalanceThreshold float64 17 | } 18 | 19 | // DefaultConfig 默认配置 20 | var DefaultConfig = &Config{ 21 | DefaultReplicas: 50, 22 | MinReplicas: 10, 23 | MaxReplicas: 200, 24 | HashFunc: crc32.ChecksumIEEE, 25 | LoadBalanceThreshold: 0.25, // 25% 的负载不均衡度触发调整 26 | } 27 | -------------------------------------------------------------------------------- /example/test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "time" 9 | 10 | lcache "github.com/youngyangyang04/KamaCache-Go" 11 | ) 12 | 13 | func main() { 14 | // 添加命令行参数,用于区分不同节点 15 | port := flag.Int("port", 8001, "节点端口") 16 | nodeID := flag.String("node", "A", "节点标识符") 17 | flag.Parse() 18 | 19 | addr := fmt.Sprintf(":%d", *port) 20 | log.Printf("[节点%s] 启动,地址: %s", *nodeID, addr) 21 | 22 | // 创建节点 23 | node, err := lcache.NewServer(addr, "kama-cache", 24 | lcache.WithEtcdEndpoints([]string{"localhost:2379"}), 25 | lcache.WithDialTimeout(5*time.Second), 26 | ) 27 | if err != nil { 28 | log.Fatal("创建节点失败:", err) 29 | } 30 | 31 | // 创建节点选择器 32 | picker, err := 
lcache.NewClientPicker(addr) 33 | if err != nil { 34 | log.Fatal("创建节点选择器失败:", err) 35 | } 36 | 37 | // 创建缓存组 38 | group := lcache.NewGroup("test", 2<<20, lcache.GetterFunc( 39 | func(ctx context.Context, key string) ([]byte, error) { 40 | log.Printf("[节点%s] 触发数据源加载: key=%s", *nodeID, key) 41 | return []byte(fmt.Sprintf("节点%s的数据源值", *nodeID)), nil 42 | }), 43 | ) 44 | 45 | // 注册节点选择器 46 | group.RegisterPeers(picker) 47 | 48 | // 启动节点 49 | go func() { 50 | log.Printf("[节点%s] 开始启动服务...", *nodeID) 51 | if err := node.Start(); err != nil { 52 | log.Fatal("启动节点失败:", err) 53 | } 54 | }() 55 | 56 | // 等待节点注册完成 57 | log.Printf("[节点%s] 等待节点注册...", *nodeID) 58 | time.Sleep(5 * time.Second) 59 | 60 | ctx := context.Background() 61 | 62 | // 设置本节点的特定键值对 63 | localKey := fmt.Sprintf("key_%s", *nodeID) 64 | localValue := []byte(fmt.Sprintf("这是节点%s的数据", *nodeID)) 65 | 66 | fmt.Printf("\n=== 节点%s:设置本地数据 ===\n", *nodeID) 67 | err = group.Set(ctx, localKey, localValue) 68 | if err != nil { 69 | log.Fatal("设置本地数据失败:", err) 70 | } 71 | fmt.Printf("节点%s: 设置键 %s 成功\n", *nodeID, localKey) 72 | 73 | // 等待其他节点也完成设置 74 | log.Printf("[节点%s] 等待其他节点准备就绪...", *nodeID) 75 | time.Sleep(30 * time.Second) 76 | 77 | // 打印当前已发现的节点 78 | picker.PrintPeers() 79 | 80 | // 测试获取本地数据 81 | fmt.Printf("\n=== 节点%s:获取本地数据 ===\n", *nodeID) 82 | fmt.Printf("直接查询本地缓存...\n") 83 | 84 | // 打印缓存统计信息 85 | stats := group.Stats() 86 | fmt.Printf("缓存统计: %+v\n", stats) 87 | 88 | if val, err := group.Get(ctx, localKey); err == nil { 89 | fmt.Printf("节点%s: 获取本地键 %s 成功: %s\n", *nodeID, localKey, val.String()) 90 | } else { 91 | fmt.Printf("节点%s: 获取本地键失败: %v\n", *nodeID, err) 92 | } 93 | 94 | // 测试获取其他节点的数据 95 | otherKeys := []string{"key_A", "key_B", "key_C"} 96 | for _, key := range otherKeys { 97 | if key == localKey { 98 | continue // 跳过本节点的键 99 | } 100 | fmt.Printf("\n=== 节点%s:尝试获取远程数据 %s ===\n", *nodeID, key) 101 | log.Printf("[节点%s] 开始查找键 %s 的远程节点", *nodeID, key) 102 | if val, err := group.Get(ctx, key); err == nil { 
103 | fmt.Printf("节点%s: 获取远程键 %s 成功: %s\n", *nodeID, key, val.String()) 104 | } else { 105 | fmt.Printf("节点%s: 获取远程键失败: %v\n", *nodeID, err) 106 | } 107 | } 108 | 109 | // 保持程序运行 110 | select {} 111 | } 112 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/youngyangyang04/KamaCache-Go 2 | 3 | 4 | go 1.22 5 | 6 | toolchain go1.22.11 7 | 8 | require ( 9 | github.com/sirupsen/logrus v1.9.3 10 | google.golang.org/grpc v1.70.0 11 | google.golang.org/protobuf v1.36.4 12 | ) 13 | 14 | require ( 15 | github.com/coreos/go-semver v0.3.0 // indirect 16 | github.com/coreos/go-systemd/v22 v22.3.2 // indirect 17 | github.com/gogo/protobuf v1.3.2 // indirect 18 | github.com/golang/protobuf v1.5.4 // indirect 19 | go.etcd.io/etcd/api/v3 v3.5.18 // indirect 20 | go.etcd.io/etcd/client/pkg/v3 v3.5.18 // indirect 21 | go.uber.org/atomic v1.7.0 // indirect 22 | go.uber.org/multierr v1.6.0 // indirect 23 | go.uber.org/zap v1.17.0 // indirect 24 | google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a // indirect 25 | ) 26 | 27 | require ( 28 | go.etcd.io/etcd/client/v3 v3.5.18 29 | golang.org/x/net v0.34.0 // indirect 30 | golang.org/x/sys v0.29.0 // indirect 31 | golang.org/x/text v0.21.0 // indirect 32 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect 33 | ) 34 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 2 | github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= 3 | github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 4 | github.com/coreos/go-systemd/v22 v22.3.2 
h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= 5 | github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= 6 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 8 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= 10 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 11 | github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= 12 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 13 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 14 | github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 15 | github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= 16 | github.com/go-redis/redis/v7 v7.4.1/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRfnwdHj/Dcg= 17 | github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= 18 | github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= 19 | github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= 20 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 21 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 22 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 23 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 24 | github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 25 | github.com/golang/protobuf 
v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 26 | github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= 27 | github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= 28 | github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= 29 | github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 30 | github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= 31 | github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= 32 | github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 33 | github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= 34 | github.com/gomodule/redigo v1.8.6/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= 35 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 36 | github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 37 | github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 38 | github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 39 | github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= 40 | github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 41 | github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 42 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 43 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 44 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 45 | github.com/juguagua/lc-cache v0.0.0-20250125130440-44ca965c1836 h1:YMmTu3u7D6V9jh3U4OHXWP1rbKyVKgTxox+4X8mMQGU= 46 | 
github.com/juguagua/lc-cache v0.0.0-20250125130440-44ca965c1836/go.mod h1:v9ebzrvKOJfhve6xExxkUrQAxlhaonom5i1PezkqfiA= 47 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 48 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 49 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 50 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 51 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 52 | github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 53 | github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= 54 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 55 | github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 56 | github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= 57 | github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= 58 | github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 59 | github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= 60 | github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 61 | github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= 62 | github.com/orca-zhang/ecache v1.1.1 h1:krk4+/ihsLPg9/k2zYeRa7dNespTQsFnRcLWaJGFNds= 63 | github.com/orca-zhang/ecache v1.1.1/go.mod h1:1/6HCddx1iAbafDyRdddq5v/0rZf5sCU/fkLZ3BhciQ= 64 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 65 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 66 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 67 | github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 68 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 69 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 70 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 71 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 72 | github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= 73 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 74 | github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 75 | github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 76 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 77 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 78 | go.etcd.io/etcd/api/v3 v3.5.18 h1:Q4oDAKnmwqTo5lafvB+afbgCDF7E35E4EYV2g+FNGhs= 79 | go.etcd.io/etcd/api/v3 v3.5.18/go.mod h1:uY03Ob2H50077J7Qq0DeehjM/A9S8PhVfbQ1mSaMopU= 80 | go.etcd.io/etcd/client/pkg/v3 v3.5.18 h1:mZPOYw4h8rTk7TeJ5+3udUkfVGBqc+GCjOJYd68QgNM= 81 | go.etcd.io/etcd/client/pkg/v3 v3.5.18/go.mod h1:BxVf2o5wXG9ZJV+/Cu7QNUiJYk4A29sAhoI5tIRsCu4= 82 | go.etcd.io/etcd/client/v3 v3.5.18 h1:nvvYmNHGumkDjZhTHgVU36A9pykGa2K4lAJ0yY7hcXA= 83 | go.etcd.io/etcd/client/v3 v3.5.18/go.mod h1:kmemwOsPU9broExyhYsBxX4spCTDX3yLgPMWtpBXG6E= 84 | go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= 85 | go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= 86 | go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= 87 | go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= 88 | go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= 89 | 
go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= 90 | go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= 91 | go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= 92 | go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= 93 | go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= 94 | go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= 95 | go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= 96 | go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= 97 | go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= 98 | go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= 99 | go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= 100 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 101 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 102 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 103 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 104 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 105 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 106 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 107 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 108 | golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 109 | golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 110 | golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 111 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 112 | golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= 113 | golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 114 | golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= 115 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 116 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 117 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 118 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 119 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 120 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 121 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 122 | golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 123 | golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 124 | golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 125 | golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 126 | golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 127 | golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 128 | golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 129 | golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 130 | golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 131 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 132 | golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= 133 | golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 134 | golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 135 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 136 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 137 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 138 | golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 139 | golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= 140 | golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= 141 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 142 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 143 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 144 | golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 145 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 146 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 147 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 148 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 149 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 150 | google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a h1:OAiGFfOiA0v9MRYsSidp3ubZaBnteRUyn3xB2ZQ5G/E= 151 | google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY= 152 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o= 153 | google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= 154 | google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ= 155 | google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw= 156 | google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= 157 | google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 158 | google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= 159 | google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= 160 | google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 161 | google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 162 | google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 163 | google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
164 | google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM= 165 | google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 166 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 167 | gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 168 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 169 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 170 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 171 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 172 | gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 173 | gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 174 | gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 175 | gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= 176 | gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= 177 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 178 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 179 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 180 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 181 | -------------------------------------------------------------------------------- /group.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "sync" 8 | "sync/atomic" 9 | "time" 10 | 11 | "github.com/sirupsen/logrus" 12 | "github.com/youngyangyang04/KamaCache-Go/singleflight" 13 | ) 14 | 
15 | var ( 16 | groupsMu sync.RWMutex 17 | groups = make(map[string]*Group) 18 | ) 19 | 20 | // ErrKeyRequired 键不能为空错误 21 | var ErrKeyRequired = errors.New("key is required") 22 | 23 | // ErrValueRequired 值不能为空错误 24 | var ErrValueRequired = errors.New("value is required") 25 | 26 | // ErrGroupClosed 组已关闭错误 27 | var ErrGroupClosed = errors.New("cache group is closed") 28 | 29 | // Getter 加载键值的回调函数接口 30 | type Getter interface { 31 | Get(ctx context.Context, key string) ([]byte, error) 32 | } 33 | 34 | // GetterFunc 函数类型实现 Getter 接口 35 | type GetterFunc func(ctx context.Context, key string) ([]byte, error) 36 | 37 | // Get 实现 Getter 接口 38 | func (f GetterFunc) Get(ctx context.Context, key string) ([]byte, error) { 39 | return f(ctx, key) 40 | } 41 | 42 | // Group 是一个缓存命名空间 43 | type Group struct { 44 | name string 45 | getter Getter 46 | mainCache *Cache 47 | peers PeerPicker 48 | loader *singleflight.Group 49 | expiration time.Duration // 缓存过期时间,0表示永不过期 50 | closed int32 // 原子变量,标记组是否已关闭 51 | stats groupStats // 统计信息 52 | } 53 | 54 | // groupStats 保存组的统计信息 55 | type groupStats struct { 56 | loads int64 // 加载次数 57 | localHits int64 // 本地缓存命中次数 58 | localMisses int64 // 本地缓存未命中次数 59 | peerHits int64 // 从对等节点获取成功次数 60 | peerMisses int64 // 从对等节点获取失败次数 61 | loaderHits int64 // 从加载器获取成功次数 62 | loaderErrors int64 // 从加载器获取失败次数 63 | loadDuration int64 // 加载总耗时(纳秒) 64 | } 65 | 66 | // GroupOption 定义Group的配置选项 67 | type GroupOption func(*Group) 68 | 69 | // WithExpiration 设置缓存过期时间 70 | func WithExpiration(d time.Duration) GroupOption { 71 | return func(g *Group) { 72 | g.expiration = d 73 | } 74 | } 75 | 76 | // WithPeers 设置分布式节点 77 | func WithPeers(peers PeerPicker) GroupOption { 78 | return func(g *Group) { 79 | g.peers = peers 80 | } 81 | } 82 | 83 | // WithCacheOptions 设置缓存选项 84 | func WithCacheOptions(opts CacheOptions) GroupOption { 85 | return func(g *Group) { 86 | g.mainCache = NewCache(opts) 87 | } 88 | } 89 | 90 | // NewGroup 创建一个新的 Group 实例 91 | func NewGroup(name 
string, cacheBytes int64, getter Getter, opts ...GroupOption) *Group { 92 | if getter == nil { 93 | panic("nil Getter") 94 | } 95 | 96 | // 创建默认缓存选项 97 | cacheOpts := DefaultCacheOptions() 98 | cacheOpts.MaxBytes = cacheBytes 99 | 100 | g := &Group{ 101 | name: name, 102 | getter: getter, 103 | mainCache: NewCache(cacheOpts), 104 | loader: &singleflight.Group{}, 105 | } 106 | 107 | // 应用选项 108 | for _, opt := range opts { 109 | opt(g) 110 | } 111 | 112 | // 注册到全局组映射 113 | groupsMu.Lock() 114 | defer groupsMu.Unlock() 115 | 116 | if _, exists := groups[name]; exists { 117 | logrus.Warnf("Group with name %s already exists, will be replaced", name) 118 | } 119 | 120 | groups[name] = g 121 | logrus.Infof("Created cache group [%s] with cacheBytes=%d, expiration=%v", name, cacheBytes, g.expiration) 122 | 123 | return g 124 | } 125 | 126 | // GetGroup 获取指定名称的组 127 | func GetGroup(name string) *Group { 128 | groupsMu.RLock() 129 | defer groupsMu.RUnlock() 130 | return groups[name] 131 | } 132 | 133 | // Get 从缓存获取数据 134 | func (g *Group) Get(ctx context.Context, key string) (ByteView, error) { 135 | // 检查组是否已关闭 136 | if atomic.LoadInt32(&g.closed) == 1 { 137 | return ByteView{}, ErrGroupClosed 138 | } 139 | 140 | if key == "" { 141 | return ByteView{}, ErrKeyRequired 142 | } 143 | 144 | // 从本地缓存获取 145 | view, ok := g.mainCache.Get(ctx, key) 146 | if ok { 147 | atomic.AddInt64(&g.stats.localHits, 1) 148 | return view, nil 149 | } 150 | 151 | atomic.AddInt64(&g.stats.localMisses, 1) 152 | 153 | // 尝试从其他节点获取或加载 154 | return g.load(ctx, key) 155 | } 156 | 157 | // Set 设置缓存值 158 | func (g *Group) Set(ctx context.Context, key string, value []byte) error { 159 | // 检查组是否已关闭 160 | if atomic.LoadInt32(&g.closed) == 1 { 161 | return ErrGroupClosed 162 | } 163 | 164 | if key == "" { 165 | return ErrKeyRequired 166 | } 167 | if len(value) == 0 { 168 | return ErrValueRequired 169 | } 170 | 171 | // 检查是否是从其他节点同步过来的请求 172 | isPeerRequest := ctx.Value("from_peer") != nil 173 | 174 | // 
创建缓存视图 175 | view := ByteView{b: cloneBytes(value)} 176 | 177 | // 设置到本地缓存 178 | if g.expiration > 0 { 179 | g.mainCache.AddWithExpiration(key, view, time.Now().Add(g.expiration)) 180 | } else { 181 | g.mainCache.Add(key, view) 182 | } 183 | 184 | // 如果不是从其他节点同步过来的请求,且启用了分布式模式,同步到其他节点 185 | if !isPeerRequest && g.peers != nil { 186 | go g.syncToPeers(ctx, "set", key, value) 187 | } 188 | 189 | return nil 190 | } 191 | 192 | // Delete 删除缓存值 193 | func (g *Group) Delete(ctx context.Context, key string) error { 194 | // 检查组是否已关闭 195 | if atomic.LoadInt32(&g.closed) == 1 { 196 | return ErrGroupClosed 197 | } 198 | 199 | if key == "" { 200 | return ErrKeyRequired 201 | } 202 | 203 | // 从本地缓存删除 204 | g.mainCache.Delete(key) 205 | 206 | // 检查是否是从其他节点同步过来的请求 207 | isPeerRequest := ctx.Value("from_peer") != nil 208 | 209 | // 如果不是从其他节点同步过来的请求,且启用了分布式模式,同步到其他节点 210 | if !isPeerRequest && g.peers != nil { 211 | go g.syncToPeers(ctx, "delete", key, nil) 212 | } 213 | 214 | return nil 215 | } 216 | 217 | // syncToPeers 同步操作到其他节点 218 | func (g *Group) syncToPeers(ctx context.Context, op string, key string, value []byte) { 219 | if g.peers == nil { 220 | return 221 | } 222 | 223 | // 选择对等节点 224 | peer, ok, isSelf := g.peers.PickPeer(key) 225 | if !ok || isSelf { 226 | return 227 | } 228 | 229 | // 创建同步请求上下文 230 | syncCtx := context.WithValue(context.Background(), "from_peer", true) 231 | 232 | var err error 233 | switch op { 234 | case "set": 235 | err = peer.Set(syncCtx, g.name, key, value) 236 | case "delete": 237 | _, err = peer.Delete(g.name, key) 238 | } 239 | 240 | if err != nil { 241 | logrus.Errorf("[KamaCache] failed to sync %s to peer: %v", op, err) 242 | } 243 | } 244 | 245 | // Clear 清空缓存 246 | func (g *Group) Clear() { 247 | // 检查组是否已关闭 248 | if atomic.LoadInt32(&g.closed) == 1 { 249 | return 250 | } 251 | 252 | g.mainCache.Clear() 253 | logrus.Infof("[KamaCache] cleared cache for group [%s]", g.name) 254 | } 255 | 256 | // Close 关闭组并释放资源 257 | func (g *Group) 
Close() error { 258 | // 如果已经关闭,直接返回 259 | if !atomic.CompareAndSwapInt32(&g.closed, 0, 1) { 260 | return nil 261 | } 262 | 263 | // 关闭本地缓存 264 | if g.mainCache != nil { 265 | g.mainCache.Close() 266 | } 267 | 268 | // 从全局组映射中移除 269 | groupsMu.Lock() 270 | delete(groups, g.name) 271 | groupsMu.Unlock() 272 | 273 | logrus.Infof("[KamaCache] closed cache group [%s]", g.name) 274 | return nil 275 | } 276 | 277 | // load 加载数据 278 | func (g *Group) load(ctx context.Context, key string) (value ByteView, err error) { 279 | // 使用 singleflight 确保并发请求只加载一次 280 | startTime := time.Now() 281 | viewi, err := g.loader.Do(key, func() (interface{}, error) { 282 | return g.loadData(ctx, key) 283 | }) 284 | 285 | // 记录加载时间 286 | loadDuration := time.Since(startTime).Nanoseconds() 287 | atomic.AddInt64(&g.stats.loadDuration, loadDuration) 288 | atomic.AddInt64(&g.stats.loads, 1) 289 | 290 | if err != nil { 291 | atomic.AddInt64(&g.stats.loaderErrors, 1) 292 | return ByteView{}, err 293 | } 294 | 295 | view := viewi.(ByteView) 296 | 297 | // 设置到本地缓存 298 | if g.expiration > 0 { 299 | g.mainCache.AddWithExpiration(key, view, time.Now().Add(g.expiration)) 300 | } else { 301 | g.mainCache.Add(key, view) 302 | } 303 | 304 | return view, nil 305 | } 306 | 307 | // loadData 实际加载数据的方法 308 | func (g *Group) loadData(ctx context.Context, key string) (value ByteView, err error) { 309 | // 尝试从远程节点获取 310 | if g.peers != nil { 311 | peer, ok, isSelf := g.peers.PickPeer(key) 312 | if ok && !isSelf { 313 | value, err := g.getFromPeer(ctx, peer, key) 314 | if err == nil { 315 | atomic.AddInt64(&g.stats.peerHits, 1) 316 | return value, nil 317 | } 318 | 319 | atomic.AddInt64(&g.stats.peerMisses, 1) 320 | logrus.Warnf("[KamaCache] failed to get from peer: %v", err) 321 | } 322 | } 323 | 324 | // 从数据源加载 325 | bytes, err := g.getter.Get(ctx, key) 326 | if err != nil { 327 | return ByteView{}, fmt.Errorf("failed to get data: %w", err) 328 | } 329 | 330 | atomic.AddInt64(&g.stats.loaderHits, 1) 331 | return 
ByteView{b: cloneBytes(bytes)}, nil 332 | } 333 | 334 | // getFromPeer 从其他节点获取数据 335 | func (g *Group) getFromPeer(ctx context.Context, peer Peer, key string) (ByteView, error) { 336 | bytes, err := peer.Get(g.name, key) 337 | if err != nil { 338 | return ByteView{}, fmt.Errorf("failed to get from peer: %w", err) 339 | } 340 | return ByteView{b: bytes}, nil 341 | } 342 | 343 | // RegisterPeers 注册PeerPicker 344 | func (g *Group) RegisterPeers(peers PeerPicker) { 345 | if g.peers != nil { 346 | panic("RegisterPeers called more than once") 347 | } 348 | g.peers = peers 349 | logrus.Infof("[KamaCache] registered peers for group [%s]", g.name) 350 | } 351 | 352 | // Stats 返回缓存统计信息 353 | func (g *Group) Stats() map[string]interface{} { 354 | stats := map[string]interface{}{ 355 | "name": g.name, 356 | "closed": atomic.LoadInt32(&g.closed) == 1, 357 | "expiration": g.expiration, 358 | "loads": atomic.LoadInt64(&g.stats.loads), 359 | "local_hits": atomic.LoadInt64(&g.stats.localHits), 360 | "local_misses": atomic.LoadInt64(&g.stats.localMisses), 361 | "peer_hits": atomic.LoadInt64(&g.stats.peerHits), 362 | "peer_misses": atomic.LoadInt64(&g.stats.peerMisses), 363 | "loader_hits": atomic.LoadInt64(&g.stats.loaderHits), 364 | "loader_errors": atomic.LoadInt64(&g.stats.loaderErrors), 365 | } 366 | 367 | // 计算各种命中率 368 | totalGets := stats["local_hits"].(int64) + stats["local_misses"].(int64) 369 | if totalGets > 0 { 370 | stats["hit_rate"] = float64(stats["local_hits"].(int64)) / float64(totalGets) 371 | } 372 | 373 | totalLoads := stats["loads"].(int64) 374 | if totalLoads > 0 { 375 | stats["avg_load_time_ms"] = float64(atomic.LoadInt64(&g.stats.loadDuration)) / float64(totalLoads) / float64(time.Millisecond) 376 | } 377 | 378 | // 添加缓存大小 379 | if g.mainCache != nil { 380 | cacheStats := g.mainCache.Stats() 381 | for k, v := range cacheStats { 382 | stats["cache_"+k] = v 383 | } 384 | } 385 | 386 | return stats 387 | } 388 | 389 | // ListGroups 返回所有缓存组的名称 390 | func 
ListGroups() []string { 391 | groupsMu.RLock() 392 | defer groupsMu.RUnlock() 393 | 394 | names := make([]string, 0, len(groups)) 395 | for name := range groups { 396 | names = append(names, name) 397 | } 398 | 399 | return names 400 | } 401 | 402 | // DestroyGroup 销毁指定名称的缓存组 403 | func DestroyGroup(name string) bool { 404 | groupsMu.Lock() 405 | defer groupsMu.Unlock() 406 | 407 | if g, exists := groups[name]; exists { 408 | g.Close() 409 | delete(groups, name) 410 | logrus.Infof("[KamaCache] destroyed cache group [%s]", name) 411 | return true 412 | } 413 | 414 | return false 415 | } 416 | 417 | // DestroyAllGroups 销毁所有缓存组 418 | func DestroyAllGroups() { 419 | groupsMu.Lock() 420 | defer groupsMu.Unlock() 421 | 422 | for name, g := range groups { 423 | g.Close() 424 | delete(groups, name) 425 | logrus.Infof("[KamaCache] destroyed cache group [%s]", name) 426 | } 427 | } 428 | -------------------------------------------------------------------------------- /pb/kama.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // versions: 3 | // protoc-gen-go v1.36.4 4 | // protoc v5.29.3 5 | // source: pb/kama.proto 6 | 7 | package __ 8 | 9 | import ( 10 | protoreflect "google.golang.org/protobuf/reflect/protoreflect" 11 | protoimpl "google.golang.org/protobuf/runtime/protoimpl" 12 | reflect "reflect" 13 | sync "sync" 14 | unsafe "unsafe" 15 | ) 16 | 17 | const ( 18 | // Verify that this generated code is sufficiently up-to-date. 19 | _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) 20 | // Verify that runtime/protoimpl is sufficiently up-to-date. 
21 | _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) 22 | ) 23 | 24 | type Request struct { 25 | state protoimpl.MessageState `protogen:"open.v1"` 26 | Group string `protobuf:"bytes,1,opt,name=group,proto3" json:"group,omitempty"` 27 | Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` 28 | Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` 29 | unknownFields protoimpl.UnknownFields 30 | sizeCache protoimpl.SizeCache 31 | } 32 | 33 | func (x *Request) Reset() { 34 | *x = Request{} 35 | mi := &file_pb_kama_proto_msgTypes[0] 36 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 37 | ms.StoreMessageInfo(mi) 38 | } 39 | 40 | func (x *Request) String() string { 41 | return protoimpl.X.MessageStringOf(x) 42 | } 43 | 44 | func (*Request) ProtoMessage() {} 45 | 46 | func (x *Request) ProtoReflect() protoreflect.Message { 47 | mi := &file_pb_kama_proto_msgTypes[0] 48 | if x != nil { 49 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 50 | if ms.LoadMessageInfo() == nil { 51 | ms.StoreMessageInfo(mi) 52 | } 53 | return ms 54 | } 55 | return mi.MessageOf(x) 56 | } 57 | 58 | // Deprecated: Use Request.ProtoReflect.Descriptor instead. 
59 | func (*Request) Descriptor() ([]byte, []int) { 60 | return file_pb_kama_proto_rawDescGZIP(), []int{0} 61 | } 62 | 63 | func (x *Request) GetGroup() string { 64 | if x != nil { 65 | return x.Group 66 | } 67 | return "" 68 | } 69 | 70 | func (x *Request) GetKey() string { 71 | if x != nil { 72 | return x.Key 73 | } 74 | return "" 75 | } 76 | 77 | func (x *Request) GetValue() []byte { 78 | if x != nil { 79 | return x.Value 80 | } 81 | return nil 82 | } 83 | 84 | type ResponseForGet struct { 85 | state protoimpl.MessageState `protogen:"open.v1"` 86 | Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` 87 | unknownFields protoimpl.UnknownFields 88 | sizeCache protoimpl.SizeCache 89 | } 90 | 91 | func (x *ResponseForGet) Reset() { 92 | *x = ResponseForGet{} 93 | mi := &file_pb_kama_proto_msgTypes[1] 94 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 95 | ms.StoreMessageInfo(mi) 96 | } 97 | 98 | func (x *ResponseForGet) String() string { 99 | return protoimpl.X.MessageStringOf(x) 100 | } 101 | 102 | func (*ResponseForGet) ProtoMessage() {} 103 | 104 | func (x *ResponseForGet) ProtoReflect() protoreflect.Message { 105 | mi := &file_pb_kama_proto_msgTypes[1] 106 | if x != nil { 107 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 108 | if ms.LoadMessageInfo() == nil { 109 | ms.StoreMessageInfo(mi) 110 | } 111 | return ms 112 | } 113 | return mi.MessageOf(x) 114 | } 115 | 116 | // Deprecated: Use ResponseForGet.ProtoReflect.Descriptor instead. 
117 | func (*ResponseForGet) Descriptor() ([]byte, []int) { 118 | return file_pb_kama_proto_rawDescGZIP(), []int{1} 119 | } 120 | 121 | func (x *ResponseForGet) GetValue() []byte { 122 | if x != nil { 123 | return x.Value 124 | } 125 | return nil 126 | } 127 | 128 | type ResponseForDelete struct { 129 | state protoimpl.MessageState `protogen:"open.v1"` 130 | Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` 131 | unknownFields protoimpl.UnknownFields 132 | sizeCache protoimpl.SizeCache 133 | } 134 | 135 | func (x *ResponseForDelete) Reset() { 136 | *x = ResponseForDelete{} 137 | mi := &file_pb_kama_proto_msgTypes[2] 138 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 139 | ms.StoreMessageInfo(mi) 140 | } 141 | 142 | func (x *ResponseForDelete) String() string { 143 | return protoimpl.X.MessageStringOf(x) 144 | } 145 | 146 | func (*ResponseForDelete) ProtoMessage() {} 147 | 148 | func (x *ResponseForDelete) ProtoReflect() protoreflect.Message { 149 | mi := &file_pb_kama_proto_msgTypes[2] 150 | if x != nil { 151 | ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 152 | if ms.LoadMessageInfo() == nil { 153 | ms.StoreMessageInfo(mi) 154 | } 155 | return ms 156 | } 157 | return mi.MessageOf(x) 158 | } 159 | 160 | // Deprecated: Use ResponseForDelete.ProtoReflect.Descriptor instead. 
161 | func (*ResponseForDelete) Descriptor() ([]byte, []int) { 162 | return file_pb_kama_proto_rawDescGZIP(), []int{2} 163 | } 164 | 165 | func (x *ResponseForDelete) GetValue() bool { 166 | if x != nil { 167 | return x.Value 168 | } 169 | return false 170 | } 171 | 172 | var File_pb_kama_proto protoreflect.FileDescriptor 173 | 174 | var file_pb_kama_proto_rawDesc = string([]byte{ 175 | 0x0a, 0x0d, 0x70, 0x62, 0x2f, 0x6b, 0x61, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 176 | 0x02, 0x70, 0x62, 0x22, 0x47, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 177 | 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 178 | 0x72, 0x6f, 0x75, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 179 | 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 180 | 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x26, 0x0a, 0x0e, 181 | 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6f, 0x72, 0x47, 0x65, 0x74, 0x12, 0x14, 182 | 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 183 | 0x61, 0x6c, 0x75, 0x65, 0x22, 0x29, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 184 | 0x46, 0x6f, 0x72, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 185 | 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x32, 186 | 0x89, 0x01, 0x0a, 0x09, 0x4b, 0x61, 0x6d, 0x61, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x26, 0x0a, 187 | 0x03, 0x47, 0x65, 0x74, 0x12, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 188 | 0x74, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 189 | 0x6f, 0x72, 0x47, 0x65, 0x74, 0x12, 0x26, 0x0a, 0x03, 0x53, 0x65, 0x74, 0x12, 0x0b, 0x2e, 0x70, 190 | 0x62, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x70, 
0x62, 0x2e, 0x52, 191 | 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6f, 0x72, 0x47, 0x65, 0x74, 0x12, 0x2c, 0x0a, 192 | 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x71, 193 | 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 194 | 0x73, 0x65, 0x46, 0x6f, 0x72, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x04, 0x5a, 0x02, 0x2e, 195 | 0x2f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 196 | }) 197 | 198 | var ( 199 | file_pb_kama_proto_rawDescOnce sync.Once 200 | file_pb_kama_proto_rawDescData []byte 201 | ) 202 | 203 | func file_pb_kama_proto_rawDescGZIP() []byte { 204 | file_pb_kama_proto_rawDescOnce.Do(func() { 205 | file_pb_kama_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_pb_kama_proto_rawDesc), len(file_pb_kama_proto_rawDesc))) 206 | }) 207 | return file_pb_kama_proto_rawDescData 208 | } 209 | 210 | var file_pb_kama_proto_msgTypes = make([]protoimpl.MessageInfo, 3) 211 | var file_pb_kama_proto_goTypes = []any{ 212 | (*Request)(nil), // 0: pb.Request 213 | (*ResponseForGet)(nil), // 1: pb.ResponseForGet 214 | (*ResponseForDelete)(nil), // 2: pb.ResponseForDelete 215 | } 216 | var file_pb_kama_proto_depIdxs = []int32{ 217 | 0, // 0: pb.KamaCache.Get:input_type -> pb.Request 218 | 0, // 1: pb.KamaCache.Set:input_type -> pb.Request 219 | 0, // 2: pb.KamaCache.Delete:input_type -> pb.Request 220 | 1, // 3: pb.KamaCache.Get:output_type -> pb.ResponseForGet 221 | 1, // 4: pb.KamaCache.Set:output_type -> pb.ResponseForGet 222 | 2, // 5: pb.KamaCache.Delete:output_type -> pb.ResponseForDelete 223 | 3, // [3:6] is the sub-list for method output_type 224 | 0, // [0:3] is the sub-list for method input_type 225 | 0, // [0:0] is the sub-list for extension type_name 226 | 0, // [0:0] is the sub-list for extension extendee 227 | 0, // [0:0] is the sub-list for field type_name 228 | } 229 | 230 | func init() { file_pb_kama_proto_init() } 231 | 
func file_pb_kama_proto_init() { 232 | if File_pb_kama_proto != nil { 233 | return 234 | } 235 | type x struct{} 236 | out := protoimpl.TypeBuilder{ 237 | File: protoimpl.DescBuilder{ 238 | GoPackagePath: reflect.TypeOf(x{}).PkgPath(), 239 | RawDescriptor: unsafe.Slice(unsafe.StringData(file_pb_kama_proto_rawDesc), len(file_pb_kama_proto_rawDesc)), 240 | NumEnums: 0, 241 | NumMessages: 3, 242 | NumExtensions: 0, 243 | NumServices: 1, 244 | }, 245 | GoTypes: file_pb_kama_proto_goTypes, 246 | DependencyIndexes: file_pb_kama_proto_depIdxs, 247 | MessageInfos: file_pb_kama_proto_msgTypes, 248 | }.Build() 249 | File_pb_kama_proto = out.File 250 | file_pb_kama_proto_goTypes = nil 251 | file_pb_kama_proto_depIdxs = nil 252 | } 253 | -------------------------------------------------------------------------------- /pb/kama.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | package pb; 3 | 4 | option go_package = "./"; 5 | 6 | message Request { 7 | string group = 1; 8 | string key = 2; 9 | bytes value = 3; 10 | } 11 | 12 | message ResponseForGet { 13 | bytes value = 1; 14 | } 15 | 16 | message ResponseForDelete { 17 | bool value = 1; 18 | } 19 | 20 | service KamaCache { 21 | rpc Get(Request) returns (ResponseForGet); 22 | rpc Set(Request) returns (ResponseForGet); 23 | rpc Delete(Request) returns(ResponseForDelete); 24 | } -------------------------------------------------------------------------------- /pb/kama_grpc.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
2 | // versions: 3 | // - protoc-gen-go-grpc v1.5.1 4 | // - protoc v5.29.3 5 | // source: pb/kama.proto 6 | 7 | package __ 8 | 9 | import ( 10 | context "context" 11 | grpc "google.golang.org/grpc" 12 | codes "google.golang.org/grpc/codes" 13 | status "google.golang.org/grpc/status" 14 | ) 15 | 16 | // This is a compile-time assertion to ensure that this generated file 17 | // is compatible with the grpc package it is being compiled against. 18 | // Requires gRPC-Go v1.64.0 or later. 19 | const _ = grpc.SupportPackageIsVersion9 20 | 21 | const ( 22 | KamaCache_Get_FullMethodName = "/pb.KamaCache/Get" 23 | KamaCache_Set_FullMethodName = "/pb.KamaCache/Set" 24 | KamaCache_Delete_FullMethodName = "/pb.KamaCache/Delete" 25 | ) 26 | 27 | // KamaCacheClient is the client API for KamaCache service. 28 | // 29 | // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 30 | type KamaCacheClient interface { 31 | Get(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForGet, error) 32 | Set(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForGet, error) 33 | Delete(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForDelete, error) 34 | } 35 | 36 | type kamaCacheClient struct { 37 | cc grpc.ClientConnInterface 38 | } 39 | 40 | func NewKamaCacheClient(cc grpc.ClientConnInterface) KamaCacheClient { 41 | return &kamaCacheClient{cc} 42 | } 43 | 44 | func (c *kamaCacheClient) Get(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForGet, error) { 45 | cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 46 | out := new(ResponseForGet) 47 | err := c.cc.Invoke(ctx, KamaCache_Get_FullMethodName, in, out, cOpts...) 
48 | if err != nil { 49 | return nil, err 50 | } 51 | return out, nil 52 | } 53 | 54 | func (c *kamaCacheClient) Set(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForGet, error) { 55 | cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 56 | out := new(ResponseForGet) 57 | err := c.cc.Invoke(ctx, KamaCache_Set_FullMethodName, in, out, cOpts...) 58 | if err != nil { 59 | return nil, err 60 | } 61 | return out, nil 62 | } 63 | 64 | func (c *kamaCacheClient) Delete(ctx context.Context, in *Request, opts ...grpc.CallOption) (*ResponseForDelete, error) { 65 | cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 66 | out := new(ResponseForDelete) 67 | err := c.cc.Invoke(ctx, KamaCache_Delete_FullMethodName, in, out, cOpts...) 68 | if err != nil { 69 | return nil, err 70 | } 71 | return out, nil 72 | } 73 | 74 | // KamaCacheServer is the server API for KamaCache service. 75 | // All implementations must embed UnimplementedKamaCacheServer 76 | // for forward compatibility. 77 | type KamaCacheServer interface { 78 | Get(context.Context, *Request) (*ResponseForGet, error) 79 | Set(context.Context, *Request) (*ResponseForGet, error) 80 | Delete(context.Context, *Request) (*ResponseForDelete, error) 81 | mustEmbedUnimplementedKamaCacheServer() 82 | } 83 | 84 | // UnimplementedKamaCacheServer must be embedded to have 85 | // forward compatible implementations. 86 | // 87 | // NOTE: this should be embedded by value instead of pointer to avoid a nil 88 | // pointer dereference when methods are called. 
89 | type UnimplementedKamaCacheServer struct{} 90 | 91 | func (UnimplementedKamaCacheServer) Get(context.Context, *Request) (*ResponseForGet, error) { 92 | return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") 93 | } 94 | func (UnimplementedKamaCacheServer) Set(context.Context, *Request) (*ResponseForGet, error) { 95 | return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") 96 | } 97 | func (UnimplementedKamaCacheServer) Delete(context.Context, *Request) (*ResponseForDelete, error) { 98 | return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") 99 | } 100 | func (UnimplementedKamaCacheServer) mustEmbedUnimplementedKamaCacheServer() {} 101 | func (UnimplementedKamaCacheServer) testEmbeddedByValue() {} 102 | 103 | // UnsafeKamaCacheServer may be embedded to opt out of forward compatibility for this service. 104 | // Use of this interface is not recommended, as added methods to KamaCacheServer will 105 | // result in compilation errors. 106 | type UnsafeKamaCacheServer interface { 107 | mustEmbedUnimplementedKamaCacheServer() 108 | } 109 | 110 | func RegisterKamaCacheServer(s grpc.ServiceRegistrar, srv KamaCacheServer) { 111 | // If the following call pancis, it indicates UnimplementedKamaCacheServer was 112 | // embedded by pointer and is nil. This will cause panics if an 113 | // unimplemented method is ever invoked, so we test this at initialization 114 | // time to prevent it from happening at runtime later due to I/O. 
115 | if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { 116 | t.testEmbeddedByValue() 117 | } 118 | s.RegisterService(&KamaCache_ServiceDesc, srv) 119 | } 120 | 121 | func _KamaCache_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 122 | in := new(Request) 123 | if err := dec(in); err != nil { 124 | return nil, err 125 | } 126 | if interceptor == nil { 127 | return srv.(KamaCacheServer).Get(ctx, in) 128 | } 129 | info := &grpc.UnaryServerInfo{ 130 | Server: srv, 131 | FullMethod: KamaCache_Get_FullMethodName, 132 | } 133 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 134 | return srv.(KamaCacheServer).Get(ctx, req.(*Request)) 135 | } 136 | return interceptor(ctx, in, info, handler) 137 | } 138 | 139 | func _KamaCache_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 140 | in := new(Request) 141 | if err := dec(in); err != nil { 142 | return nil, err 143 | } 144 | if interceptor == nil { 145 | return srv.(KamaCacheServer).Set(ctx, in) 146 | } 147 | info := &grpc.UnaryServerInfo{ 148 | Server: srv, 149 | FullMethod: KamaCache_Set_FullMethodName, 150 | } 151 | handler := func(ctx context.Context, req interface{}) (interface{}, error) { 152 | return srv.(KamaCacheServer).Set(ctx, req.(*Request)) 153 | } 154 | return interceptor(ctx, in, info, handler) 155 | } 156 | 157 | func _KamaCache_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { 158 | in := new(Request) 159 | if err := dec(in); err != nil { 160 | return nil, err 161 | } 162 | if interceptor == nil { 163 | return srv.(KamaCacheServer).Delete(ctx, in) 164 | } 165 | info := &grpc.UnaryServerInfo{ 166 | Server: srv, 167 | FullMethod: KamaCache_Delete_FullMethodName, 168 | } 169 | handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { 170 | return srv.(KamaCacheServer).Delete(ctx, req.(*Request)) 171 | } 172 | return interceptor(ctx, in, info, handler) 173 | } 174 | 175 | // KamaCache_ServiceDesc is the grpc.ServiceDesc for KamaCache service. 176 | // It's only intended for direct use with grpc.RegisterService, 177 | // and not to be introspected or modified (even as a copy) 178 | var KamaCache_ServiceDesc = grpc.ServiceDesc{ 179 | ServiceName: "pb.KamaCache", 180 | HandlerType: (*KamaCacheServer)(nil), 181 | Methods: []grpc.MethodDesc{ 182 | { 183 | MethodName: "Get", 184 | Handler: _KamaCache_Get_Handler, 185 | }, 186 | { 187 | MethodName: "Set", 188 | Handler: _KamaCache_Set_Handler, 189 | }, 190 | { 191 | MethodName: "Delete", 192 | Handler: _KamaCache_Delete_Handler, 193 | }, 194 | }, 195 | Streams: []grpc.StreamDesc{}, 196 | Metadata: "pb/kama.proto", 197 | } 198 | -------------------------------------------------------------------------------- /peers.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/sirupsen/logrus" 12 | "github.com/youngyangyang04/KamaCache-Go/consistenthash" 13 | "github.com/youngyangyang04/KamaCache-Go/registry" 14 | clientv3 "go.etcd.io/etcd/client/v3" 15 | ) 16 | 17 | const defaultSvcName = "kama-cache" 18 | 19 | // PeerPicker 定义了peer选择器的接口 20 | type PeerPicker interface { 21 | PickPeer(key string) (peer Peer, ok bool, self bool) 22 | Close() error 23 | } 24 | 25 | // Peer 定义了缓存节点的接口 26 | type Peer interface { 27 | Get(group string, key string) ([]byte, error) 28 | Set(ctx context.Context, group string, key string, value []byte) error 29 | Delete(group string, key string) (bool, error) 30 | Close() error 31 | } 32 | 33 | // ClientPicker 实现了PeerPicker接口 34 | type ClientPicker struct { 35 | selfAddr string 36 | svcName string 37 | mu 
sync.RWMutex 38 | consHash *consistenthash.Map 39 | clients map[string]*Client 40 | etcdCli *clientv3.Client 41 | ctx context.Context 42 | cancel context.CancelFunc 43 | } 44 | 45 | // PickerOption 定义配置选项 46 | type PickerOption func(*ClientPicker) 47 | 48 | // WithServiceName 设置服务名称 49 | func WithServiceName(name string) PickerOption { 50 | return func(p *ClientPicker) { 51 | p.svcName = name 52 | } 53 | } 54 | 55 | // PrintPeers 打印当前已发现的节点(仅用于调试) 56 | func (p *ClientPicker) PrintPeers() { 57 | p.mu.RLock() 58 | defer p.mu.RUnlock() 59 | 60 | log.Printf("当前已发现的节点:") 61 | for addr := range p.clients { 62 | log.Printf("- %s", addr) 63 | } 64 | } 65 | 66 | // NewClientPicker 创建新的ClientPicker实例 67 | func NewClientPicker(addr string, opts ...PickerOption) (*ClientPicker, error) { 68 | ctx, cancel := context.WithCancel(context.Background()) 69 | picker := &ClientPicker{ 70 | selfAddr: addr, 71 | svcName: defaultSvcName, 72 | clients: make(map[string]*Client), 73 | consHash: consistenthash.New(), 74 | ctx: ctx, 75 | cancel: cancel, 76 | } 77 | 78 | for _, opt := range opts { 79 | opt(picker) 80 | } 81 | 82 | cli, err := clientv3.New(clientv3.Config{ 83 | Endpoints: registry.DefaultConfig.Endpoints, 84 | DialTimeout: registry.DefaultConfig.DialTimeout, 85 | }) 86 | if err != nil { 87 | cancel() 88 | return nil, fmt.Errorf("failed to create etcd client: %v", err) 89 | } 90 | picker.etcdCli = cli 91 | 92 | // 启动服务发现 93 | if err := picker.startServiceDiscovery(); err != nil { 94 | cancel() 95 | cli.Close() 96 | return nil, err 97 | } 98 | 99 | return picker, nil 100 | } 101 | 102 | // startServiceDiscovery 启动服务发现 103 | func (p *ClientPicker) startServiceDiscovery() error { 104 | // 先进行全量更新 105 | if err := p.fetchAllServices(); err != nil { 106 | return err 107 | } 108 | 109 | // 启动增量更新 110 | go p.watchServiceChanges() 111 | return nil 112 | } 113 | 114 | // watchServiceChanges 监听服务实例变化 115 | func (p *ClientPicker) watchServiceChanges() { 116 | watcher := 
clientv3.NewWatcher(p.etcdCli) 117 | watchChan := watcher.Watch(p.ctx, "/services/"+p.svcName, clientv3.WithPrefix()) 118 | 119 | for { 120 | select { 121 | case <-p.ctx.Done(): 122 | watcher.Close() 123 | return 124 | case resp := <-watchChan: 125 | p.handleWatchEvents(resp.Events) 126 | } 127 | } 128 | } 129 | 130 | // handleWatchEvents 处理监听到的事件 131 | func (p *ClientPicker) handleWatchEvents(events []*clientv3.Event) { 132 | p.mu.Lock() 133 | defer p.mu.Unlock() 134 | 135 | for _, event := range events { 136 | addr := string(event.Kv.Value) 137 | if addr == p.selfAddr { 138 | continue 139 | } 140 | 141 | switch event.Type { 142 | case clientv3.EventTypePut: 143 | if _, exists := p.clients[addr]; !exists { 144 | p.set(addr) 145 | logrus.Infof("New service discovered at %s", addr) 146 | } 147 | case clientv3.EventTypeDelete: 148 | if client, exists := p.clients[addr]; exists { 149 | client.Close() 150 | p.remove(addr) 151 | logrus.Infof("Service removed at %s", addr) 152 | } 153 | } 154 | } 155 | } 156 | 157 | // fetchAllServices 获取所有服务实例 158 | func (p *ClientPicker) fetchAllServices() error { 159 | ctx, cancel := context.WithTimeout(p.ctx, 3*time.Second) 160 | defer cancel() 161 | 162 | resp, err := p.etcdCli.Get(ctx, "/services/"+p.svcName, clientv3.WithPrefix()) 163 | if err != nil { 164 | return fmt.Errorf("failed to get all services: %v", err) 165 | } 166 | 167 | p.mu.Lock() 168 | defer p.mu.Unlock() 169 | 170 | for _, kv := range resp.Kvs { 171 | addr := string(kv.Value) 172 | if addr != "" && addr != p.selfAddr { 173 | p.set(addr) 174 | logrus.Infof("Discovered service at %s", addr) 175 | } 176 | } 177 | return nil 178 | } 179 | 180 | // set 添加服务实例 181 | func (p *ClientPicker) set(addr string) { 182 | if client, err := NewClient(addr, p.svcName, p.etcdCli); err == nil { 183 | p.consHash.Add(addr) 184 | p.clients[addr] = client 185 | logrus.Infof("Successfully created client for %s", addr) 186 | } else { 187 | logrus.Errorf("Failed to create client for %s: 
// parseAddrFromKey extracts the node address from an etcd registration
// key of the form "/services/<svcName>/<addr>". It returns "" when the
// key does not start with that prefix.
func parseAddrFromKey(key, svcName string) string {
	prefix := "/services/" + svcName + "/"
	if !strings.HasPrefix(key, prefix) {
		return ""
	}
	return key[len(prefix):]
}
Endpoints:   []string{"localhost:2379"},
	DialTimeout: 5 * time.Second,
}

// Register publishes svcName at addr into etcd under a leased key
// ("/services/<svcName>/<addr>") and keeps the lease alive in a
// background goroutine. Closing (or sending on) stopCh revokes the lease
// so the registration disappears promptly.
//
// Fix: guard addr[0] — the original panicked on an empty addr.
func Register(svcName, addr string, stopCh <-chan error) error {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   DefaultConfig.Endpoints,
		DialTimeout: DefaultConfig.DialTimeout,
	})
	if err != nil {
		return fmt.Errorf("failed to create etcd client: %v", err)
	}

	// A bare ":port" address is completed with the local non-loopback IP
	// so that remote peers can dial it.
	localIP, err := getLocalIP()
	if err != nil {
		cli.Close()
		return fmt.Errorf("failed to get local IP: %v", err)
	}
	if len(addr) > 0 && addr[0] == ':' {
		addr = localIP + addr
	}

	// 10-second lease: a crashed node disappears from etcd shortly after
	// its keep-alives stop.
	lease, err := cli.Grant(context.Background(), 10)
	if err != nil {
		cli.Close()
		return fmt.Errorf("failed to create lease: %v", err)
	}

	key := fmt.Sprintf("/services/%s/%s", svcName, addr)
	if _, err = cli.Put(context.Background(), key, addr, clientv3.WithLease(lease.ID)); err != nil {
		cli.Close()
		return fmt.Errorf("failed to put key-value to etcd: %v", err)
	}

	keepAliveCh, err := cli.KeepAlive(context.Background(), lease.ID)
	if err != nil {
		cli.Close()
		return fmt.Errorf("failed to keep lease alive: %v", err)
	}

	// Lease-renewal loop: runs until stopCh fires or etcd stops renewing.
	go func() {
		defer cli.Close()
		for {
			select {
			case <-stopCh:
				// Best-effort revoke; the lease would expire on its own anyway.
				ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
				cli.Revoke(ctx, lease.ID)
				cancel()
				return
			case resp, ok := <-keepAliveCh:
				if !ok {
					logrus.Warn("keep alive channel closed")
					return
				}
				logrus.Debugf("successfully renewed lease: %d", resp.ID)
			}
		}
	}()

	logrus.Infof("Service registered: %s at %s", svcName, addr)
	return nil
}

func getLocalIP() (string, error) {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return "", err
	}
| 97 | for _, addr := range addrs { 98 | if ipNet, ok := addr.(*net.IPNet); ok && !ipNet.IP.IsLoopback() { 99 | if ipNet.IP.To4() != nil { 100 | return ipNet.IP.String(), nil 101 | } 102 | } 103 | } 104 | 105 | return "", fmt.Errorf("no valid local IP found") 106 | } 107 | -------------------------------------------------------------------------------- /server.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "sync" 8 | "time" 9 | 10 | "crypto/tls" 11 | 12 | "github.com/sirupsen/logrus" 13 | pb "github.com/youngyangyang04/KamaCache-Go/pb" 14 | "github.com/youngyangyang04/KamaCache-Go/registry" 15 | clientv3 "go.etcd.io/etcd/client/v3" 16 | "google.golang.org/grpc" 17 | "google.golang.org/grpc/credentials" 18 | "google.golang.org/grpc/health" 19 | healthpb "google.golang.org/grpc/health/grpc_health_v1" 20 | ) 21 | 22 | // Server 定义缓存服务器 23 | type Server struct { 24 | pb.UnimplementedKamaCacheServer 25 | addr string // 服务地址 26 | svcName string // 服务名称 27 | groups *sync.Map // 缓存组 28 | grpcServer *grpc.Server // gRPC服务器 29 | etcdCli *clientv3.Client // etcd客户端 30 | stopCh chan error // 停止信号 31 | opts *ServerOptions // 服务器选项 32 | } 33 | 34 | // ServerOptions 服务器配置选项 35 | type ServerOptions struct { 36 | EtcdEndpoints []string // etcd端点 37 | DialTimeout time.Duration // 连接超时 38 | MaxMsgSize int // 最大消息大小 39 | TLS bool // 是否启用TLS 40 | CertFile string // 证书文件 41 | KeyFile string // 密钥文件 42 | } 43 | 44 | // DefaultServerOptions 默认配置 45 | var DefaultServerOptions = &ServerOptions{ 46 | EtcdEndpoints: []string{"localhost:2379"}, 47 | DialTimeout: 5 * time.Second, 48 | MaxMsgSize: 4 << 20, // 4MB 49 | } 50 | 51 | // ServerOption 定义选项函数类型 52 | type ServerOption func(*ServerOptions) 53 | 54 | // WithEtcdEndpoints 设置etcd端点 55 | func WithEtcdEndpoints(endpoints []string) ServerOption { 56 | return func(o *ServerOptions) { 57 | o.EtcdEndpoints = endpoints 58 | } 59 | } 
60 | 61 | // WithDialTimeout 设置连接超时 62 | func WithDialTimeout(timeout time.Duration) ServerOption { 63 | return func(o *ServerOptions) { 64 | o.DialTimeout = timeout 65 | } 66 | } 67 | 68 | // WithTLS 设置TLS配置 69 | func WithTLS(certFile, keyFile string) ServerOption { 70 | return func(o *ServerOptions) { 71 | o.TLS = true 72 | o.CertFile = certFile 73 | o.KeyFile = keyFile 74 | } 75 | } 76 | 77 | // NewServer 创建新的服务器实例 78 | func NewServer(addr, svcName string, opts ...ServerOption) (*Server, error) { 79 | options := DefaultServerOptions 80 | for _, opt := range opts { 81 | opt(options) 82 | } 83 | 84 | // 创建etcd客户端 85 | etcdCli, err := clientv3.New(clientv3.Config{ 86 | Endpoints: options.EtcdEndpoints, 87 | DialTimeout: options.DialTimeout, 88 | }) 89 | if err != nil { 90 | return nil, fmt.Errorf("failed to create etcd client: %v", err) 91 | } 92 | 93 | // 创建gRPC服务器 94 | var serverOpts []grpc.ServerOption 95 | serverOpts = append(serverOpts, grpc.MaxRecvMsgSize(options.MaxMsgSize)) 96 | 97 | if options.TLS { 98 | creds, err := loadTLSCredentials(options.CertFile, options.KeyFile) 99 | if err != nil { 100 | return nil, fmt.Errorf("failed to load TLS credentials: %v", err) 101 | } 102 | serverOpts = append(serverOpts, grpc.Creds(creds)) 103 | } 104 | 105 | srv := &Server{ 106 | addr: addr, 107 | svcName: svcName, 108 | groups: &sync.Map{}, 109 | grpcServer: grpc.NewServer(serverOpts...), 110 | etcdCli: etcdCli, 111 | stopCh: make(chan error), 112 | opts: options, 113 | } 114 | 115 | // 注册服务 116 | pb.RegisterKamaCacheServer(srv.grpcServer, srv) 117 | 118 | // 注册健康检查服务 119 | healthServer := health.NewServer() 120 | healthpb.RegisterHealthServer(srv.grpcServer, healthServer) 121 | healthServer.SetServingStatus(svcName, healthpb.HealthCheckResponse_SERVING) 122 | 123 | return srv, nil 124 | } 125 | 126 | // Start 启动服务器 127 | func (s *Server) Start() error { 128 | // 启动gRPC服务器 129 | lis, err := net.Listen("tcp", s.addr) 130 | if err != nil { 131 | return 
fmt.Errorf("failed to listen: %v", err) 132 | } 133 | 134 | // 注册到etcd 135 | stopCh := make(chan error) 136 | go func() { 137 | if err := registry.Register(s.svcName, s.addr, stopCh); err != nil { 138 | logrus.Errorf("failed to register service: %v", err) 139 | close(stopCh) 140 | return 141 | } 142 | }() 143 | 144 | logrus.Infof("Server starting at %s", s.addr) 145 | return s.grpcServer.Serve(lis) 146 | } 147 | 148 | // Stop 停止服务器 149 | func (s *Server) Stop() { 150 | close(s.stopCh) 151 | s.grpcServer.GracefulStop() 152 | if s.etcdCli != nil { 153 | s.etcdCli.Close() 154 | } 155 | } 156 | 157 | // Get 实现Cache服务的Get方法 158 | func (s *Server) Get(ctx context.Context, req *pb.Request) (*pb.ResponseForGet, error) { 159 | group := GetGroup(req.Group) 160 | if group == nil { 161 | return nil, fmt.Errorf("group %s not found", req.Group) 162 | } 163 | 164 | view, err := group.Get(ctx, req.Key) 165 | if err != nil { 166 | return nil, err 167 | } 168 | 169 | return &pb.ResponseForGet{Value: view.ByteSLice()}, nil 170 | } 171 | 172 | // Set 实现Cache服务的Set方法 173 | func (s *Server) Set(ctx context.Context, req *pb.Request) (*pb.ResponseForGet, error) { 174 | group := GetGroup(req.Group) 175 | if group == nil { 176 | return nil, fmt.Errorf("group %s not found", req.Group) 177 | } 178 | 179 | // 从 context 中获取标记,如果没有则创建新的 context 180 | fromPeer := ctx.Value("from_peer") 181 | if fromPeer == nil { 182 | ctx = context.WithValue(ctx, "from_peer", true) 183 | } 184 | 185 | if err := group.Set(ctx, req.Key, req.Value); err != nil { 186 | return nil, err 187 | } 188 | 189 | return &pb.ResponseForGet{Value: req.Value}, nil 190 | } 191 | 192 | // Delete 实现Cache服务的Delete方法 193 | func (s *Server) Delete(ctx context.Context, req *pb.Request) (*pb.ResponseForDelete, error) { 194 | group := GetGroup(req.Group) 195 | if group == nil { 196 | return nil, fmt.Errorf("group %s not found", req.Group) 197 | } 198 | 199 | err := group.Delete(ctx, req.Key) 200 | return &pb.ResponseForDelete{Value: err 
// call represents an in-flight or completed request for a single key.
type call struct {
	wg  sync.WaitGroup
	val interface{}
	err error
}

// Group deduplicates concurrent calls that share the same key.
type Group struct {
	m sync.Map // key -> *call
}

// Do guarantees that among concurrent callers with the same key, fn is
// executed only once; every caller receives that single result. After the
// call completes the key is released, so a later Do re-executes fn.
//
// Fix: the original had a check-then-act race — two goroutines could both
// miss on Load and both execute fn. LoadOrStore makes the "claim this
// key" step a single atomic operation.
func (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
	// Prepare our own call up front so claiming the key is atomic.
	c := &call{}
	c.wg.Add(1)

	if existing, loaded := g.m.LoadOrStore(key, c); loaded {
		// Another goroutine claimed the key first: wait for its result.
		winner := existing.(*call)
		winner.wg.Wait()
		return winner.val, winner.err
	}

	// We won the claim: run fn exactly once and publish the result.
	c.val, c.err = fn()
	c.wg.Done()

	// Release the key so future calls recompute.
	g.m.Delete(key)

	return c.val, c.err
}
sync.RWMutex 12 | list *list.List // 双向链表,用于维护 LRU 顺序 13 | items map[string]*list.Element // 键到链表节点的映射 14 | expires map[string]time.Time // 过期时间映射 15 | maxBytes int64 // 最大允许字节数 16 | usedBytes int64 // 当前使用的字节数 17 | onEvicted func(key string, value Value) 18 | cleanupInterval time.Duration 19 | cleanupTicker *time.Ticker 20 | closeCh chan struct{} // 用于优雅关闭清理协程 21 | } 22 | 23 | // lruEntry 表示缓存中的一个条目 24 | type lruEntry struct { 25 | key string 26 | value Value 27 | } 28 | 29 | // newLRUCache 创建一个新的 LRU 缓存实例 30 | func newLRUCache(opts Options) *lruCache { 31 | // 设置默认清理间隔 32 | cleanupInterval := opts.CleanupInterval 33 | if cleanupInterval <= 0 { 34 | cleanupInterval = time.Minute 35 | } 36 | 37 | c := &lruCache{ 38 | list: list.New(), 39 | items: make(map[string]*list.Element), 40 | expires: make(map[string]time.Time), 41 | maxBytes: opts.MaxBytes, 42 | onEvicted: opts.OnEvicted, 43 | cleanupInterval: cleanupInterval, 44 | closeCh: make(chan struct{}), 45 | } 46 | 47 | // 启动定期清理协程 48 | c.cleanupTicker = time.NewTicker(c.cleanupInterval) 49 | go c.cleanupLoop() 50 | 51 | return c 52 | } 53 | 54 | // Get 获取缓存项,如果存在且未过期则返回 55 | func (c *lruCache) Get(key string) (Value, bool) { 56 | c.mu.RLock() 57 | elem, ok := c.items[key] 58 | if !ok { 59 | c.mu.RUnlock() 60 | return nil, false 61 | } 62 | 63 | // 检查是否过期 64 | if expTime, hasExp := c.expires[key]; hasExp && time.Now().After(expTime) { 65 | c.mu.RUnlock() 66 | 67 | // 异步删除过期项,避免在读锁内操作 68 | go c.Delete(key) 69 | 70 | return nil, false 71 | } 72 | 73 | // 获取值并释放读锁 74 | entry := elem.Value.(*lruEntry) 75 | value := entry.value 76 | c.mu.RUnlock() 77 | 78 | // 更新 LRU 位置需要写锁 79 | c.mu.Lock() 80 | // 再次检查元素是否仍然存在(可能在获取写锁期间被其他协程删除) 81 | if _, ok := c.items[key]; ok { 82 | c.list.MoveToBack(elem) 83 | } 84 | c.mu.Unlock() 85 | 86 | return value, true 87 | } 88 | 89 | // Set 添加或更新缓存项 90 | func (c *lruCache) Set(key string, value Value) error { 91 | return c.SetWithExpiration(key, value, 0) 92 | } 93 | 94 | // 
SetWithExpiration 添加或更新缓存项,并设置过期时间 95 | func (c *lruCache) SetWithExpiration(key string, value Value, expiration time.Duration) error { 96 | if value == nil { 97 | c.Delete(key) 98 | return nil 99 | } 100 | 101 | c.mu.Lock() 102 | defer c.mu.Unlock() 103 | 104 | // 计算过期时间 105 | var expTime time.Time 106 | if expiration > 0 { 107 | expTime = time.Now().Add(expiration) 108 | c.expires[key] = expTime 109 | } else { 110 | delete(c.expires, key) 111 | } 112 | 113 | // 如果键已存在,更新值 114 | if elem, ok := c.items[key]; ok { 115 | oldEntry := elem.Value.(*lruEntry) 116 | c.usedBytes += int64(value.Len() - oldEntry.value.Len()) 117 | oldEntry.value = value 118 | c.list.MoveToBack(elem) 119 | return nil 120 | } 121 | 122 | // 添加新项 123 | entry := &lruEntry{key: key, value: value} 124 | elem := c.list.PushBack(entry) 125 | c.items[key] = elem 126 | c.usedBytes += int64(len(key) + value.Len()) 127 | 128 | // 检查是否需要淘汰旧项 129 | c.evict() 130 | 131 | return nil 132 | } 133 | 134 | // Delete 从缓存中删除指定键的项 135 | func (c *lruCache) Delete(key string) bool { 136 | c.mu.Lock() 137 | defer c.mu.Unlock() 138 | 139 | if elem, ok := c.items[key]; ok { 140 | c.removeElement(elem) 141 | return true 142 | } 143 | return false 144 | } 145 | 146 | // Clear 清空缓存 147 | func (c *lruCache) Clear() { 148 | c.mu.Lock() 149 | defer c.mu.Unlock() 150 | 151 | // 如果设置了回调函数,遍历所有项调用回调 152 | if c.onEvicted != nil { 153 | for _, elem := range c.items { 154 | entry := elem.Value.(*lruEntry) 155 | c.onEvicted(entry.key, entry.value) 156 | } 157 | } 158 | 159 | c.list.Init() 160 | c.items = make(map[string]*list.Element) 161 | c.expires = make(map[string]time.Time) 162 | c.usedBytes = 0 163 | } 164 | 165 | // Len 返回缓存中的项数 166 | func (c *lruCache) Len() int { 167 | c.mu.RLock() 168 | defer c.mu.RUnlock() 169 | return c.list.Len() 170 | } 171 | 172 | // removeElement 从缓存中删除元素,调用此方法前必须持有锁 173 | func (c *lruCache) removeElement(elem *list.Element) { 174 | entry := elem.Value.(*lruEntry) 175 | c.list.Remove(elem) 176 | 
delete(c.items, entry.key) 177 | delete(c.expires, entry.key) 178 | c.usedBytes -= int64(len(entry.key) + entry.value.Len()) 179 | 180 | if c.onEvicted != nil { 181 | c.onEvicted(entry.key, entry.value) 182 | } 183 | } 184 | 185 | // evict 清理过期和超出内存限制的缓存,调用此方法前必须持有锁 186 | func (c *lruCache) evict() { 187 | // 先清理过期项 188 | now := time.Now() 189 | for key, expTime := range c.expires { 190 | if now.After(expTime) { 191 | if elem, ok := c.items[key]; ok { 192 | c.removeElement(elem) 193 | } 194 | } 195 | } 196 | 197 | // 再根据内存限制清理最久未使用的项 198 | for c.maxBytes > 0 && c.usedBytes > c.maxBytes && c.list.Len() > 0 { 199 | elem := c.list.Front() // 获取最久未使用的项(链表头部) 200 | if elem != nil { 201 | c.removeElement(elem) 202 | } 203 | } 204 | } 205 | 206 | // cleanupLoop 定期清理过期缓存的协程 207 | func (c *lruCache) cleanupLoop() { 208 | for { 209 | select { 210 | case <-c.cleanupTicker.C: 211 | c.mu.Lock() 212 | c.evict() 213 | c.mu.Unlock() 214 | case <-c.closeCh: 215 | return 216 | } 217 | } 218 | } 219 | 220 | // Close 关闭缓存,停止清理协程 221 | func (c *lruCache) Close() { 222 | if c.cleanupTicker != nil { 223 | c.cleanupTicker.Stop() 224 | close(c.closeCh) 225 | } 226 | } 227 | 228 | // GetWithExpiration 获取缓存项及其剩余过期时间 229 | func (c *lruCache) GetWithExpiration(key string) (Value, time.Duration, bool) { 230 | c.mu.RLock() 231 | defer c.mu.RUnlock() 232 | 233 | elem, ok := c.items[key] 234 | if !ok { 235 | return nil, 0, false 236 | } 237 | 238 | // 检查是否过期 239 | now := time.Now() 240 | if expTime, hasExp := c.expires[key]; hasExp { 241 | if now.After(expTime) { 242 | // 已过期 243 | return nil, 0, false 244 | } 245 | 246 | // 计算剩余过期时间 247 | ttl := expTime.Sub(now) 248 | c.list.MoveToBack(elem) 249 | return elem.Value.(*lruEntry).value, ttl, true 250 | } 251 | 252 | // 无过期时间 253 | c.list.MoveToBack(elem) 254 | return elem.Value.(*lruEntry).value, 0, true 255 | } 256 | 257 | // GetExpiration 获取键的过期时间 258 | func (c *lruCache) GetExpiration(key string) (time.Time, bool) { 259 | c.mu.RLock() 260 | defer 
c.mu.RUnlock() 261 | 262 | expTime, ok := c.expires[key] 263 | return expTime, ok 264 | } 265 | 266 | // UpdateExpiration 更新过期时间 267 | func (c *lruCache) UpdateExpiration(key string, expiration time.Duration) bool { 268 | c.mu.Lock() 269 | defer c.mu.Unlock() 270 | 271 | if _, ok := c.items[key]; !ok { 272 | return false 273 | } 274 | 275 | if expiration > 0 { 276 | c.expires[key] = time.Now().Add(expiration) 277 | } else { 278 | delete(c.expires, key) 279 | } 280 | 281 | return true 282 | } 283 | 284 | // UsedBytes 返回当前使用的字节数 285 | func (c *lruCache) UsedBytes() int64 { 286 | c.mu.RLock() 287 | defer c.mu.RUnlock() 288 | return c.usedBytes 289 | } 290 | 291 | // MaxBytes 返回最大允许字节数 292 | func (c *lruCache) MaxBytes() int64 { 293 | c.mu.RLock() 294 | defer c.mu.RUnlock() 295 | return c.maxBytes 296 | } 297 | 298 | // SetMaxBytes 设置最大允许字节数并触发淘汰 299 | func (c *lruCache) SetMaxBytes(maxBytes int64) { 300 | c.mu.Lock() 301 | defer c.mu.Unlock() 302 | 303 | c.maxBytes = maxBytes 304 | if maxBytes > 0 { 305 | c.evict() 306 | } 307 | } 308 | -------------------------------------------------------------------------------- /store/lru2.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "sync" 6 | "sync/atomic" 7 | "time" 8 | ) 9 | 10 | type lru2Store struct { 11 | locks []sync.Mutex 12 | caches [][2]*cache 13 | onEvicted func(key string, value Value) 14 | cleanupTick *time.Ticker 15 | mask int32 16 | } 17 | 18 | func newLRU2Cache(opts Options) *lru2Store { 19 | if opts.BucketCount == 0 { 20 | opts.BucketCount = 16 21 | } 22 | if opts.CapPerBucket == 0 { 23 | opts.CapPerBucket = 1024 24 | } 25 | if opts.Level2Cap == 0 { 26 | opts.Level2Cap = 1024 27 | } 28 | if opts.CleanupInterval <= 0 { 29 | opts.CleanupInterval = time.Minute 30 | } 31 | 32 | mask := maskOfNextPowOf2(opts.BucketCount) 33 | s := &lru2Store{ 34 | locks: make([]sync.Mutex, mask+1), 35 | caches: make([][2]*cache, mask+1), 36 | 
onEvicted: opts.OnEvicted, 37 | cleanupTick: time.NewTicker(opts.CleanupInterval), 38 | mask: int32(mask), 39 | } 40 | 41 | for i := range s.caches { 42 | s.caches[i][0] = Create(opts.CapPerBucket) 43 | s.caches[i][1] = Create(opts.Level2Cap) 44 | } 45 | 46 | if opts.CleanupInterval > 0 { 47 | go s.cleanupLoop() 48 | } 49 | 50 | return s 51 | } 52 | 53 | func (s *lru2Store) Get(key string) (Value, bool) { 54 | idx := hashBKRD(key) & s.mask 55 | s.locks[idx].Lock() 56 | defer s.locks[idx].Unlock() 57 | 58 | currentTime := Now() 59 | 60 | // 首先检查一级缓存 61 | n1, status1, expireAt := s.caches[idx][0].del(key) 62 | if status1 > 0 { 63 | // 从一级缓存找到项目 64 | if expireAt > 0 && currentTime >= expireAt { 65 | // 项目已过期,删除它 66 | s.delete(key, idx) 67 | fmt.Println("找到项目已过期,删除它") 68 | return nil, false 69 | } 70 | 71 | // 项目有效,将其移至二级缓存 72 | s.caches[idx][1].put(key, n1.v, expireAt, s.onEvicted) 73 | fmt.Println("项目有效,将其移至二级缓存") 74 | return n1.v, true 75 | } 76 | 77 | // 一级缓存未找到,检查二级缓存 78 | n2, status2 := s._get(key, idx, 1) 79 | if status2 > 0 && n2 != nil { 80 | if n2.expireAt > 0 && currentTime >= n2.expireAt { 81 | // 项目已过期,删除它 82 | s.delete(key, idx) 83 | fmt.Println("找到项目已过期,删除它") 84 | return nil, false 85 | } 86 | 87 | return n2.v, true 88 | } 89 | 90 | return nil, false 91 | } 92 | 93 | func (s *lru2Store) Set(key string, value Value) error { 94 | return s.SetWithExpiration(key, value, 9999999999999999) 95 | } 96 | 97 | func (s *lru2Store) SetWithExpiration(key string, value Value, expiration time.Duration) error { 98 | // 计算过期时间 - 确保单位一致 99 | expireAt := int64(0) 100 | if expiration > 0 { 101 | // now() 返回纳秒时间戳,确保 expiration 也是纳秒单位 102 | expireAt = Now() + int64(expiration.Nanoseconds()) 103 | } 104 | 105 | idx := hashBKRD(key) & s.mask 106 | s.locks[idx].Lock() 107 | defer s.locks[idx].Unlock() 108 | 109 | // 放入一级缓存 110 | s.caches[idx][0].put(key, value, expireAt, s.onEvicted) 111 | 112 | return nil 113 | } 114 | 115 | // Delete 实现Store接口 116 | func (s *lru2Store) 
Delete(key string) bool { 117 | idx := hashBKRD(key) & s.mask 118 | s.locks[idx].Lock() 119 | defer s.locks[idx].Unlock() 120 | 121 | return s.delete(key, idx) 122 | } 123 | 124 | // Clear 实现Store接口 125 | func (s *lru2Store) Clear() { 126 | var keys []string 127 | 128 | for i := range s.caches { 129 | s.locks[i].Lock() 130 | 131 | s.caches[i][0].walk(func(key string, value Value, expireAt int64) bool { 132 | keys = append(keys, key) 133 | return true 134 | }) 135 | s.caches[i][1].walk(func(key string, value Value, expireAt int64) bool { 136 | // 检查键是否已经收集(避免重复) 137 | for _, k := range keys { 138 | if key == k { 139 | return true 140 | } 141 | } 142 | keys = append(keys, key) 143 | return true 144 | }) 145 | 146 | s.locks[i].Unlock() 147 | } 148 | 149 | for _, key := range keys { 150 | s.Delete(key) 151 | } 152 | 153 | //s.expirations = sync.Map{} 154 | } 155 | 156 | // Len 实现Store接口 157 | func (s *lru2Store) Len() int { 158 | count := 0 159 | 160 | for i := range s.caches { 161 | s.locks[i].Lock() 162 | 163 | s.caches[i][0].walk(func(key string, value Value, expireAt int64) bool { 164 | count++ 165 | return true 166 | }) 167 | s.caches[i][1].walk(func(key string, value Value, expireAt int64) bool { 168 | count++ 169 | return true 170 | }) 171 | 172 | s.locks[i].Unlock() 173 | } 174 | 175 | return count 176 | } 177 | 178 | // Close 关闭缓存相关资源 179 | func (s *lru2Store) Close() { 180 | if s.cleanupTick != nil { 181 | s.cleanupTick.Stop() 182 | } 183 | } 184 | 185 | // 内部时钟,减少 time.Now() 调用造成的 GC 压力 186 | var clock, p, n = time.Now().UnixNano(), uint16(0), uint16(1) 187 | 188 | // 返回 clock 变量的当前值。atomic.LoadInt64 是原子操作,用于保证在多线程/协程环境中安全地读取 clock 变量的值 189 | func Now() int64 { return atomic.LoadInt64(&clock) } 190 | 191 | func init() { 192 | go func() { 193 | for { 194 | atomic.StoreInt64(&clock, time.Now().UnixNano()) // 每秒校准一次 195 | for i := 0; i < 9; i++ { 196 | time.Sleep(100 * time.Millisecond) 197 | atomic.AddInt64(&clock, int64(100*time.Millisecond)) // 保持 clock 
在一个精确的时间范围内,同时避免频繁的系统调用 198 | } 199 | time.Sleep(100 * time.Millisecond) 200 | } 201 | }() 202 | } 203 | 204 | // 实现了 BKDR 哈希算法,用于计算键的哈希值 205 | func hashBKRD(s string) (hash int32) { 206 | for i := 0; i < len(s); i++ { 207 | hash = hash*131 + int32(s[i]) 208 | } 209 | 210 | return hash 211 | } 212 | 213 | // maskOfNextPowOf2 计算大于或等于输入值的最近 2 的幂次方减一作为掩码值 214 | func maskOfNextPowOf2(cap uint16) uint16 { 215 | if cap > 0 && cap&(cap-1) == 0 { 216 | return cap - 1 217 | } 218 | 219 | // 通过多次右移和按位或操作,将二进制中最高的 1 位右边的所有位都填充为 1 220 | cap |= cap >> 1 221 | cap |= cap >> 2 222 | cap |= cap >> 4 223 | 224 | return cap | (cap >> 8) 225 | } 226 | 227 | type node struct { 228 | k string 229 | v Value 230 | expireAt int64 // 过期时间戳,expireAt = 0 表示已删除 231 | } 232 | 233 | // 内部缓存核心实现,包含双向链表和节点存储 234 | type cache struct { 235 | // dlnk[0]是哨兵节点,记录链表头尾,dlnk[0][p]存储尾部索引,dlnk[0][n]存储头部索引 236 | dlnk [][2]uint16 // 双向链表,0 表示前驱,1 表示后继 237 | m []node // 预分配内存存储节点 238 | hmap map[string]uint16 // 键到节点索引的映射 239 | last uint16 // 最后一个节点元素的索引 240 | } 241 | 242 | func Create(cap uint16) *cache { 243 | return &cache{ 244 | dlnk: make([][2]uint16, cap+1), 245 | m: make([]node, cap), 246 | hmap: make(map[string]uint16, cap), 247 | last: 0, 248 | } 249 | } 250 | 251 | // 向缓存中添加项,如果是新增返回 1,更新返回 0 252 | func (c *cache) put(key string, val Value, expireAt int64, onEvicted func(string, Value)) int { 253 | if idx, ok := c.hmap[key]; ok { 254 | c.m[idx-1].v, c.m[idx-1].expireAt = val, expireAt 255 | c.adjust(idx, p, n) // 刷新到链表头部 256 | return 0 257 | } 258 | 259 | if c.last == uint16(cap(c.m)) { 260 | tail := &c.m[c.dlnk[0][p]-1] 261 | if onEvicted != nil && (*tail).expireAt > 0 { 262 | onEvicted((*tail).k, (*tail).v) 263 | } 264 | 265 | delete(c.hmap, (*tail).k) 266 | c.hmap[key], (*tail).k, (*tail).v, (*tail).expireAt = c.dlnk[0][p], key, val, expireAt 267 | c.adjust(c.dlnk[0][p], p, n) 268 | 269 | return 1 270 | } 271 | 272 | c.last++ 273 | if len(c.hmap) <= 0 { 274 | c.dlnk[0][p] = c.last 275 | } else { 
276 | c.dlnk[c.dlnk[0][n]][p] = c.last 277 | } 278 | 279 | // 初始化新节点并更新链表指针 280 | c.m[c.last-1].k = key 281 | c.m[c.last-1].v = val 282 | c.m[c.last-1].expireAt = expireAt 283 | c.dlnk[c.last] = [2]uint16{0, c.dlnk[0][n]} 284 | c.hmap[key] = c.last 285 | c.dlnk[0][n] = c.last 286 | 287 | return 1 288 | } 289 | 290 | // 从缓存中获取键对应的节点和状态 291 | func (c *cache) get(key string) (*node, int) { 292 | if idx, ok := c.hmap[key]; ok { 293 | c.adjust(idx, p, n) 294 | return &c.m[idx-1], 1 295 | } 296 | return nil, 0 297 | } 298 | 299 | // 从缓存中删除键对应的项 300 | func (c *cache) del(key string) (*node, int, int64) { 301 | if idx, ok := c.hmap[key]; ok && c.m[idx-1].expireAt > 0 { 302 | e := c.m[idx-1].expireAt 303 | c.m[idx-1].expireAt = 0 // 标记为已删除 304 | c.adjust(idx, n, p) // 移动到链表尾部 305 | return &c.m[idx-1], 1, e 306 | } 307 | 308 | return nil, 0, 0 309 | } 310 | 311 | // 遍历缓存中的所有有效项 312 | func (c *cache) walk(walker func(key string, value Value, expireAt int64) bool) { 313 | for idx := c.dlnk[0][n]; idx != 0; idx = c.dlnk[idx][n] { 314 | if c.m[idx-1].expireAt > 0 && !walker(c.m[idx-1].k, c.m[idx-1].v, c.m[idx-1].expireAt) { 315 | return 316 | } 317 | } 318 | } 319 | 320 | // 调整节点在链表中的位置 321 | // 当 f=0, t=1 时,移动到链表头部;否则移动到链表尾部 322 | func (c *cache) adjust(idx, f, t uint16) { 323 | if c.dlnk[idx][f] != 0 { 324 | c.dlnk[c.dlnk[idx][t]][f] = c.dlnk[idx][f] 325 | c.dlnk[c.dlnk[idx][f]][t] = c.dlnk[idx][t] 326 | c.dlnk[idx][f] = 0 327 | c.dlnk[idx][t] = c.dlnk[0][t] 328 | c.dlnk[c.dlnk[0][t]][f] = idx 329 | c.dlnk[0][t] = idx 330 | } 331 | } 332 | 333 | func (s *lru2Store) _get(key string, idx, level int32) (*node, int) { 334 | if n, st := s.caches[idx][level].get(key); st > 0 && n != nil { 335 | currentTime := Now() 336 | if n.expireAt <= 0 || currentTime >= n.expireAt { 337 | // 过期或已删除 338 | return nil, 0 339 | } 340 | return n, st 341 | } 342 | 343 | return nil, 0 344 | } 345 | 346 | func (s *lru2Store) delete(key string, idx int32) bool { 347 | n1, s1, _ := 
s.caches[idx][0].del(key) 348 | n2, s2, _ := s.caches[idx][1].del(key) 349 | deleted := s1 > 0 || s2 > 0 350 | 351 | if deleted && s.onEvicted != nil { 352 | if n1 != nil && n1.v != nil { 353 | s.onEvicted(key, n1.v) 354 | } else if n2 != nil && n2.v != nil { 355 | s.onEvicted(key, n2.v) 356 | } 357 | } 358 | 359 | if deleted { 360 | //s.expirations.Delete(key) 361 | } 362 | 363 | return deleted 364 | } 365 | 366 | func (s *lru2Store) cleanupLoop() { 367 | for range s.cleanupTick.C { 368 | currentTime := Now() 369 | 370 | for i := range s.caches { 371 | s.locks[i].Lock() 372 | 373 | // 检查并清理过期项目 374 | var expiredKeys []string 375 | 376 | s.caches[i][0].walk(func(key string, value Value, expireAt int64) bool { 377 | if expireAt > 0 && currentTime >= expireAt { 378 | expiredKeys = append(expiredKeys, key) 379 | } 380 | return true 381 | }) 382 | 383 | s.caches[i][1].walk(func(key string, value Value, expireAt int64) bool { 384 | if expireAt > 0 && currentTime >= expireAt { 385 | for _, k := range expiredKeys { 386 | if key == k { 387 | // 避免重复 388 | return true 389 | } 390 | } 391 | expiredKeys = append(expiredKeys, key) 392 | } 393 | return true 394 | }) 395 | 396 | for _, key := range expiredKeys { 397 | s.delete(key, int32(i)) 398 | } 399 | 400 | s.locks[i].Unlock() 401 | } 402 | } 403 | } 404 | -------------------------------------------------------------------------------- /store/lru2_test.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "sync" 7 | "testing" 8 | "time" 9 | ) 10 | 11 | // 为测试定义一个简单的Value类型 12 | type testValue string 13 | 14 | func (v testValue) Len() int { 15 | return len(v) 16 | } 17 | 18 | // 测试缓存基本操作 19 | func TestCacheBasic(t *testing.T) { 20 | t.Run("初始化缓存", func(t *testing.T) { 21 | c := Create(10) 22 | if c == nil { 23 | t.Fatal("创建缓存失败") 24 | } 25 | if c.last != 0 { 26 | t.Fatalf("初始last应为0,实际为%d", c.last) 27 | } 28 | if len(c.m) != 10 { 29 
| t.Fatalf("缓存容量应为10,实际为%d", len(c.m)) 30 | } 31 | if len(c.dlnk) != 11 { 32 | t.Fatalf("链表长度应为cap+1(11),实际为%d", len(c.dlnk)) 33 | } 34 | }) 35 | 36 | t.Run("添加和获取", func(t *testing.T) { 37 | c := Create(5) 38 | var evictCount int 39 | onEvicted := func(key string, value Value) { 40 | evictCount++ 41 | } 42 | 43 | // 添加新项 44 | status := c.put("key1", testValue("value1"), 100, onEvicted) 45 | if status != 1 { 46 | t.Fatalf("添加新项应返回1,实际返回%d", status) 47 | } 48 | if c.last != 1 { 49 | t.Fatalf("添加一项后last应为1,实际为%d", c.last) 50 | } 51 | 52 | // 获取项 53 | node, status := c.get("key1") 54 | if status != 1 { 55 | t.Fatalf("获取存在项应返回1,实际返回%d", status) 56 | } 57 | if node == nil { 58 | t.Fatal("获取项返回了nil") 59 | } 60 | if node.k != "key1" || node.v.(testValue) != "value1" || node.expireAt != 100 { 61 | t.Fatalf("获取项值不一致: %+v", *node) 62 | } 63 | 64 | // 获取不存在的项 65 | node, status = c.get("不存在") 66 | if status != 0 { 67 | t.Fatalf("获取不存在项应返回0,实际返回%d", status) 68 | } 69 | if node != nil { 70 | t.Fatal("获取不存在项不应返回节点") 71 | } 72 | 73 | // 更新现有项 74 | status = c.put("key1", testValue("新值"), 200, onEvicted) 75 | if status != 0 { 76 | t.Fatalf("更新项应返回0,实际返回%d", status) 77 | } 78 | 79 | // 验证更新后的值 80 | node, _ = c.get("key1") 81 | if node.v.(testValue) != "新值" || node.expireAt != 200 { 82 | t.Fatalf("更新项后值不一致: %+v", *node) 83 | } 84 | }) 85 | 86 | t.Run("删除操作", func(t *testing.T) { 87 | c := Create(5) 88 | 89 | // 添加项 90 | c.put("key1", testValue("value1"), 100, nil) 91 | 92 | // 删除存在的项 93 | node, status, expireAt := c.del("key1") 94 | if status != 1 { 95 | t.Fatalf("删除存在项应返回1,实际返回%d", status) 96 | } 97 | if node == nil { 98 | t.Fatal("删除应返回被删除的节点") 99 | } 100 | if node.expireAt != 0 { 101 | t.Fatalf("删除后节点expireAt应为0,实际为%d", node.expireAt) 102 | } 103 | if expireAt != 100 { 104 | t.Fatalf("删除应返回原始expireAt(100),实际为%d", expireAt) 105 | } 106 | 107 | // 验证删除后无法获取 108 | node, status = c.get("key1") 109 | if status != 1 { 110 | t.Fatal("获取已删除项失败,但键仍应存在于哈希表中") 111 | } 112 | if node.expireAt 
!= 0 { 113 | t.Fatalf("已删除项的expireAt应为0,实际为%d", node.expireAt) 114 | } 115 | 116 | // 删除不存在的项 117 | node, status, _ = c.del("不存在") 118 | if status != 0 { 119 | t.Fatalf("删除不存在项应返回0,实际返回%d", status) 120 | } 121 | if node != nil { 122 | t.Fatal("删除不存在项不应返回节点") 123 | } 124 | }) 125 | 126 | t.Run("容量和淘汰", func(t *testing.T) { 127 | c := Create(3) // 容量为3的缓存 128 | var evictedKeys []string 129 | 130 | onEvicted := func(key string, value Value) { 131 | evictedKeys = append(evictedKeys, key) 132 | } 133 | 134 | // 填满缓存 135 | for i := 1; i <= 3; i++ { 136 | c.put("key"+string(rune('0'+i)), testValue("value"+string(rune('0'+i))), 100, onEvicted) 137 | } 138 | 139 | // 再添加一项,应该淘汰最早的key1 140 | c.put("key4", testValue("value4"), 100, onEvicted) 141 | 142 | if len(evictedKeys) != 1 { 143 | t.Fatalf("应淘汰1项,实际淘汰%d项", len(evictedKeys)) 144 | } 145 | if evictedKeys[0] != "key1" { 146 | t.Fatalf("应淘汰key1,实际淘汰%s", evictedKeys[0]) 147 | } 148 | 149 | // 验证缓存状态 150 | _, status := c.get("key1") 151 | if status != 0 { 152 | t.Fatal("key1应已被淘汰") 153 | } 154 | 155 | for i := 2; i <= 4; i++ { 156 | node, status := c.get("key" + string(rune('0'+i))) 157 | if status != 1 || node == nil { 158 | t.Fatalf("key%d应存在于缓存中", i) 159 | } 160 | } 161 | }) 162 | 163 | t.Run("LRU顺序维护", func(t *testing.T) { 164 | c := Create(3) 165 | 166 | // 按顺序添加3项 167 | for i := 1; i <= 3; i++ { 168 | c.put("key"+string(rune('0'+i)), testValue("value"+string(rune('0'+i))), 100, nil) 169 | } 170 | 171 | // 访问顺序:key1 (最后访问),key2, key3 (最早访问) 172 | c.get("key2") 173 | c.get("key1") 174 | 175 | // 添加新项,应淘汰key3 176 | c.put("key4", testValue("value4"), 100, nil) 177 | 178 | // 验证key3被淘汰 179 | node, status := c.get("key3") 180 | if status != 0 || node != nil { 181 | t.Fatal("key3应已被淘汰") 182 | } 183 | 184 | // 其他键应该存在 185 | for i := 1; i <= 4; i++ { 186 | if i == 3 { 187 | continue 188 | } 189 | _, status := c.get("key" + string(rune('0'+i))) 190 | if status != 1 { 191 | t.Fatalf("key%d应存在于缓存中", i) 192 | } 193 | } 194 | }) 195 
| 196 | t.Run("遍历缓存", func(t *testing.T) { 197 | c := Create(5) 198 | 199 | // 添加3项 200 | for i := 1; i <= 3; i++ { 201 | c.put("key"+string(rune('0'+i)), testValue("value"+string(rune('0'+i))), 100, nil) 202 | } 203 | 204 | // 遍历并收集所有键 205 | var keys []string 206 | c.walk(func(key string, value Value, expireAt int64) bool { 207 | keys = append(keys, key) 208 | return true 209 | }) 210 | 211 | // 应有3个键 212 | if len(keys) != 3 { 213 | t.Fatalf("应有3个键,实际有%d个", len(keys)) 214 | } 215 | 216 | // 键应该是反向添加顺序(因为新项是添加到链表头) 217 | expectedKeys := []string{"key3", "key2", "key1"} 218 | for i, key := range expectedKeys { 219 | if i >= len(keys) || keys[i] != key { 220 | t.Fatalf("第%d个键应为%s,实际为%s", i, key, keys[i]) 221 | } 222 | } 223 | 224 | // 测试提前终止遍历 225 | var earlyKeys []string 226 | c.walk(func(key string, value Value, expireAt int64) bool { 227 | earlyKeys = append(earlyKeys, key) 228 | return len(earlyKeys) < 2 // 只收集前2个键 229 | }) 230 | 231 | // 应只有2个键 232 | if len(earlyKeys) != 2 { 233 | t.Fatalf("应有2个键,实际有%d个", len(earlyKeys)) 234 | } 235 | }) 236 | } 237 | 238 | // 测试缓存容量限制和LRU替换策略 239 | func TestCacheLRUEviction(t *testing.T) { 240 | var evictedKeys []string 241 | onEvicted := func(key string, value Value) { 242 | evictedKeys = append(evictedKeys, key) 243 | } 244 | 245 | // 创建一个容量为3的缓存 246 | c := Create(3) 247 | 248 | // 添加3个项,不应该有淘汰 249 | c.put("key1", testValue("value1"), Now()+int64(time.Hour), onEvicted) 250 | c.put("key2", testValue("value2"), Now()+int64(time.Hour), onEvicted) 251 | c.put("key3", testValue("value3"), Now()+int64(time.Hour), onEvicted) 252 | 253 | if len(evictedKeys) != 0 { 254 | t.Errorf("Expected no evictions, got %v", evictedKeys) 255 | } 256 | 257 | // 访问key1使其成为最近使用的 258 | c.get("key1") 259 | 260 | // 添加第4个项,应该淘汰最少使用的key2 261 | c.put("key4", testValue("value4"), Now()+int64(time.Hour), onEvicted) 262 | 263 | if len(evictedKeys) != 1 || evictedKeys[0] != "key2" { 264 | t.Errorf("Expected key2 to be evicted, got %v", evictedKeys) 265 | } 
266 | 267 | // 验证key2已被淘汰 268 | node, status := c.get("key2") 269 | if status != 0 || node != nil { 270 | t.Errorf("Expected key2 to be evicted") 271 | } 272 | 273 | // 验证其他键仍然存在 274 | keys := []string{"key1", "key3", "key4"} 275 | for _, key := range keys { 276 | node, status := c.get(key) 277 | if status != 1 || node == nil { 278 | t.Errorf("Expected %s to exist", key) 279 | } 280 | } 281 | } 282 | 283 | // 测试walk方法 284 | func TestCacheWalk(t *testing.T) { 285 | c := Create(5) 286 | 287 | // 添加几个项 288 | c.put("key1", testValue("value1"), Now()+int64(time.Hour), nil) 289 | c.put("key2", testValue("value2"), Now()+int64(time.Hour), nil) 290 | c.put("key3", testValue("value3"), Now()+int64(time.Hour), nil) 291 | 292 | // 删除一个项 293 | c.del("key2") 294 | 295 | // 使用walk收集所有项 296 | var keys []string 297 | c.walk(func(key string, value Value, expireAt int64) bool { 298 | keys = append(keys, key) 299 | return true 300 | }) 301 | 302 | // 验证只有未删除的项被遍历 303 | if len(keys) != 2 || !contains(keys, "key1") || !contains(keys, "key3") || contains(keys, "key2") { 304 | t.Errorf("Walk didn't return expected keys, got %v", keys) 305 | } 306 | 307 | // 测试提前终止遍历 308 | count := 0 309 | c.walk(func(key string, value Value, expireAt int64) bool { 310 | count++ 311 | return false // 只处理第一个项 312 | }) 313 | 314 | if count != 1 { 315 | t.Errorf("Walk didn't stop early as expected") 316 | } 317 | } 318 | 319 | // 测试adjust方法 320 | func TestCacheAdjust(t *testing.T) { 321 | c := Create(5) 322 | 323 | // 添加几个项以形成链表 324 | c.put("key1", testValue("value1"), Now()+int64(time.Hour), nil) 325 | c.put("key2", testValue("value2"), Now()+int64(time.Hour), nil) 326 | c.put("key3", testValue("value3"), Now()+int64(time.Hour), nil) 327 | 328 | // 获取key1的索引 329 | idx1 := c.hmap["key1"] 330 | 331 | // 将key1移动到链表头部 332 | c.adjust(idx1, p, n) 333 | 334 | // 验证key1现在是最近使用的 335 | if c.dlnk[0][n] != idx1 { 336 | t.Errorf("Expected key1 to be at the head of the list") 337 | } 338 | 339 | // 将key1移动到链表尾部 340 | 
c.adjust(idx1, n, p) 341 | 342 | // 验证key1现在是最少使用的 343 | if c.dlnk[0][p] != idx1 { 344 | t.Errorf("Expected key1 to be at the tail of the list") 345 | } 346 | } 347 | 348 | // 测试lru2Store的基本接口 349 | func TestLRU2StoreBasicOperations(t *testing.T) { 350 | var evictedKeys []string 351 | onEvicted := func(key string, value Value) { 352 | evictedKeys = append(evictedKeys, fmt.Sprintf("%s:%v", key, value)) 353 | } 354 | 355 | opts := Options{ 356 | BucketCount: 4, 357 | CapPerBucket: 2, 358 | Level2Cap: 3, 359 | CleanupInterval: time.Minute, 360 | OnEvicted: onEvicted, 361 | } 362 | 363 | store := newLRU2Cache(opts) 364 | defer store.Close() 365 | 366 | // 测试Set和Get 367 | err := store.Set("key1", testValue("value1")) 368 | if err != nil { 369 | t.Errorf("Set failed: %v", err) 370 | } 371 | 372 | value, found := store.Get("key1") 373 | if !found || value != testValue("value1") { 374 | t.Errorf("Get failed, expected 'value1', got %v, found: %v", value, found) 375 | } 376 | 377 | // 测试更新 378 | err = store.Set("key1", testValue("value1-updated")) 379 | if err != nil { 380 | t.Errorf("Set update failed: %v", err) 381 | } 382 | 383 | value, found = store.Get("key1") 384 | if !found || value != testValue("value1-updated") { 385 | t.Errorf("Get after update failed, expected 'value1-updated', got %v", value) 386 | } 387 | 388 | // 测试不存在的键 389 | value, found = store.Get("nonexistent") 390 | if found { 391 | t.Errorf("Get nonexistent key should return false, got %v, %v", value, found) 392 | } 393 | 394 | // 测试删除 395 | deleted := store.Delete("key1") 396 | if !deleted { 397 | t.Errorf("Delete should return true") 398 | } 399 | 400 | value, found = store.Get("key1") 401 | if found { 402 | t.Errorf("Get after delete should return false, got %v, %v", value, found) 403 | } 404 | 405 | // 测试删除不存在的键 406 | deleted = store.Delete("nonexistent") 407 | if deleted { 408 | t.Errorf("Delete nonexistent key should return false") 409 | } 410 | } 411 | 412 | // 测试LRU2Store的LRU替换策略 413 | func 
TestLRU2StoreLRUEviction(t *testing.T) { 414 | var evictedKeys []string 415 | onEvicted := func(key string, value Value) { 416 | evictedKeys = append(evictedKeys, key) 417 | } 418 | 419 | opts := Options{ 420 | BucketCount: 1, // 单桶以简化测试 421 | CapPerBucket: 2, // 一级缓存容量 422 | Level2Cap: 2, // 二级缓存容量 423 | CleanupInterval: time.Minute, 424 | OnEvicted: onEvicted, 425 | } 426 | 427 | store := newLRU2Cache(opts) 428 | defer store.Close() 429 | 430 | // 添加超过一级缓存容量的项 431 | store.Set("key1", testValue("value1")) 432 | store.Set("key2", testValue("value2")) 433 | store.Set("key3", testValue("value3")) // 应该淘汰key1到二级缓存 434 | 435 | // key1应该在二级缓存中 436 | value, found := store.Get("key1") 437 | if !found || value != testValue("value1") { 438 | t.Errorf("key1 should be in level2 cache, got %v, found: %v", value, found) 439 | } 440 | 441 | // 添加更多项,超过二级缓存容量 442 | store.Set("key4", testValue("value4")) // 应该淘汰key2到二级缓存 443 | store.Set("key5", testValue("value5")) // 应该淘汰key3,key1应该从二级缓存中被淘汰 444 | 445 | // key1应该已被完全淘汰 446 | value, found = store.Get("key1") 447 | if found { 448 | t.Errorf("key1 should be evicted, got %v, found: %v", value, found) 449 | } 450 | } 451 | 452 | // 测试过期时间 453 | func TestLRU2StoreExpiration(t *testing.T) { 454 | opts := Options{ 455 | BucketCount: 1, 456 | CapPerBucket: 5, 457 | Level2Cap: 5, 458 | CleanupInterval: 100 * time.Millisecond, // 快速清理 459 | OnEvicted: nil, 460 | } 461 | 462 | store := newLRU2Cache(opts) 463 | defer store.Close() 464 | 465 | // 添加一个很快过期的项 466 | shortDuration := 200 * time.Millisecond 467 | store.SetWithExpiration("expires-soon", testValue("value"), shortDuration) 468 | 469 | // 添加一个不会很快过期的项 470 | store.SetWithExpiration("expires-later", testValue("value"), time.Hour) 471 | 472 | // 验证都能获取到 473 | _, found := store.Get("expires-soon") 474 | if !found { 475 | t.Errorf("expires-soon should be found initially") 476 | } 477 | 478 | _, found = store.Get("expires-later") 479 | if !found { 480 | t.Errorf("expires-later should be 
found") 481 | } 482 | 483 | // 等待短期项过期 484 | time.Sleep(300 * time.Millisecond) 485 | 486 | // 验证短期项已过期,长期项仍存在 487 | _, found = store.Get("expires-soon") 488 | if found { 489 | t.Errorf("expires-soon should have expired") 490 | } 491 | 492 | _, found = store.Get("expires-later") 493 | if !found { 494 | t.Errorf("expires-later should still be valid") 495 | } 496 | } 497 | 498 | // 测试LRU2Store的清理循环 499 | func TestLRU2StoreCleanupLoop(t *testing.T) { 500 | opts := Options{ 501 | BucketCount: 1, 502 | CapPerBucket: 5, 503 | Level2Cap: 5, 504 | CleanupInterval: 100 * time.Millisecond, // 快速清理 505 | OnEvicted: nil, 506 | } 507 | 508 | store := newLRU2Cache(opts) 509 | defer store.Close() 510 | 511 | // 添加几个很快过期的项 512 | shortDuration := 200 * time.Millisecond 513 | store.SetWithExpiration("expires1", testValue("value1"), shortDuration) 514 | store.SetWithExpiration("expires2", testValue("value2"), shortDuration) 515 | 516 | // 添加一个不会很快过期的项 517 | store.SetWithExpiration("keeps", testValue("value"), time.Hour) 518 | 519 | // 等待项过期并被清理循环处理 520 | time.Sleep(500 * time.Millisecond) 521 | 522 | // 验证过期项已被清理 523 | _, found := store.Get("expires1") 524 | if found { 525 | t.Errorf("expires1 should have been cleaned up") 526 | } 527 | 528 | _, found = store.Get("expires2") 529 | if found { 530 | t.Errorf("expires2 should have been cleaned up") 531 | } 532 | 533 | // 验证未过期项仍然存在 534 | _, found = store.Get("keeps") 535 | if !found { 536 | t.Errorf("keeps should still be valid") 537 | } 538 | } 539 | 540 | // 测试LRU2Store的Clear方法 541 | func TestLRU2StoreClear(t *testing.T) { 542 | opts := Options{ 543 | BucketCount: 2, 544 | CapPerBucket: 5, 545 | Level2Cap: 5, 546 | CleanupInterval: time.Minute, 547 | OnEvicted: nil, 548 | } 549 | 550 | store := newLRU2Cache(opts) 551 | defer store.Close() 552 | 553 | // 添加一些项 554 | for i := 0; i < 10; i++ { 555 | store.Set(fmt.Sprintf("key%d", i), testValue(fmt.Sprintf("value%d", i))) 556 | } 557 | 558 | // 验证长度 559 | if length := store.Len(); length 
!= 10 { 560 | t.Errorf("Expected length 10, got %d", length) 561 | } 562 | 563 | // 清空缓存 564 | store.Clear() 565 | 566 | // 验证长度为0 567 | if length := store.Len(); length != 0 { 568 | t.Errorf("Expected length 0 after Clear, got %d", length) 569 | } 570 | 571 | // 验证项已被删除 572 | for i := 0; i < 10; i++ { 573 | _, found := store.Get(fmt.Sprintf("key%d", i)) 574 | if found { 575 | t.Errorf("key%d should not be found after Clear", i) 576 | } 577 | } 578 | } 579 | 580 | // 测试_get内部方法 581 | func TestLRU2Store_Get(t *testing.T) { 582 | opts := Options{ 583 | BucketCount: 1, 584 | CapPerBucket: 5, 585 | Level2Cap: 5, 586 | CleanupInterval: time.Minute, 587 | OnEvicted: nil, 588 | } 589 | 590 | store := newLRU2Cache(opts) 591 | defer store.Close() 592 | 593 | // 向一级缓存添加一个项 594 | idx := hashBKRD("test-key") & store.mask 595 | store.caches[idx][0].put("test-key", testValue("test-value"), Now()+int64(time.Hour), nil) 596 | 597 | // 使用_get直接从一级缓存获取 598 | node, status := store._get("test-key", idx, 0) 599 | if status != 1 || node == nil || node.v != testValue("test-value") { 600 | t.Errorf("_get failed to retrieve from level 0") 601 | } 602 | 603 | // 向二级缓存添加一个项 604 | store.caches[idx][1].put("test-key2", testValue("test-value2"), Now()+int64(time.Hour), nil) 605 | 606 | // 使用_get直接从二级缓存获取 607 | node, status = store._get("test-key2", idx, 1) 608 | if status != 1 || node == nil || node.v != testValue("test-value2") { 609 | t.Errorf("_get failed to retrieve from level 1") 610 | } 611 | 612 | // 测试获取不存在的键 613 | node, status = store._get("nonexistent", idx, 0) 614 | if status != 0 || node != nil { 615 | t.Errorf("_get should return status 0 for nonexistent key") 616 | } 617 | 618 | // 测试过期项 619 | store.caches[idx][0].put("expired", testValue("value"), Now()-1000, nil) // 已过期 620 | node, status = store._get("expired", idx, 0) 621 | if status != 0 || node != nil { 622 | t.Errorf("_get should return status 0 for expired key") 623 | } 624 | } 625 | 626 | // 测试delete内部方法 627 | func 
TestLRU2StoreDelete(t *testing.T) { 628 | var evictedKeys []string 629 | onEvicted := func(key string, value Value) { 630 | evictedKeys = append(evictedKeys, key) 631 | } 632 | 633 | opts := Options{ 634 | BucketCount: 1, 635 | CapPerBucket: 5, 636 | Level2Cap: 5, 637 | CleanupInterval: time.Minute, 638 | OnEvicted: onEvicted, 639 | } 640 | 641 | store := newLRU2Cache(opts) 642 | defer store.Close() 643 | 644 | // 向一级缓存添加一个项 645 | idx := hashBKRD("test-key") & store.mask 646 | store.caches[idx][0].put("test-key", testValue("test-value"), Now()+int64(time.Hour), nil) 647 | 648 | // 向二级缓存添加一个项 649 | store.caches[idx][1].put("test-key2", testValue("test-value2"), Now()+int64(time.Hour), nil) 650 | 651 | // 删除一级缓存中的项 652 | deleted := store.delete("test-key", idx) 653 | if !deleted { 654 | t.Errorf("delete should return true for existing key") 655 | } 656 | 657 | // 验证项已被删除且回调被调用 658 | if len(evictedKeys) != 1 || evictedKeys[0] != "test-key" { 659 | t.Errorf("OnEvicted callback not called correctly, got %v", evictedKeys) 660 | } 661 | 662 | // 重置回调记录 663 | evictedKeys = nil 664 | 665 | // 删除二级缓存中的项 666 | deleted = store.delete("test-key2", idx) 667 | if !deleted { 668 | t.Errorf("delete should return true for existing key in level 1") 669 | } 670 | 671 | // 验证项已被删除且回调被调用 672 | if len(evictedKeys) != 1 || evictedKeys[0] != "test-key2" { 673 | t.Errorf("OnEvicted callback not called correctly, got %v", evictedKeys) 674 | } 675 | 676 | // 测试删除不存在的键 677 | deleted = store.delete("nonexistent", idx) 678 | if deleted { 679 | t.Errorf("delete should return false for nonexistent key") 680 | } 681 | } 682 | 683 | // 测试并发操作 684 | func TestLRU2StoreConcurrent(t *testing.T) { 685 | opts := Options{ 686 | BucketCount: 8, 687 | CapPerBucket: 100, 688 | Level2Cap: 200, 689 | CleanupInterval: time.Minute, 690 | OnEvicted: nil, 691 | } 692 | 693 | store := newLRU2Cache(opts) 694 | defer store.Close() 695 | 696 | const goroutines = 10 697 | const operationsPerGoroutine = 100 698 | 699 | 
var wg sync.WaitGroup 700 | wg.Add(goroutines) 701 | 702 | for g := 0; g < goroutines; g++ { 703 | go func(id int) { 704 | defer wg.Done() 705 | 706 | // 每个协程操作自己的一组键 707 | prefix := fmt.Sprintf("g%d-", id) 708 | 709 | // 添加操作 710 | for i := 0; i < operationsPerGoroutine; i++ { 711 | key := prefix + strconv.Itoa(i) 712 | value := testValue(fmt.Sprintf("value-%s", key)) 713 | 714 | err := store.Set(key, value) 715 | if err != nil { 716 | t.Errorf("Set failed: %v", err) 717 | } 718 | } 719 | 720 | // 获取操作 721 | for i := 0; i < operationsPerGoroutine; i++ { 722 | key := prefix + strconv.Itoa(i) 723 | expectedValue := testValue(fmt.Sprintf("value-%s", key)) 724 | 725 | value, found := store.Get(key) 726 | if !found { 727 | t.Errorf("Get failed for key %s", key) 728 | } else if value != expectedValue { 729 | t.Errorf("Get returned wrong value for %s: expected %s, got %v", key, expectedValue, value) 730 | } 731 | } 732 | 733 | // 删除操作 734 | for i := 0; i < operationsPerGoroutine/2; i++ { // 删除一半的键 735 | key := prefix + strconv.Itoa(i) 736 | deleted := store.Delete(key) 737 | if !deleted { 738 | t.Errorf("Delete failed for key %s", key) 739 | } 740 | } 741 | }(g) 742 | } 743 | 744 | wg.Wait() 745 | 746 | // 验证大致长度 747 | // 每个协程添加了operationsPerGoroutine项,又删除了一半 748 | expectedItems := goroutines * operationsPerGoroutine / 2 749 | actualItems := store.Len() 750 | 751 | // 允许一些误差,因为可能有一些键碰撞或未完成的操作 752 | tolerance := expectedItems / 10 753 | if actualItems < expectedItems-tolerance || actualItems > expectedItems+tolerance { 754 | t.Errorf("Expected approximately %d items, got %d", expectedItems, actualItems) 755 | } 756 | } 757 | 758 | // 测试缓存命中率统计 759 | func TestLRU2StoreHitRatio(t *testing.T) { 760 | opts := Options{ 761 | BucketCount: 4, 762 | CapPerBucket: 10, 763 | Level2Cap: 20, 764 | CleanupInterval: time.Minute, 765 | OnEvicted: nil, 766 | } 767 | 768 | store := newLRU2Cache(opts) 769 | defer store.Close() 770 | 771 | // 添加50个项 772 | for i := 0; i < 50; i++ { 773 | 
store.Set(fmt.Sprintf("key%d", i), testValue(fmt.Sprintf("value%d", i))) 774 | } 775 | 776 | // 统计命中次数 777 | hits := 0 778 | attempts := 0 779 | 780 | // 尝试获取100个键,一半存在,一半不存在 781 | for i := 0; i < 100; i++ { 782 | key := fmt.Sprintf("key%d", i) 783 | _, found := store.Get(key) 784 | attempts++ 785 | if found { 786 | hits++ 787 | } 788 | } 789 | 790 | // 计算命中率 791 | hitRatio := float64(hits) / float64(attempts) 792 | 793 | // 验证命中率大致为0.25-0.35(因为我们添加了50个项但有分桶和LRU淘汰) 794 | if hitRatio < 0.25 || hitRatio > 0.35 { 795 | t.Errorf("Hit ratio out of expected range: got %.2f", hitRatio) 796 | } 797 | } 798 | 799 | // 测试缓存容量增长和性能 800 | func BenchmarkLRU2StoreOperations(b *testing.B) { 801 | opts := Options{ 802 | BucketCount: 16, 803 | CapPerBucket: 1000, 804 | Level2Cap: 2000, 805 | CleanupInterval: time.Minute, 806 | OnEvicted: nil, 807 | } 808 | 809 | store := newLRU2Cache(opts) 810 | defer store.Close() 811 | 812 | // 预填充一些数据 813 | for i := 0; i < 5000; i++ { 814 | store.Set(fmt.Sprintf("init-key%d", i), testValue(fmt.Sprintf("value%d", i))) 815 | } 816 | 817 | b.ResetTimer() 818 | 819 | // 混合操作基准测试 820 | b.Run("MixedOperations", func(b *testing.B) { 821 | for i := 0; i < b.N; i++ { 822 | key := fmt.Sprintf("bench-key%d", i%10000) 823 | 824 | // 75%的几率执行Get,25%的几率执行Set 825 | if i%4 != 0 { 826 | store.Get(key) 827 | } else { 828 | store.Set(key, testValue(fmt.Sprintf("value%d", i))) 829 | } 830 | } 831 | }) 832 | 833 | // Get操作基准测试 834 | b.Run("GetOnly", func(b *testing.B) { 835 | for i := 0; i < b.N; i++ { 836 | key := fmt.Sprintf("init-key%d", i%5000) 837 | store.Get(key) 838 | } 839 | }) 840 | 841 | // Set操作基准测试 842 | b.Run("SetOnly", func(b *testing.B) { 843 | for i := 0; i < b.N; i++ { 844 | key := fmt.Sprintf("new-key%d", i) 845 | store.Set(key, testValue(fmt.Sprintf("value%d", i))) 846 | } 847 | }) 848 | } 849 | 850 | // 辅助函数:检查切片是否包含字符串 851 | func contains(slice []string, str string) bool { 852 | for _, s := range slice { 853 | if s == str { 854 | return true 855 
| } 856 | } 857 | return false 858 | } 859 | -------------------------------------------------------------------------------- /store/store.go: -------------------------------------------------------------------------------- 1 | package store 2 | 3 | import "time" 4 | 5 | // Value 缓存值接口 6 | type Value interface { 7 | Len() int // 返回数据大小 8 | } 9 | 10 | // Store 缓存接口 11 | type Store interface { 12 | Get(key string) (Value, bool) 13 | Set(key string, value Value) error 14 | SetWithExpiration(key string, value Value, expiration time.Duration) error 15 | Delete(key string) bool 16 | Clear() 17 | Len() int 18 | Close() 19 | } 20 | 21 | // CacheType 缓存类型 22 | type CacheType string 23 | 24 | const ( 25 | LRU CacheType = "lru" 26 | LRU2 CacheType = "lru2" 27 | ) 28 | 29 | // Options 通用缓存配置选项 30 | type Options struct { 31 | MaxBytes int64 // 最大的缓存字节数(用于 lru) 32 | BucketCount uint16 // 缓存的桶数量(用于 lru-2) 33 | CapPerBucket uint16 // 每个桶的容量(用于 lru-2) 34 | Level2Cap uint16 // lru-2 中二级缓存的容量(用于 lru-2) 35 | CleanupInterval time.Duration 36 | OnEvicted func(key string, value Value) 37 | } 38 | 39 | func NewOptions() Options { 40 | return Options{ 41 | MaxBytes: 8192, 42 | BucketCount: 16, 43 | CapPerBucket: 512, 44 | Level2Cap: 256, 45 | CleanupInterval: time.Minute, 46 | OnEvicted: nil, 47 | } 48 | } 49 | 50 | // NewStore 创建缓存存储实例 51 | func NewStore(cacheType CacheType, opts Options) Store { 52 | switch cacheType { 53 | case LRU2: 54 | return newLRU2Cache(opts) 55 | case LRU: 56 | return newLRUCache(opts) 57 | default: 58 | return newLRUCache(opts) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package kamacache 2 | 3 | import "strings" 4 | 5 | func ValidPeerAddr(addr string) bool { 6 | t1 := strings.Split(addr, ":") 7 | if len(t1) != 2 { 8 | return false 9 | } 10 | // TODO: more selections 11 | t2 := strings.Split(t1[0], ".") 12 | if 
t1[0] != "localhost" && len(t2) != 4 { 13 | return false 14 | } 15 | return true 16 | } 17 | --------------------------------------------------------------------------------